-rw-r--r--  .cirrus.yml | 6
-rw-r--r--  Makefile | 3
-rw-r--r--  cmd/podman/attach.go | 3
-rw-r--r--  cmd/podman/cliconfig/config.go | 17
-rw-r--r--  cmd/podman/common.go | 4
-rw-r--r--  cmd/podman/exec.go | 3
-rw-r--r--  cmd/podman/images.go | 4
-rw-r--r--  cmd/podman/main_local.go | 4
-rw-r--r--  cmd/podman/restore.go | 1
-rw-r--r--  cmd/podman/rm.go | 2
-rw-r--r--  cmd/podman/shared/create.go | 192
-rw-r--r--  cmd/podman/start.go | 2
-rw-r--r--  cni/87-podman-bridge.conflist | 3
-rw-r--r--  completions/bash/podman | 1
-rw-r--r--  contrib/cirrus/README.md | 2
-rw-r--r--  docs/Readme.md | 9
-rw-r--r--  docs/source/markdown/podman-attach.1.md | 4
-rw-r--r--  docs/source/markdown/podman-container-restore.1.md | 9
-rw-r--r--  docs/source/markdown/podman-create.1.md | 8
-rw-r--r--  docs/source/markdown/podman-exec.1.md | 2
-rw-r--r--  docs/source/markdown/podman-rm.1.md | 3
-rw-r--r--  docs/source/markdown/podman-run.1.md | 8
-rw-r--r--  docs/source/markdown/podman-start.1.md | 4
-rw-r--r--  go.mod | 11
-rw-r--r--  go.sum | 12
-rw-r--r--  install.md | 2
-rw-r--r--  libpod.conf | 32
-rw-r--r--  libpod/config/default.go | 17
-rw-r--r--  libpod/container.go | 8
-rw-r--r--  libpod/container_api.go | 5
-rw-r--r--  libpod/container_internal_linux.go | 50
-rw-r--r--  libpod/events/journal_linux.go | 2
-rw-r--r--  libpod/events/logfile.go | 2
-rw-r--r--  libpod/kube.go | 9
-rw-r--r--  libpod/networking_linux.go | 49
-rw-r--r--  libpod/oci_attach_linux.go | 2
-rw-r--r--  libpod/options.go | 25
-rw-r--r--  pkg/adapter/containers.go | 24
-rw-r--r--  pkg/adapter/pods.go | 126
-rw-r--r--  pkg/rootless/rootless_linux.c | 14
-rw-r--r--  pkg/spec/config_linux_cgo.go | 2
-rw-r--r--  pkg/spec/config_linux_nocgo.go | 2
-rw-r--r--  pkg/spec/config_unsupported.go | 2
-rw-r--r--  pkg/spec/createconfig.go | 412
-rw-r--r--  pkg/spec/namespaces.go | 433
-rw-r--r--  pkg/spec/security.go | 172
-rw-r--r--  pkg/spec/spec.go | 315
-rw-r--r--  pkg/spec/spec_test.go | 6
-rw-r--r--  pkg/spec/storage.go | 12
-rw-r--r--  pkg/util/mountOpts.go | 31
-rw-r--r--  test/e2e/checkpoint_test.go | 11
-rw-r--r--  test/e2e/create_staticmac_test.go | 46
-rw-r--r--  test/e2e/generate_kube_test.go | 33
-rw-r--r--  test/e2e/run_volume_test.go | 22
-rw-r--r--  test/e2e/test.yaml | 3
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go | 943
-rw-r--r--  vendor/github.com/containernetworking/cni/libcni/api.go | 230
-rw-r--r--  vendor/github.com/containernetworking/cni/libcni/conf.go | 4
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/args.go | 2
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/args.go | 2
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/types.go | 25
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/utils/utils.go | 51
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/example_changes.go | 97
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go | 411
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/example_changes.go | 97
-rw-r--r--  vendor/github.com/json-iterator/go/iter.go | 27
-rw-r--r--  vendor/github.com/json-iterator/go/iter_array.go | 10
-rw-r--r--  vendor/github.com/json-iterator/go/iter_object.go | 24
-rw-r--r--  vendor/github.com/json-iterator/go/iter_skip_sloppy.go | 19
-rw-r--r--  vendor/github.com/json-iterator/go/reflect.go | 5
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_extension.go | 2
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_map.go | 4
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_marshaler.go | 12
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_struct_decoder.go | 44
-rw-r--r--  vendor/github.com/klauspost/compress/flate/gen.go | 265
-rw-r--r--  vendor/github.com/klauspost/cpuid/private-gen.go | 476
-rw-r--r--  vendor/github.com/onsi/gomega/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/onsi/gomega/go.mod | 3
-rw-r--r--  vendor/github.com/onsi/gomega/go.sum | 4
-rw-r--r--  vendor/github.com/onsi/gomega/gomega_dsl.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/.travis.yml | 9
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/CHANGELOG.md | 39
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/Gopkg.lock | 125
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/Gopkg.toml | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/Makefile | 3
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/README.md | 23
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config.go | 4
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config_env.go | 24
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/constants.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/metrics.go | 20
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/propagation.go | 8
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/reporter.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler.go | 446
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_remote.go | 334
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go (renamed from vendor/github.com/uber/jaeger-client-go/sampler_options.go) | 71
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_v2.go | 93
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/span.go | 144
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/span_context.go (renamed from vendor/github.com/uber/jaeger-client-go/context.go) | 161
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/tracer.go | 127
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go | 93
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/zipkin.go | 5
-rw-r--r--  vendor/github.com/ulikunitz/xz/example.go | 40
-rw-r--r--  vendor/go.uber.org/atomic/.codecov.yml | 15
-rw-r--r--  vendor/go.uber.org/atomic/.gitignore | 11
-rw-r--r--  vendor/go.uber.org/atomic/.travis.yml | 27
-rw-r--r--  vendor/go.uber.org/atomic/LICENSE.txt | 19
-rw-r--r--  vendor/go.uber.org/atomic/Makefile | 51
-rw-r--r--  vendor/go.uber.org/atomic/README.md | 36
-rw-r--r--  vendor/go.uber.org/atomic/atomic.go | 351
-rw-r--r--  vendor/go.uber.org/atomic/error.go | 55
-rw-r--r--  vendor/go.uber.org/atomic/glide.lock | 17
-rw-r--r--  vendor/go.uber.org/atomic/glide.yaml | 6
-rw-r--r--  vendor/go.uber.org/atomic/string.go | 49
-rw-r--r--  vendor/golang.org/x/net/html/atom/gen.go | 712
-rw-r--r--  vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61
-rw-r--r--  vendor/golang.org/x/sys/unix/mkpost.go | 122
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall.go | 407
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go | 415
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go | 614
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall_solaris.go | 335
-rw-r--r--  vendor/golang.org/x/sys/unix/mksysctl_openbsd.go | 355
-rw-r--r--  vendor/golang.org/x/sys/unix/mksysnum.go | 190
-rw-r--r--  vendor/golang.org/x/sys/unix/types_aix.go | 237
-rw-r--r--  vendor/golang.org/x/sys/unix/types_darwin.go | 283
-rw-r--r--  vendor/golang.org/x/sys/unix/types_dragonfly.go | 263
-rw-r--r--  vendor/golang.org/x/sys/unix/types_freebsd.go | 400
-rw-r--r--  vendor/golang.org/x/sys/unix/types_netbsd.go | 290
-rw-r--r--  vendor/golang.org/x/sys/unix/types_openbsd.go | 283
-rw-r--r--  vendor/golang.org/x/sys/unix/types_solaris.go | 266
-rw-r--r--  vendor/golang.org/x/text/encoding/charmap/maketables.go | 556
-rw-r--r--  vendor/golang.org/x/text/encoding/htmlindex/gen.go | 173
-rw-r--r--  vendor/golang.org/x/text/encoding/internal/identifier/gen.go | 142
-rw-r--r--  vendor/golang.org/x/text/encoding/japanese/maketables.go | 161
-rw-r--r--  vendor/golang.org/x/text/encoding/korean/maketables.go | 143
-rw-r--r--  vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go | 161
-rw-r--r--  vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go | 140
-rw-r--r--  vendor/golang.org/x/text/internal/language/compact/gen.go | 64
-rw-r--r--  vendor/golang.org/x/text/internal/language/compact/gen_index.go | 113
-rw-r--r--  vendor/golang.org/x/text/internal/language/compact/gen_parents.go | 54
-rw-r--r--  vendor/golang.org/x/text/internal/language/gen.go | 1520
-rw-r--r--  vendor/golang.org/x/text/internal/language/gen_common.go | 20
-rw-r--r--  vendor/golang.org/x/text/language/gen.go | 305
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/gen.go | 133
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/gen_ranges.go | 57
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/gen_trieval.go | 64
-rw-r--r--  vendor/golang.org/x/text/unicode/norm/maketables.go | 986
-rw-r--r--  vendor/golang.org/x/text/unicode/norm/triegen.go | 117
-rw-r--r--  vendor/modules.txt | 417
150 files changed, 4262 insertions, 13693 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 145e49457..c27b76110 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -150,15 +150,15 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
- image: "quay.io/libpod/gate:latest"
+ image: docker.io/library/golang:1.13
cpu: 4
memory: 12
timeout_in: 30m
vendor_script:
- - '/usr/local/bin/entrypoint.sh vendor |& ${TIMESTAMP}'
- - 'cd ${GOSRC} && ./hack/tree_status.sh |& ${TIMESTAMP}'
+ - 'cd ${CIRRUS_WORKING_DIR} && make vendor'
+ - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh'
on_failure:
failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}'
diff --git a/Makefile b/Makefile
index feb8e0ca3..2c1ded9d2 100644
--- a/Makefile
+++ b/Makefile
@@ -522,6 +522,9 @@ vendor:
$(GO) mod vendor && \
$(GO) mod verify
+vendor-in-container:
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor
+
.PHONY: \
.gopathok \
binaries \
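The new vendor-in-container target simply re-runs make vendor inside a golang:1.13 container, so the result no longer depends on the host's Go toolchain. A minimal local invocation, assuming Podman itself is already installed on the host:

    # re-vendor dependencies without touching the host GOPATH
    make vendor-in-container
    # confirm the tree is still clean, as the CI vendor task does
    ./hack/tree_status.sh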
diff --git a/cmd/podman/attach.go b/cmd/podman/attach.go
index b78633ed6..b03673f29 100644
--- a/cmd/podman/attach.go
+++ b/cmd/podman/attach.go
@@ -2,6 +2,7 @@ package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -31,7 +32,7 @@ func init() {
attachCommand.SetHelpTemplate(HelpTemplate())
attachCommand.SetUsageTemplate(UsageTemplate())
flags := attachCommand.Flags()
- flags.StringVar(&attachCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
+ flags.StringVar(&attachCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
flags.BoolVar(&attachCommand.NoStdin, "no-stdin", false, "Do not attach STDIN. The default is false")
flags.BoolVar(&attachCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process")
flags.BoolVarP(&attachCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 58d67ddc1..780b68333 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -467,14 +467,15 @@ type RestartValues struct {
type RestoreValues struct {
PodmanCommand
- All bool
- Keep bool
- Latest bool
- TcpEstablished bool
- Import string
- Name string
- IgnoreRootfs bool
- IgnoreStaticIP bool
+ All bool
+ Keep bool
+ Latest bool
+ TcpEstablished bool
+ Import string
+ Name string
+ IgnoreRootfs bool
+ IgnoreStaticIP bool
+ IgnoreStaticMAC bool
}
type RmValues struct {
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 4cdcbed44..4db043f31 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -188,7 +188,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
"Run container in background and print container ID",
)
createFlags.String(
- "detach-keys", "",
+ "detach-keys", define.DefaultDetachKeys,
"Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`",
)
createFlags.StringSlice(
@@ -328,7 +328,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
)
createFlags.String(
"mac-address", "",
- "Container MAC address (e.g. 92:d0:c6:0a:29:33), not currently supported",
+ "Container MAC address (e.g. 92:d0:c6:0a:29:33)",
)
createFlags.StringP(
"memory", "m", "",
diff --git a/cmd/podman/exec.go b/cmd/podman/exec.go
index 649a7b0db..afa701897 100644
--- a/cmd/podman/exec.go
+++ b/cmd/podman/exec.go
@@ -2,6 +2,7 @@ package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -34,7 +35,7 @@ func init() {
execCommand.SetUsageTemplate(UsageTemplate())
flags := execCommand.Flags()
flags.SetInterspersed(false)
- flags.StringVar(&execCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
+ flags.StringVar(&execCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
flags.StringArrayVarP(&execCommand.Env, "env", "e", []string{}, "Set environment variables")
flags.BoolVarP(&execCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
flags.BoolVarP(&execCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
diff --git a/cmd/podman/images.go b/cmd/podman/images.go
index 6157fda2a..7d498517c 100644
--- a/cmd/podman/images.go
+++ b/cmd/podman/images.go
@@ -291,6 +291,10 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma
if len(tag) == 71 && strings.HasPrefix(tag, "sha256:") {
imageDigest = digest.Digest(tag)
tag = ""
+ } else {
+ if img.Digest() != "" {
+ imageDigest = img.Digest()
+ }
}
params := imagesTemplateParams{
Repository: repo,
diff --git a/cmd/podman/main_local.go b/cmd/podman/main_local.go
index 6057eeec3..f630f1210 100644
--- a/cmd/podman/main_local.go
+++ b/cmd/podman/main_local.go
@@ -16,7 +16,6 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless"
@@ -34,9 +33,6 @@ const remote = false
func init() {
cgroupManager := define.SystemdCgroupsManager
- if runtimeConfig, err := config.NewConfig(""); err == nil {
- cgroupManager = runtimeConfig.CgroupManager
- }
cgroupHelp := "Cgroup manager to use (cgroupfs or systemd)"
cgroupv2, _ := cgroups.IsCgroup2UnifiedMode()
if rootless.IsRootless() && !cgroupv2 {
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index 90d0b2dc4..caefadb6d 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -47,6 +47,7 @@ func init() {
flags.StringVarP(&restoreCommand.Name, "name", "n", "", "Specify new name for container restored from exported checkpoint (only works with --import)")
flags.BoolVar(&restoreCommand.IgnoreRootfs, "ignore-rootfs", false, "Do not apply root file-system changes when importing from exported checkpoint")
flags.BoolVar(&restoreCommand.IgnoreStaticIP, "ignore-static-ip", false, "Ignore IP address set via --static-ip")
+ flags.BoolVar(&restoreCommand.IgnoreStaticMAC, "ignore-static-mac", false, "Ignore MAC address set via --mac-address")
markFlagHiddenForRemoteClient("latest", flags)
}
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 89062f524..6329a9d8e 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -43,7 +43,7 @@ func init() {
flags.BoolVarP(&rmCommand.Force, "force", "f", false, "Force removal of a running or unusable container. The default is false")
flags.BoolVarP(&rmCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.BoolVar(&rmCommand.Storage, "storage", false, "Remove container from storage library")
- flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container")
+ flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove anonymous volumes associated with the container")
markFlagHiddenForRemoteClient("storage", flags)
markFlagHiddenForRemoteClient("latest", flags)
}
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index cb39e334d..c7ea2e389 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -26,7 +26,6 @@ import (
"github.com/docker/docker/pkg/signal"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -195,72 +194,6 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return ctr, createConfig, nil
}
-func parseSecurityOpt(config *cc.CreateConfig, securityOpts []string, runtime *libpod.Runtime) error {
- var (
- labelOpts []string
- )
-
- if config.PidMode.IsHost() {
- labelOpts = append(labelOpts, label.DisableSecOpt()...)
- } else if config.PidMode.IsContainer() {
- ctr, err := runtime.LookupContainer(config.PidMode.Container())
- if err != nil {
- return errors.Wrapf(err, "container %q not found", config.PidMode.Container())
- }
- secopts, err := label.DupSecOpt(ctr.ProcessLabel())
- if err != nil {
- return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
- }
- labelOpts = append(labelOpts, secopts...)
- }
-
- if config.IpcMode.IsHost() {
- labelOpts = append(labelOpts, label.DisableSecOpt()...)
- } else if config.IpcMode.IsContainer() {
- ctr, err := runtime.LookupContainer(config.IpcMode.Container())
- if err != nil {
- return errors.Wrapf(err, "container %q not found", config.IpcMode.Container())
- }
- secopts, err := label.DupSecOpt(ctr.ProcessLabel())
- if err != nil {
- return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
- }
- labelOpts = append(labelOpts, secopts...)
- }
-
- for _, opt := range securityOpts {
- if opt == "no-new-privileges" {
- config.NoNewPrivs = true
- } else {
- con := strings.SplitN(opt, "=", 2)
- if len(con) != 2 {
- return fmt.Errorf("invalid --security-opt 1: %q", opt)
- }
-
- switch con[0] {
- case "label":
- labelOpts = append(labelOpts, con[1])
- case "apparmor":
- config.ApparmorProfile = con[1]
- case "seccomp":
- config.SeccompProfilePath = con[1]
- default:
- return fmt.Errorf("invalid --security-opt 2: %q", opt)
- }
- }
- }
-
- if config.SeccompProfilePath == "" {
- var err error
- config.SeccompProfilePath, err = libpod.DefaultSeccompPath()
- if err != nil {
- return err
- }
- }
- config.LabelOpts = labelOpts
- return nil
-}
-
func configureEntrypoint(c *GenericCLIResults, data *inspect.ImageData) []string {
entrypoint := []string{}
if c.IsSet("entrypoint") {
@@ -336,10 +269,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return nil, err
}
- if c.String("mac-address") != "" {
- return nil, errors.Errorf("--mac-address option not currently supported")
- }
-
imageID := ""
inputCommand = c.InputArgs[1:]
@@ -352,11 +281,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
rootfs = c.InputArgs[0]
}
- sysctl, err := validateSysctl(c.StringSlice("sysctl"))
- if err != nil {
- return nil, errors.Wrapf(err, "invalid value for sysctl")
- }
-
if c.String("memory") != "" {
memoryLimit, err = units.RAMInBytes(c.String("memory"))
if err != nil {
@@ -695,61 +619,96 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
pidsLimit = 0
}
+ pid := &cc.PidConfig{
+ PidMode: pidMode,
+ }
+ ipc := &cc.IpcConfig{
+ IpcMode: ipcMode,
+ }
+
+ cgroup := &cc.CgroupConfig{
+ Cgroups: c.String("cgroups"),
+ Cgroupns: c.String("cgroupns"),
+ CgroupParent: c.String("cgroup-parent"),
+ CgroupMode: cgroupMode,
+ }
+
+ userns := &cc.UserConfig{
+ GroupAdd: c.StringSlice("group-add"),
+ IDMappings: idmappings,
+ UsernsMode: usernsMode,
+ User: user,
+ }
+
+ uts := &cc.UtsConfig{
+ UtsMode: utsMode,
+ NoHosts: c.Bool("no-hosts"),
+ HostAdd: c.StringSlice("add-host"),
+ Hostname: c.String("hostname"),
+ }
+
+ net := &cc.NetworkConfig{
+ DNSOpt: c.StringSlice("dns-opt"),
+ DNSSearch: c.StringSlice("dns-search"),
+ DNSServers: c.StringSlice("dns"),
+ HTTPProxy: c.Bool("http-proxy"),
+ MacAddress: c.String("mac-address"),
+ Network: network,
+ NetMode: netMode,
+ IPAddress: c.String("ip"),
+ Publish: c.StringSlice("publish"),
+ PublishAll: c.Bool("publish-all"),
+ PortBindings: portBindings,
+ }
+
+ sysctl, err := validateSysctl(c.StringSlice("sysctl"))
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid value for sysctl")
+ }
+
+ secConfig := &cc.SecurityConfig{
+ CapAdd: c.StringSlice("cap-add"),
+ CapDrop: c.StringSlice("cap-drop"),
+ Privileged: c.Bool("privileged"),
+ ReadOnlyRootfs: c.Bool("read-only"),
+ ReadOnlyTmpfs: c.Bool("read-only-tmpfs"),
+ Sysctl: sysctl,
+ }
+
+ if err := secConfig.SetLabelOpts(runtime, pid, ipc); err != nil {
+ return nil, err
+ }
+ if err := secConfig.SetSecurityOpts(runtime, c.StringArray("security-opt")); err != nil {
+ return nil, err
+ }
+
config := &cc.CreateConfig{
Annotations: annotations,
BuiltinImgVolumes: ImageVolumes,
ConmonPidFile: c.String("conmon-pidfile"),
ImageVolumeType: c.String("image-volume"),
- CapAdd: c.StringSlice("cap-add"),
- CapDrop: c.StringSlice("cap-drop"),
CidFile: c.String("cidfile"),
- Cgroupns: c.String("cgroupns"),
- Cgroups: c.String("cgroups"),
- CgroupParent: c.String("cgroup-parent"),
Command: command,
UserCommand: userCommand,
Detach: c.Bool("detach"),
Devices: c.StringSlice("device"),
- DNSOpt: c.StringSlice("dns-opt"),
- DNSSearch: c.StringSlice("dns-search"),
- DNSServers: c.StringSlice("dns"),
Entrypoint: entrypoint,
Env: env,
// ExposedPorts: ports,
- GroupAdd: c.StringSlice("group-add"),
- Hostname: c.String("hostname"),
- HostAdd: c.StringSlice("add-host"),
- HTTPProxy: c.Bool("http-proxy"),
- NoHosts: c.Bool("no-hosts"),
- IDMappings: idmappings,
Init: c.Bool("init"),
InitPath: c.String("init-path"),
Image: imageName,
ImageID: imageID,
Interactive: c.Bool("interactive"),
// IP6Address: c.String("ipv6"), // Not implemented yet - needs CNI support for static v6
- IPAddress: c.String("ip"),
- Labels: labels,
+ Labels: labels,
// LinkLocalIP: c.StringSlice("link-local-ip"), // Not implemented yet
LogDriver: logDriver,
LogDriverOpt: c.StringSlice("log-opt"),
- MacAddress: c.String("mac-address"),
Name: c.String("name"),
- Network: network,
// NetworkAlias: c.StringSlice("network-alias"), // Not implemented - does this make sense in Podman?
- IpcMode: ipcMode,
- NetMode: netMode,
- UtsMode: utsMode,
- PidMode: pidMode,
- CgroupMode: cgroupMode,
- Pod: podName,
- Privileged: c.Bool("privileged"),
- Publish: c.StringSlice("publish"),
- PublishAll: c.Bool("publish-all"),
- PortBindings: portBindings,
- Quiet: c.Bool("quiet"),
- ReadOnlyRootfs: c.Bool("read-only"),
- ReadOnlyTmpfs: c.Bool("read-only-tmpfs"),
+ Pod: podName,
+ Quiet: c.Bool("quiet"),
Resources: cc.CreateResourceConfig{
BlkioWeight: blkioWeight,
BlkioWeightDevice: c.StringSlice("blkio-weight-device"),
@@ -778,30 +737,27 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
},
RestartPolicy: c.String("restart"),
Rm: c.Bool("rm"),
+ Security: *secConfig,
StopSignal: stopSignal,
StopTimeout: c.Uint("stop-timeout"),
- Sysctl: sysctl,
Systemd: systemd,
Tmpfs: c.StringArray("tmpfs"),
Tty: tty,
- User: user,
- UsernsMode: usernsMode,
MountsFlag: c.StringArray("mount"),
Volumes: c.StringArray("volume"),
WorkDir: workDir,
Rootfs: rootfs,
VolumesFrom: c.StringSlice("volumes-from"),
Syslog: c.Bool("syslog"),
- }
- if config.Privileged {
- config.LabelOpts = label.DisableSecOpt()
- } else {
- if err := parseSecurityOpt(config, c.StringArray("security-opt"), runtime); err != nil {
- return nil, err
- }
+ Pid: *pid,
+ Ipc: *ipc,
+ Cgroup: *cgroup,
+ User: *userns,
+ Uts: *uts,
+ Network: *net,
}
- config.SecurityOpts = c.StringArray("security-opt")
+
warnings, err := verifyContainerResources(config, false)
if err != nil {
return nil, err
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index 2d2cf74d2..d4b4534bb 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -35,7 +35,7 @@ func init() {
startCommand.SetUsageTemplate(UsageTemplate())
flags := startCommand.Flags()
flags.BoolVarP(&startCommand.Attach, "attach", "a", false, "Attach container's STDOUT and STDERR")
- flags.StringVar(&startCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
+ flags.StringVar(&startCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
flags.BoolVarP(&startCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
flags.BoolVarP(&startCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.BoolVar(&startCommand.SigProxy, "sig-proxy", false, "Proxy received signals to the process (default true if attaching, false otherwise)")
diff --git a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist
index 9db416a19..a7bcf47bb 100644
--- a/cni/87-podman-bridge.conflist
+++ b/cni/87-podman-bridge.conflist
@@ -33,6 +33,9 @@
{
"type": "firewall",
"backend": "iptables"
+ },
+ {
+ "type": "tuning"
}
]
}
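The tuning plugin appended to the default bridge conflist is what lets CNI apply runtime-config values such as a requested MAC address, so the --mac-address support elsewhere in this change relies on it. A rough check, assuming the updated 87-podman-bridge.conflist is installed and the command runs as root (rootless networking does not use this CNI config):

    # request a fixed MAC and read it back from inside the container
    sudo podman run --rm --mac-address 92:d0:c6:0a:29:33 alpine \
        cat /sys/class/net/eth0/address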
diff --git a/completions/bash/podman b/completions/bash/podman
index 0abf9e738..4d552b0a7 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -877,6 +877,7 @@ _podman_container_restore() {
--tcp-established
--ignore-rootfs
--ignore-static-ip
+ --ignore-static-mac
"
case "$prev" in
-i|--import)
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index 779f95d95..de9a33714 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -27,7 +27,7 @@ task (pass or fail) is set based on the exit status of the last script to execut
4. ``lint``: Execute regular `make lint` to check for any code cruft.
Should also run for less than a few minutes.
-5. ``vendor``: runs `make vendor` followed by `./hack/tree_status.sh` to check
+5. ``vendor``: runs `make vendor-in-container` followed by `./hack/tree_status.sh` to check
whether the git tree is clean. The reasoning for that is to make sure that
the vendor.conf, the code and the vendored packages in ./vendor are in sync
at all times.
diff --git a/docs/Readme.md b/docs/Readme.md
index 4514afa77..209dcd6b4 100644
--- a/docs/Readme.md
+++ b/docs/Readme.md
@@ -1,3 +1,10 @@
+# Podman Documentation
+
+The online man pages and other documents regarding Podman can be found at
+[Read The Docs](https://podman.readthedocs.io/en/latest/index.html). The man pages
+can be found under the [Commands](https://podman.readthedocs.io/en/latest/Commands.html)
+link on that page.
+
# Build the Docs
## Directory Structure
@@ -18,4 +25,4 @@
| | |
| ------------------------------------ | --------------------------- |
| docs/remote-docs.sh | Read the docs/source/markdown files and format for each platform |
-| docs/links-to-html.lua | pandoc filter to do aliases for html files |
\ No newline at end of file
+| docs/links-to-html.lua | pandoc filter to do aliases for html files |
diff --git a/docs/source/markdown/podman-attach.1.md b/docs/source/markdown/podman-attach.1.md
index cef01f0f6..1ac2e49a9 100644
--- a/docs/source/markdown/podman-attach.1.md
+++ b/docs/source/markdown/podman-attach.1.md
@@ -20,9 +20,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information.
## OPTIONS
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--latest**, **-l**
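Because the default is now surfaced as ctrl-p,ctrl-q rather than an empty string, the flag reads as a selection instead of an override, and an explicit empty value turns detaching off. Illustrative invocations, assuming a running container named web:

    # detach with ctrl-e,ctrl-d instead of the default ctrl-p,ctrl-q
    podman attach --detach-keys ctrl-e,ctrl-d web
    # forward every key sequence to the container; detaching is disabled
    podman attach --detach-keys "" web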
diff --git a/docs/source/markdown/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md
index 1d2cf0b3e..d71daf4af 100644
--- a/docs/source/markdown/podman-container-restore.1.md
+++ b/docs/source/markdown/podman-container-restore.1.md
@@ -76,6 +76,15 @@ a container is restored multiple times from an exported checkpoint with **--name
Using **--ignore-static-ip** tells Podman to ignore the IP address if it was configured
with **--ip** during container creation.
+**--ignore-static-mac**
+
+If the container was started with **--mac-address** the restored container also
+tries to use that MAC address and restore fails if that MAC address is already
+in use. This can happen if a container is restored multiple times from an
+exported checkpoint with **--name, -n**.
+
+Using **--ignore-static-mac** tells Podman to ignore the MAC address if it was
+configured with **--mac-address** during container creation.
## EXAMPLE
podman container restore mywebserver
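Restoring one exported checkpoint into several new containers only works if the fixed addresses are dropped, which is what the two ignore flags are for. A sketch, assuming a running container mywebserver and root privileges:

    sudo podman container checkpoint --export /tmp/cp.tar.gz mywebserver
    sudo podman container restore --import /tmp/cp.tar.gz --name copy1 \
        --ignore-static-ip --ignore-static-mac
    sudo podman container restore --import /tmp/cp.tar.gz --name copy2 \
        --ignore-static-ip --ignore-static-mac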
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 2e0dd934c..d9ee69f82 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -198,9 +198,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information.
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--device**=*device*
@@ -500,6 +498,10 @@ Current supported mount TYPES are `bind`, `volume`, and `tmpfs`.
· tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+ · tmpcopyup: Enable copyup from the image directory at the same location to the tmpfs. Used by default.
+
+ · notmpcopyup: Disable copying files from the image to the tmpfs.
+
**--name**=*name*
Assign a name to the container
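tmpcopyup stays the default, so notmpcopyup is the case worth spelling out: the tmpfs starts empty instead of being populated from the image content at that path. A hedged example (the destination key follows the other --mount types; some builds also accept dst or target), and the same option applies to podman run:

    # /var/cache starts empty rather than being copied up from the image
    podman create --name c1 \
        --mount type=tmpfs,destination=/var/cache,notmpcopyup fedora ls /var/cache
    podman start -a c1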
diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md
index 4c17c056a..9624425dc 100644
--- a/docs/source/markdown/podman-exec.1.md
+++ b/docs/source/markdown/podman-exec.1.md
@@ -15,7 +15,7 @@ podman\-exec - Execute a command in a running container
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--env**, **-e**
diff --git a/docs/source/markdown/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index 207d9d61d..c1663129c 100644
--- a/docs/source/markdown/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -42,7 +42,8 @@ The storage option conflicts with the **--all**, **--latest**, and **--volumes**
**--volumes**, **-v**
-Remove the volumes associated with the container.
+Remove anonymous volumes associated with the container. This does not include named volumes
+created with `podman volume create`, or the `--volume` option of `podman run` and `podman create`.
## EXAMPLE
Remove a container by its name *mywebserver*
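The practical difference: volumes Podman creates implicitly for an image's VOLUME directories go away with -v, while volumes the user named survive. Roughly, with a hypothetical local image myimage that declares a VOLUME:

    podman run -d --name web -v webdata:/srv myimage   # webdata is a named volume
    podman rm -v -f web     # removes web and its anonymous image volumes only
    podman volume ls        # webdata is still listed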
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index 8baa39570..28b00ee29 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -204,9 +204,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information.
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--device**=*device*
@@ -511,6 +509,10 @@ Current supported mount TYPES are `bind`, `volume`, and `tmpfs`.
· tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+ · tmpcopyup: Enable copyup from the image directory at the same location to the tmpfs. Used by default.
+
+ · notmpcopyup: Disable copying files from the image to the tmpfs.
+
**--name**=*name*
Assign a name to the container
diff --git a/docs/source/markdown/podman-start.1.md b/docs/source/markdown/podman-start.1.md
index 4c81d17bd..84af9d800 100644
--- a/docs/source/markdown/podman-start.1.md
+++ b/docs/source/markdown/podman-start.1.md
@@ -23,9 +23,7 @@ starting multiple containers.
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--interactive**, **-i**
diff --git a/go.mod b/go.mod
index 743278ef1..1b4e58225 100644
--- a/go.mod
+++ b/go.mod
@@ -9,15 +9,16 @@ require (
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
- github.com/containernetworking/cni v0.7.1
+ github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containernetworking/plugins v0.8.2
github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982
+ github.com/containers/conmon v2.0.2+incompatible // indirect
github.com/containers/image/v5 v5.0.0
github.com/containers/psgo v1.3.2
github.com/containers/storage v1.13.5
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
- github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca
+ github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
github.com/cyphar/filepath-securejoin v0.2.2
github.com/davecgh/go-spew v1.1.1
github.com/docker/distribution v2.7.1+incompatible
@@ -37,11 +38,11 @@ require (
github.com/hashicorp/go-multierror v1.0.0
github.com/hpcloud/tail v1.0.0
github.com/imdario/mergo v0.3.7 // indirect
- github.com/json-iterator/go v1.1.7
+ github.com/json-iterator/go v1.1.8
github.com/mattn/go-isatty v0.0.8 // indirect
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
github.com/onsi/ginkgo v1.10.3
- github.com/onsi/gomega v1.7.0
+ github.com/onsi/gomega v1.7.1
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
@@ -59,7 +60,7 @@ require (
github.com/stretchr/testify v1.4.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber-go/atomic v1.4.0 // indirect
- github.com/uber/jaeger-client-go v2.19.0+incompatible
+ github.com/uber/jaeger-client-go v2.20.0+incompatible
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
github.com/vishvananda/netlink v1.0.0
diff --git a/go.sum b/go.sum
index c7e868bea..262cc86a0 100644
--- a/go.sum
+++ b/go.sum
@@ -53,12 +53,16 @@ github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL
github.com/containernetworking/cni v0.7.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
+github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.2 h1:5lnwfsAYO+V7yXhysJKy3E1A2Gy9oVut031zfdOzI9w=
github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr7U8R23PdhwaLp6F3adc=
github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e h1:iDavHEx5Yr7o+0l6495Ya6N0YEPplIUZuWC2e14baDM=
github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e/go.mod h1:Igrk75FAxLnzDaHUbtpWB8pwL+Bv+cnakWMvqAXW2v8=
github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2sJSbmxwLHZLHc41TrIPrP0GlbhX+WDJBqvs=
github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc=
+github.com/containers/conmon v2.0.2+incompatible h1:h2HCdd/EBpwFn7RT82Y2GyXnVUHWxk1Jm4cESSZG4P8=
+github.com/containers/conmon v2.0.2+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
@@ -83,6 +87,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca h1:CJstDqYy9ClWuPcDHMTCAiUS+ckekluYetGR2iYYWuo=
github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca/go.mod h1:BO0al9TKber3XUTucLzKgoG5sq8qiOB41H7zSdfw6r8=
+github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b h1:SgS+WV10y2Bubuy2HquSBori6DXj9sqRN77Hgs5H7Qc=
+github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b/go.mod h1:ZOuIEOp/3MB1eCBWANnNxM3zUA3NWh76wSRCsnKAg2c=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -220,6 +226,8 @@ github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwK
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/juju/errors v0.0.0-20180806074554-22422dad46e1/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
@@ -295,6 +303,8 @@ github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -399,6 +409,8 @@ github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber/jaeger-client-go v2.19.0+incompatible h1:pbwbYfHUoaase0oPQOdZ1GcaUjImYGimUXSQ/+8+Z8Q=
github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.20.0+incompatible h1:ttG9wKdl2ikV/BGOtu+eb+VPp+R7jMeuM177Ihs5Fdc=
+github.com/uber/jaeger-client-go v2.20.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 h1:CwmGyzHTzCqCdZJkWR0A7ucZXgrCY7spRcpvm7ci//s=
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
diff --git a/install.md b/install.md
index 2583c4f9d..39b639176 100644
--- a/install.md
+++ b/install.md
@@ -72,6 +72,8 @@ sudo apt-get install -qq -y software-properties-common uidmap
sudo add-apt-repository -y ppa:projectatomic/ppa
sudo apt-get update -qq
sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
```
#### Fedora
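For reference, the file written by the Ubuntu snippet above contains only a search list, which lets short image names resolve against docker.io and quay.io:

    $ cat /etc/containers/registries.conf
    [registries.search]
    registries = ['docker.io', 'quay.io']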
diff --git a/libpod.conf b/libpod.conf
index 47f66ecc1..7e0228c19 100644
--- a/libpod.conf
+++ b/libpod.conf
@@ -142,8 +142,36 @@ runc = [
]
crun = [
- "/usr/bin/crun",
- "/usr/local/bin/crun",
+ "/usr/bin/crun",
+ "/usr/sbin/crun",
+ "/usr/local/bin/crun",
+ "/usr/local/sbin/crun",
+ "/sbin/crun",
+ "/bin/crun",
+ "/run/current-system/sw/bin/crun",
+]
+
+# Kata Containers is an OCI runtime, where containers are run inside lightweight
+# Virtual Machines (VMs). Kata provides additional isolation towards the host,
+# minimizing the host attack surface and mitigating the consequences of a
+# container breakout.
+# Please note that Kata does not support rootless podman yet, but we can leave
+# the paths below blank to let them be discovered by the $PATH environment
+# variable.
+
+# Kata Containers with the default configured VMM
+kata-runtime = [
+ "/usr/bin/kata-runtime",
+]
+
+# Kata Containers with the QEMU VMM
+kata-qemu = [
+ "/usr/bin/kata-qemu",
+]
+
+# Kata Containers with the Firecracker VMM
+kata-fc = [
+ "/usr/bin/kata-fc",
]
# The [runtimes] table MUST be the last thing in this file.
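With these entries in libpod.conf, a Kata runtime can be selected by name through Podman's global --runtime option. A sketch, assuming kata-runtime is installed and the host provides the required virtualization support (and keeping the rootless caveat above in mind):

    # run the container inside a lightweight VM instead of runc/crun
    sudo podman --runtime kata-runtime run --rm -it fedora uname -r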
diff --git a/libpod/config/default.go b/libpod/config/default.go
index 17574c059..5decaeab7 100644
--- a/libpod/config/default.go
+++ b/libpod/config/default.go
@@ -6,6 +6,7 @@ import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
+ "github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
@@ -47,6 +48,12 @@ func defaultConfigFromMemory() (*Config, error) {
c.ImageDefaultTransport = _defaultTransport
c.StateType = define.BoltDBStateStore
c.OCIRuntime = "runc"
+
+ // If we're running on cgroups v2, default to using crun.
+ if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
+ c.OCIRuntime = "crun"
+ }
+
c.OCIRuntimes = map[string][]string{
"runc": {
"/usr/bin/runc",
@@ -58,7 +65,15 @@ func defaultConfigFromMemory() (*Config, error) {
"/usr/lib/cri-o-runc/sbin/runc",
"/run/current-system/sw/bin/runc",
},
- // TODO - should we add "crun" defaults here as well?
+ "crun": {
+ "/usr/bin/crun",
+ "/usr/sbin/crun",
+ "/usr/local/bin/crun",
+ "/usr/local/sbin/crun",
+ "/sbin/crun",
+ "/bin/crun",
+ "/run/current-system/sw/bin/crun",
+ },
}
c.ConmonPath = []string{
"/usr/libexec/podman/conmon",
diff --git a/libpod/container.go b/libpod/container.go
index 8e24391b9..4f7fc067e 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -138,6 +138,10 @@ type Container struct {
// being checkpointed. If requestedIP is set it will be used instead
// of config.StaticIP.
requestedIP net.IP
+ // A restored container should have the same MAC address as before
+ // being checkpointed. If requestedMAC is set it will be used instead
+ // of config.StaticMAC.
+ requestedMAC net.HardwareAddr
// This is true if a container is restored from a checkpoint.
restoreFromCheckpoint bool
@@ -296,6 +300,10 @@ type ContainerConfig struct {
// This cannot be set unless CreateNetNS is set.
// If not set, the container will be dynamically assigned an IP by CNI.
StaticIP net.IP `json:"staticIP"`
+ // StaticMAC is a static MAC to request for the container.
+ // This cannot be set unless CreateNetNS is set.
+ // If not set, the container will be dynamically assigned a MAC by CNI.
+ StaticMAC net.HardwareAddr `json:"staticMAC"`
// PortMappings are the ports forwarded to the container's network
// namespace
// These are not used unless CreateNetNS is true
diff --git a/libpod/container_api.go b/libpod/container_api.go
index a6f5b54d5..b8cfe02f6 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -794,6 +794,11 @@ type ContainerCheckpointOptions struct {
// important to be able to restore a container multiple
// times with '--import --name'.
IgnoreStaticIP bool
+ // IgnoreStaticMAC tells the API to ignore the MAC set
+ // during 'podman run' with '--mac-address'. This is especially
+ // important to be able to restore a container multiple
+ // times with '--import --name'.
+ IgnoreStaticMAC bool
}
// Checkpoint checkpoints a container
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 471648bc8..26d6771b0 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -794,6 +794,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
c.config.StaticIP = nil
}
+ // If a container is restored multiple times from an exported checkpoint with
+ // the help of '--import --name', the restore will fail if during 'podman run'
+ // a static container MAC address was set with '--mac-address'. The user
+ // can tell the restore process to ignore the static MAC with
+ // '--ignore-static-mac'
+ if options.IgnoreStaticMAC {
+ c.config.StaticMAC = nil
+ }
+
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
@@ -803,9 +812,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// TODO: This implicit restoring with or without IP depending on an
// unrelated restore parameter (--name) does not seem like the
// best solution.
- if err == nil && options.Name == "" && !options.IgnoreStaticIP {
+ if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
- // container with the same IP address as during checkpointing.
+ // container with the same IP address / MAC address as during checkpointing.
defer networkStatusFile.Close()
var networkStatus []*cnitypes.Result
networkJSON, err := ioutil.ReadAll(networkStatusFile)
@@ -815,16 +824,35 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
return err
}
- // Take the first IP address
- var IP net.IP
- if len(networkStatus) > 0 {
- if len(networkStatus[0].IPs) > 0 {
- IP = networkStatus[0].IPs[0].Address.IP
+ if !options.IgnoreStaticIP {
+ // Take the first IP address
+ var IP net.IP
+ if len(networkStatus) > 0 {
+ if len(networkStatus[0].IPs) > 0 {
+ IP = networkStatus[0].IPs[0].Address.IP
+ }
+ }
+ if IP != nil {
+ // Tell CNI which IP address we want.
+ c.requestedIP = IP
}
}
- if IP != nil {
- // Tell CNI which IP address we want.
- c.requestedIP = IP
+ if !options.IgnoreStaticMAC {
+ // Take the first device with a defined sandbox.
+ var MAC net.HardwareAddr
+ for _, n := range networkStatus[0].Interfaces {
+ if n.Sandbox != "" {
+ MAC, err = net.ParseMAC(n.Mac)
+ if err != nil {
+ return errors.Wrapf(err, "failed to parse MAC %v", n.Mac)
+ }
+ break
+ }
+ }
+ if MAC != nil {
+ // Tell CNI which MAC address we want.
+ c.requestedMAC = MAC
+ }
}
}
@@ -1314,7 +1342,7 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error {
// Teardown CNI config on refresh
func (c *Container) refreshCNI() error {
// Let's try and delete any lingering network config...
- podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP)
+ podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP, c.config.StaticMAC)
return c.runtime.netPlugin.TearDownPod(podNetwork)
}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 470c76959..9e6fffc29 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -54,6 +54,7 @@ func (e EventJournalD) Write(ee Event) error {
// Read reads events from the journal and sends qualified events to the event channel
func (e EventJournalD) Read(options ReadOptions) error {
+ defer close(options.EventChannel)
eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
if err != nil {
return errors.Wrapf(err, "failed to generate event options")
@@ -87,7 +88,6 @@ func (e EventJournalD) Read(options ReadOptions) error {
if err != nil {
return err
}
- defer close(options.EventChannel)
for {
if _, err := j.Next(); err != nil {
return err
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 4b65b0ad0..93e6fa3c9 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -41,6 +41,7 @@ func (e EventLogFile) Write(ee Event) error {
// Reads from the log file
func (e EventLogFile) Read(options ReadOptions) error {
+ defer close(options.EventChannel)
eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
if err != nil {
return errors.Wrapf(err, "unable to generate event options")
@@ -68,7 +69,6 @@ func (e EventLogFile) Read(options ReadOptions) error {
options.EventChannel <- event
}
}
- close(options.EventChannel)
return nil
}
diff --git a/libpod/kube.go b/libpod/kube.go
index d0e7baf95..47a77991e 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -487,13 +487,16 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
if err := c.syncContainer(); err != nil {
return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
}
+
logrus.Debugf("Looking in container for user: %s", c.User())
- u, err := lookup.GetUser(c.state.Mountpoint, c.User())
+ execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil)
if err != nil {
return nil, err
}
- user := int64(u.Uid)
- sc.RunAsUser = &user
+ uid := int64(execUser.Uid)
+ gid := int64(execUser.Gid)
+ sc.RunAsUser = &uid
+ sc.RunAsGroup = &gid
}
return &sc, nil
}
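The change is visible in generate kube output: when a container was created with an explicit user, the emitted securityContext now carries both IDs. Approximately, for a container created with --user 1000:1000 (exact YAML layout may differ):

    podman create --name demo --user 1000:1000 alpine top
    podman generate kube demo | grep -E 'runAsUser|runAsGroup'
    #   runAsUser: 1000
    #   runAsGroup: 1000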
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index ac1144fbe..cba7b636a 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -28,23 +28,34 @@ import (
)
// Get an OCICNI network config
-func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork {
+func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) ocicni.PodNetwork {
defaultNetwork := r.netPlugin.GetDefaultNetworkName()
network := ocicni.PodNetwork{
Name: name,
Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
ID: id,
NetNS: nsPath,
- Networks: networks,
RuntimeConfig: map[string]ocicni.RuntimeConfig{
defaultNetwork: {PortMappings: ports},
},
}
- if staticIP != nil {
- network.Networks = []string{defaultNetwork}
+ if staticIP != nil || staticMAC != nil {
+ network.Networks = []ocicni.NetAttachment{{Name: defaultNetwork}}
+ var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports}
+ if staticIP != nil {
+ rt.IP = staticIP.String()
+ }
+ if staticMAC != nil {
+ rt.MAC = staticMAC.String()
+ }
network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
- defaultNetwork: {IP: staticIP.String(), PortMappings: ports},
+ defaultNetwork: rt,
+ }
+ } else {
+ network.Networks = make([]ocicni.NetAttachment, len(networks))
+ for i, netName := range networks {
+ network.Networks[i].Name = netName
}
}
@@ -62,7 +73,16 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
requestedIP = ctr.config.StaticIP
}
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP)
+ var requestedMAC net.HardwareAddr
+ if ctr.requestedMAC != nil {
+ requestedMAC = ctr.requestedMAC
+ // cancel request for a specific MAC in case the container is reused later
+ ctr.requestedMAC = nil
+ } else {
+ requestedMAC = ctr.config.StaticMAC
+ }
+
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC)
results, err := r.netPlugin.SetUpPod(podNetwork)
if err != nil {
@@ -78,10 +98,10 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
networkStatus := make([]*cnitypes.Result, 0)
for idx, r := range results {
- logrus.Debugf("[%d] CNI result: %v", idx, r.String())
- resultCurrent, err := cnitypes.GetResult(r)
+ logrus.Debugf("[%d] CNI result: %v", idx, r.Result.String())
+ resultCurrent, err := cnitypes.GetResult(r.Result)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.String(), err)
+ return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result.String(), err)
}
networkStatus = append(networkStatus, resultCurrent)
}
@@ -443,7 +463,16 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
requestedIP = ctr.config.StaticIP
}
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP)
+ var requestedMAC net.HardwareAddr
+ if ctr.requestedMAC != nil {
+ requestedMAC = ctr.requestedMAC
+ // cancel request for a specific MAC in case the container is reused later
+ ctr.requestedMAC = nil
+ } else {
+ requestedMAC = ctr.config.StaticMAC
+ }
+
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC)
if err := r.netPlugin.TearDownPod(podNetwork); err != nil {
return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID())
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index eeaee6d43..46c70e7eb 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -152,7 +152,7 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c
func processDetachKeys(keys string) ([]byte, error) {
// Check the validity of the provided keys first
if len(keys) == 0 {
- keys = define.DefaultDetachKeys
+ return []byte{}, nil
}
detachKeys, err := term.ToBytes(keys)
if err != nil {
diff --git a/libpod/options.go b/libpod/options.go
index 66e8ef93c..00b5626b4 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1052,6 +1052,31 @@ func WithStaticIP(ip net.IP) CtrCreateOption {
}
}
+// WithStaticMAC indicates that the container should request a static MAC from
+// the CNI plugins.
+// It cannot be set unless WithNetNS has already been passed.
+// Further, it cannot be set if additional CNI networks to join have been
+// specified.
+func WithStaticMAC(mac net.HardwareAddr) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ if !ctr.config.CreateNetNS {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if the container is not creating a network namespace")
+ }
+
+ if len(ctr.config.Networks) != 0 {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if joining additional CNI networks")
+ }
+
+ ctr.config.StaticMAC = mac
+
+ return nil
+ }
+}
+
// WithLogDriver sets the log driver for the container
func WithLogDriver(driver string) CtrCreateOption {
return func(ctr *Container) error {
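As a usage sketch, WithStaticMAC slots into the same CtrCreateOption chain as WithStaticIP, and the ordering constraint in its doc comment means WithNetNS has to be applied first. buildNetOptions below is a hypothetical helper, not part of this patch; the imports are net, ocicni and libpod as used above:

// buildNetOptions shows the intended call order (illustrative only).
func buildNetOptions(macStr string, ports []ocicni.PortMapping) ([]libpod.CtrCreateOption, error) {
	opts := []libpod.CtrCreateOption{
		// A network namespace must be requested before a static MAC can be set.
		libpod.WithNetNS(ports, false, "bridge", nil),
	}
	if macStr != "" {
		mac, err := net.ParseMAC(macStr)
		if err != nil {
			return nil, err
		}
		opts = append(opts, libpod.WithStaticMAC(mac))
	}
	return opts, nil
}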
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 64550f545..287bd8474 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -400,17 +400,8 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
}
}
- config, err := r.Runtime.GetConfig()
- if err != nil {
- return exitCode, err
- }
- detachKeys := c.String("detach-keys")
- if detachKeys == "" {
- detachKeys = config.DetachKeys
- }
-
// if the container was created as part of a pod, also start its dependencies, if any.
- if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, detachKeys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
+ if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
@@ -547,12 +538,13 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues)
)
options := libpod.ContainerCheckpointOptions{
- Keep: c.Keep,
- TCPEstablished: c.TcpEstablished,
- TargetFile: c.Import,
- Name: c.Name,
- IgnoreRootfs: c.IgnoreRootfs,
- IgnoreStaticIP: c.IgnoreStaticIP,
+ Keep: c.Keep,
+ TCPEstablished: c.TcpEstablished,
+ TargetFile: c.Import,
+ Name: c.Name,
+ IgnoreRootfs: c.IgnoreRootfs,
+ IgnoreStaticIP: c.IgnoreStaticIP,
+ IgnoreStaticMAC: c.IgnoreStaticMAC,
}
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index f6795970b..85f93ed3e 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -666,10 +666,69 @@ func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
return infraPorts
}
+func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) {
+ if containerYAML.SecurityContext == nil {
+ return
+ }
+ if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
+ securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
+ }
+ if containerYAML.SecurityContext.Privileged != nil {
+ securityConfig.Privileged = *containerYAML.SecurityContext.Privileged
+ }
+
+ if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
+ securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
+ }
+
+ if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
+ if seopt.User != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
+ }
+ if seopt.Role != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
+ }
+ if seopt.Type != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
+ }
+ if seopt.Level != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
+ }
+ }
+ if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
+ for _, capability := range caps.Add {
+ securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability))
+ }
+ for _, capability := range caps.Drop {
+ securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability))
+ }
+ }
+ if containerYAML.SecurityContext.RunAsUser != nil {
+ userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
+ }
+ if containerYAML.SecurityContext.RunAsGroup != nil {
+ if userConfig.User == "" {
+ userConfig.User = "0"
+ }
+ userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup)
+ }
+}
+
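For reference, this is how a kube SecurityContext is expected to land in the split configs; the snippet is illustrative only and assumes the vendored k8s.io/api/core/v1 types used by this file:

runAsUser, runAsGroup := int64(1000), int64(1000)
ctr := v1.Container{
	SecurityContext: &v1.SecurityContext{
		RunAsUser:  &runAsUser,
		RunAsGroup: &runAsGroup,
		Capabilities: &v1.Capabilities{
			Add:  []v1.Capability{"NET_ADMIN"},
			Drop: []v1.Capability{"MKNOD"},
		},
	},
}
var sec createconfig.SecurityConfig
var usr createconfig.UserConfig
setupSecurityContext(&sec, &usr, ctr)
// usr.User is now "1000:1000"; sec.CapAdd and sec.CapDrop carry the capability names.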
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
+ pidConfig createconfig.PidConfig
+ networkConfig createconfig.NetworkConfig
+ cgroupConfig createconfig.CgroupConfig
+ utsConfig createconfig.UtsConfig
+ ipcConfig createconfig.IpcConfig
+ userConfig createconfig.UserConfig
+ securityConfig createconfig.SecurityConfig
)
// The default for MemorySwappiness is -1, not 0
@@ -685,54 +744,15 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
imageData, _ := newImage.Inspect(ctx)
- containerConfig.User = "0"
+ userConfig.User = "0"
if imageData != nil {
- containerConfig.User = imageData.Config.User
+ userConfig.User = imageData.Config.User
}
- if containerYAML.SecurityContext != nil {
- if containerConfig.SecurityOpts != nil {
- if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
- containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
- }
- if containerYAML.SecurityContext.Privileged != nil {
- containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
- }
+ setupSecurityContext(&securityConfig, &userConfig, containerYAML)
- if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
- containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
- }
-
- }
- if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
- if seopt.User != "" {
- containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
- containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
- }
- if seopt.Role != "" {
- containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
- containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
- }
- if seopt.Type != "" {
- containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
- containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
- }
- if seopt.Level != "" {
- containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
- containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
- }
- }
- if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
- for _, capability := range caps.Add {
- containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability))
- }
- for _, capability := range caps.Drop {
- containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability))
- }
- }
- }
var err error
- containerConfig.SeccompProfilePath, err = libpod.DefaultSeccompPath()
+ containerConfig.Security.SeccompProfilePath, err = libpod.DefaultSeccompPath()
if err != nil {
return nil, err
}
@@ -755,20 +775,28 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
containerConfig.StopSignal = 15
// If the user does not pass in ID mappings, just set to basics
- if containerConfig.IDMappings == nil {
- containerConfig.IDMappings = &storage.IDMappingOptions{}
+ if userConfig.IDMappings == nil {
+ userConfig.IDMappings = &storage.IDMappingOptions{}
}
- containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
- containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
- containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
+ networkConfig.NetMode = ns.NetworkMode(namespaces["net"])
+ ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
+ utsConfig.UtsMode = ns.UTSMode(namespaces["uts"])
// disabled in code review per mheon
//containerConfig.PidMode = ns.PidMode(namespaces["pid"])
- containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
+ userConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
if len(containerConfig.WorkDir) == 0 {
containerConfig.WorkDir = "/"
}
+ containerConfig.Pid = pidConfig
+ containerConfig.Network = networkConfig
+ containerConfig.Uts = utsConfig
+ containerConfig.Ipc = ipcConfig
+ containerConfig.Cgroup = cgroupConfig
+ containerConfig.User = userConfig
+ containerConfig.Security = securityConfig
+
// Set default environment variables and incorporate data from image, if necessary
envs := shared.EnvVariablesFromData(imageData)
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 94933ddd0..9604de638 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -24,12 +24,16 @@
int renameat2 (int olddirfd, const char *oldpath, int newdirfd, const char *newpath, unsigned int flags)
{
-# ifdef __NR_renameat2
- return (int) syscall (__NR_renameat2, olddirfd, oldpath, newdirfd, newpath, flags);
+# ifdef SYS_renameat2
+ return (int) syscall (SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, flags);
# else
- /* no way to implement it atomically. */
- errno = ENOSYS;
- return -1;
+ /* The fallback below is not atomic: another process could briefly see the newly created file while it is still empty, before the rename completes. */
+ int fd = open (newpath, O_EXCL|O_CREAT, 0700);
+ if (fd < 0)
+ return fd;
+ close (fd);
+ /* We are sure we created the file, let's overwrite it. */
+ return rename (oldpath, newpath);
# endif
}
#endif
diff --git a/pkg/spec/config_linux_cgo.go b/pkg/spec/config_linux_cgo.go
index a1527752a..c47156456 100644
--- a/pkg/spec/config_linux_cgo.go
+++ b/pkg/spec/config_linux_cgo.go
@@ -10,7 +10,7 @@ import (
seccomp "github.com/seccomp/containers-golang"
)
-func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
var seccompConfig *spec.LinuxSeccomp
var err error
diff --git a/pkg/spec/config_linux_nocgo.go b/pkg/spec/config_linux_nocgo.go
index 10329ff3b..8d720b6d4 100644
--- a/pkg/spec/config_linux_nocgo.go
+++ b/pkg/spec/config_linux_nocgo.go
@@ -6,6 +6,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
)
-func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
return nil, nil
}
diff --git a/pkg/spec/config_unsupported.go b/pkg/spec/config_unsupported.go
index 160414878..a2c7f4416 100644
--- a/pkg/spec/config_unsupported.go
+++ b/pkg/spec/config_unsupported.go
@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
)
-func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
return nil, errors.New("function not supported on non-linux OS's")
}
func addDevice(g *generate.Generator, device string) error {
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 2a8fe7332..244a8d1cd 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -1,7 +1,6 @@
package createconfig
import (
- "net"
"os"
"strconv"
"strings"
@@ -12,7 +11,6 @@ import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
- "github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-connections/nat"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
@@ -55,89 +53,126 @@ type CreateResourceConfig struct {
Ulimit []string //ulimit
}
-// CreateConfig is a pre OCI spec structure. It represents user input from varlink or the CLI
-type CreateConfig struct {
- Annotations map[string]string
- Args []string
+// PidConfig configures the pid namespace for the container
+type PidConfig struct {
+ PidMode namespaces.PidMode //pid
+}
+
+// IpcConfig configures the ipc namespace for the container
+type IpcConfig struct {
+ IpcMode namespaces.IpcMode //ipc
+}
+
+// CgroupConfig configures the cgroup namespace for the container
+type CgroupConfig struct {
+ Cgroups string
+ Cgroupns string
+ CgroupParent string // cgroup-parent
+ CgroupMode namespaces.CgroupMode //cgroup
+}
+
+// UserConfig configures the user namespace for the container
+type UserConfig struct {
+ GroupAdd []string // group-add
+ IDMappings *storage.IDMappingOptions
+ UsernsMode namespaces.UsernsMode //userns
+ User string //user
+}
+
+// UtsConfig configures the uts namespace for the container
+type UtsConfig struct {
+ UtsMode namespaces.UTSMode //uts
+ NoHosts bool
+ HostAdd []string //add-host
+ Hostname string
+}
+
+// NetworkConfig configures the network namespace for the container
+type NetworkConfig struct {
+ DNSOpt []string //dns-opt
+ DNSSearch []string //dns-search
+ DNSServers []string //dns
+ ExposedPorts map[nat.Port]struct{}
+ HTTPProxy bool
+ IP6Address string //ipv6
+ IPAddress string //ip
+ LinkLocalIP []string // link-local-ip
+ MacAddress string //mac-address
+ NetMode namespaces.NetworkMode //net
+ Network string //network
+ NetworkAlias []string //network-alias
+ PortBindings nat.PortMap
+ Publish []string //publish
+ PublishAll bool //publish-all
+}
+
+// SecurityConfig configures the security features for the container
+type SecurityConfig struct {
CapAdd []string // cap-add
CapDrop []string // cap-drop
- CidFile string
- ConmonPidFile string
- Cgroupns string
- Cgroups string
- CgroupParent string // cgroup-parent
- Command []string // Full command that will be used
- UserCommand []string // User-entered command (or image CMD)
- Detach bool // detach
- Devices []string // device
- DNSOpt []string //dns-opt
- DNSSearch []string //dns-search
- DNSServers []string //dns
- Entrypoint []string //entrypoint
- Env map[string]string //env
- ExposedPorts map[nat.Port]struct{}
- GroupAdd []string // group-add
- HealthCheck *manifest.Schema2HealthConfig
- NoHosts bool
- HostAdd []string //add-host
- Hostname string //hostname
- HTTPProxy bool
- Init bool // init
- InitPath string //init-path
- Image string
- ImageID string
- BuiltinImgVolumes map[string]struct{} // volumes defined in the image config
- IDMappings *storage.IDMappingOptions
- ImageVolumeType string // how to handle the image volume, either bind, tmpfs, or ignore
- Interactive bool //interactive
- IpcMode namespaces.IpcMode //ipc
- IP6Address string //ipv6
- IPAddress string //ip
- Labels map[string]string //label
- LinkLocalIP []string // link-local-ip
- LogDriver string // log-driver
- LogDriverOpt []string // log-opt
- MacAddress string //mac-address
- Name string //name
- NetMode namespaces.NetworkMode //net
- Network string //network
- NetworkAlias []string //network-alias
- PidMode namespaces.PidMode //pid
- Pod string //pod
- PodmanPath string
- CgroupMode namespaces.CgroupMode //cgroup
- PortBindings nat.PortMap
- Privileged bool //privileged
- Publish []string //publish
- PublishAll bool //publish-all
- Quiet bool //quiet
- ReadOnlyRootfs bool //read-only
- ReadOnlyTmpfs bool //read-only-tmpfs
- Resources CreateResourceConfig
- RestartPolicy string
- Rm bool //rm
- StopSignal syscall.Signal // stop-signal
- StopTimeout uint // stop-timeout
- Sysctl map[string]string //sysctl
- Systemd bool
- Tmpfs []string // tmpfs
- Tty bool //tty
- UsernsMode namespaces.UsernsMode //userns
- User string //user
- UtsMode namespaces.UTSMode //uts
- Mounts []spec.Mount
- MountsFlag []string // mounts
- NamedVolumes []*libpod.ContainerNamedVolume
- Volumes []string //volume
- VolumesFrom []string
- WorkDir string //workdir
LabelOpts []string //SecurityOpts
NoNewPrivs bool //SecurityOpts
ApparmorProfile string //SecurityOpts
SeccompProfilePath string //SecurityOpts
SecurityOpts []string
- Rootfs string
- Syslog bool // Whether to enable syslog on exit commands
+ Privileged bool //privileged
+ ReadOnlyRootfs bool //read-only
+ ReadOnlyTmpfs bool //read-only-tmpfs
+ Sysctl map[string]string //sysctl
+}
+
+// CreateConfig is a pre-OCI-spec structure. It represents user input from varlink or the CLI.
+type CreateConfig struct {
+ Annotations map[string]string
+ Args []string
+ CidFile string
+ ConmonPidFile string
+ Command []string // Full command that will be used
+ UserCommand []string // User-entered command (or image CMD)
+ Detach bool // detach
+ Devices []string // device
+ Entrypoint []string //entrypoint
+ Env map[string]string //env
+ HealthCheck *manifest.Schema2HealthConfig
+ Init bool // init
+ InitPath string //init-path
+ Image string
+ ImageID string
+ BuiltinImgVolumes map[string]struct{} // volumes defined in the image config
+ ImageVolumeType string // how to handle the image volume, either bind, tmpfs, or ignore
+ Interactive bool //interactive
+ Labels map[string]string //label
+ LogDriver string // log-driver
+ LogDriverOpt []string // log-opt
+ Name string //name
+ PodmanPath string
+ Pod string //pod
+ Quiet bool //quiet
+ Resources CreateResourceConfig
+ RestartPolicy string
+ Rm bool //rm
+ StopSignal syscall.Signal // stop-signal
+ StopTimeout uint // stop-timeout
+ Systemd bool
+ Tmpfs []string // tmpfs
+ Tty bool //tty
+ Mounts []spec.Mount
+ MountsFlag []string // mounts
+ NamedVolumes []*libpod.ContainerNamedVolume
+ Volumes []string //volume
+ VolumesFrom []string
+ WorkDir string //workdir
+ Rootfs string
+ Security SecurityConfig
+ Syslog bool // Whether to enable syslog on exit commands
+
+ // Namespaces
+ Pid PidConfig
+ Ipc IpcConfig
+ Cgroup CgroupConfig
+ User UserConfig
+ Uts UtsConfig
+ Network NetworkConfig
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
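The net effect for callers is structural only: fields that used to sit flat on CreateConfig now live on per-namespace sub-configs, and the old string field User is replaced by the UserConfig struct of the same name. Illustrative accesses:

var cfg createconfig.CreateConfig
cfg.Security.Privileged = true            // was cfg.Privileged
cfg.Network.NetMode = "bridge"            // was cfg.NetMode
cfg.User.User = "1000:1000"               // was cfg.User (a plain string)
cfg.Uts.Hostname = "demo"                 // was cfg.Hostname
cfg.Cgroup.CgroupParent = "machine.slice" // was cfg.CgroupParent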
@@ -199,7 +234,6 @@ func (c *CreateConfig) createExitCommand(runtime *libpod.Runtime) ([]string, err
// GetContainerCreateOptions takes a CreateConfig and returns a slice of CtrCreateOptions
func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *libpod.Pod, mounts []spec.Mount, namedVolumes []*libpod.ContainerNamedVolume) ([]libpod.CtrCreateOption, error) {
var options []libpod.CtrCreateOption
- var portBindings []ocicni.PortMapping
var err error
if c.Interactive {
@@ -216,15 +250,6 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
logrus.Debugf("adding container to pod %s", c.Pod)
options = append(options, runtime.WithPod(pod))
}
- if c.Cgroups == "disabled" {
- options = append(options, libpod.WithNoCgroups())
- }
- if len(c.PortBindings) > 0 {
- portBindings, err = c.CreatePortBindings()
- if err != nil {
- return nil, errors.Wrapf(err, "unable to create port bindings")
- }
- }
if len(mounts) != 0 || len(namedVolumes) != 0 {
destinations := []string{}
@@ -253,179 +278,72 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
// does not have one
options = append(options, libpod.WithEntrypoint(c.Entrypoint))
- networks := make([]string, 0)
- userNetworks := c.NetMode.UserDefined()
- if IsPod(userNetworks) {
- userNetworks = ""
- }
- if userNetworks != "" {
- for _, netName := range strings.Split(userNetworks, ",") {
- if netName == "" {
- return nil, errors.Wrapf(err, "container networks %q invalid", networks)
- }
- networks = append(networks, netName)
- }
- }
-
- if c.NetMode.IsNS() {
- ns := c.NetMode.NS()
- if ns == "" {
- return nil, errors.Errorf("invalid empty user-defined network namespace")
- }
- _, err := os.Stat(ns)
- if err != nil {
- return nil, err
- }
- } else if c.NetMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.NetMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.NetMode.Container())
- }
- options = append(options, libpod.WithNetNSFrom(connectedCtr))
- } else if !c.NetMode.IsHost() && !c.NetMode.IsNone() {
- hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0
- postConfigureNetNS := hasUserns && !c.UsernsMode.IsHost()
- options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(c.NetMode), networks))
- }
+ // TODO: MNT, USER, CGROUP
+ options = append(options, libpod.WithStopSignal(c.StopSignal))
+ options = append(options, libpod.WithStopTimeout(c.StopTimeout))
- if c.CgroupMode.IsNS() {
- ns := c.CgroupMode.NS()
- if ns == "" {
- return nil, errors.Errorf("invalid empty user-defined network namespace")
- }
- _, err := os.Stat(ns)
- if err != nil {
- return nil, err
- }
- } else if c.CgroupMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.CgroupMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.CgroupMode.Container())
- }
- options = append(options, libpod.WithCgroupNSFrom(connectedCtr))
+ logPath := getLoggingPath(c.LogDriverOpt)
+ if logPath != "" {
+ options = append(options, libpod.WithLogPath(logPath))
}
- if c.UsernsMode.IsNS() {
- ns := c.UsernsMode.NS()
- if ns == "" {
- return nil, errors.Errorf("invalid empty user-defined user namespace")
- }
- _, err := os.Stat(ns)
- if err != nil {
- return nil, err
- }
- options = append(options, libpod.WithIDMappings(*c.IDMappings))
- } else if c.UsernsMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.UsernsMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.UsernsMode.Container())
- }
- options = append(options, libpod.WithUserNSFrom(connectedCtr))
- } else {
- options = append(options, libpod.WithIDMappings(*c.IDMappings))
+ if c.LogDriver != "" {
+ options = append(options, libpod.WithLogDriver(c.LogDriver))
}
- if c.PidMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.PidMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.PidMode.Container())
- }
-
- options = append(options, libpod.WithPIDNSFrom(connectedCtr))
+ secOpts, err := c.Security.ToCreateOptions()
+ if err != nil {
+ return nil, err
}
+ options = append(options, secOpts...)
- if c.IpcMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.IpcMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.IpcMode.Container())
- }
-
- options = append(options, libpod.WithIPCNSFrom(connectedCtr))
+ nsOpts, err := c.Cgroup.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- if IsPod(string(c.UtsMode)) {
- options = append(options, libpod.WithUTSNSFromPod(pod))
+ nsOpts, err = c.Ipc.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
- if c.UtsMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.UtsMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.UtsMode.Container())
- }
+ options = append(options, nsOpts...)
- options = append(options, libpod.WithUTSNSFrom(connectedCtr))
+ nsOpts, err = c.Pid.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- // TODO: MNT, USER, CGROUP
- options = append(options, libpod.WithStopSignal(c.StopSignal))
- options = append(options, libpod.WithStopTimeout(c.StopTimeout))
- if len(c.DNSSearch) > 0 {
- options = append(options, libpod.WithDNSSearch(c.DNSSearch))
- }
- if len(c.DNSServers) > 0 {
- if len(c.DNSServers) == 1 && strings.ToLower(c.DNSServers[0]) == "none" {
- options = append(options, libpod.WithUseImageResolvConf())
- } else {
- options = append(options, libpod.WithDNS(c.DNSServers))
- }
- }
- if len(c.DNSOpt) > 0 {
- options = append(options, libpod.WithDNSOption(c.DNSOpt))
- }
- if c.NoHosts {
- options = append(options, libpod.WithUseImageHosts())
- }
- if len(c.HostAdd) > 0 && !c.NoHosts {
- options = append(options, libpod.WithHosts(c.HostAdd))
- }
- logPath := getLoggingPath(c.LogDriverOpt)
- if logPath != "" {
- options = append(options, libpod.WithLogPath(logPath))
+ nsOpts, err = c.Network.ToCreateOptions(runtime, &c.User)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- if c.LogDriver != "" {
- options = append(options, libpod.WithLogDriver(c.LogDriver))
+ nsOpts, err = c.Uts.ToCreateOptions(runtime, pod)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- if c.IPAddress != "" {
- ip := net.ParseIP(c.IPAddress)
- if ip == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as IP address", c.IPAddress)
- } else if ip.To4() == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "%s is not an IPv4 address", c.IPAddress)
- }
- options = append(options, libpod.WithStaticIP(ip))
+ nsOpts, err = c.User.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
-
- options = append(options, libpod.WithPrivileged(c.Privileged))
+ options = append(options, nsOpts...)
useImageVolumes := c.ImageVolumeType == TypeBind
// Gather up the options for NewContainer which consist of With... funcs
options = append(options, libpod.WithRootFSFromImage(c.ImageID, c.Image, useImageVolumes))
- options = append(options, libpod.WithSecLabels(c.LabelOpts))
options = append(options, libpod.WithConmonPidFile(c.ConmonPidFile))
options = append(options, libpod.WithLabels(c.Labels))
- options = append(options, libpod.WithUser(c.User))
- if c.IpcMode.IsHost() {
- options = append(options, libpod.WithShmDir("/dev/shm"))
-
- } else if c.IpcMode.IsContainer() {
- ctr, err := runtime.LookupContainer(c.IpcMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.IpcMode.Container())
- }
- options = append(options, libpod.WithShmDir(ctr.ShmDir()))
- }
options = append(options, libpod.WithShmSize(c.Resources.ShmSize))
- options = append(options, libpod.WithGroups(c.GroupAdd))
if c.Rootfs != "" {
options = append(options, libpod.WithRootFS(c.Rootfs))
}
// Default used if not overridden on command line
- if c.CgroupParent != "" {
- options = append(options, libpod.WithCgroupParent(c.CgroupParent))
- }
-
if c.RestartPolicy != "" {
if c.RestartPolicy == "unless-stopped" {
return nil, errors.Wrapf(define.ErrInvalidArg, "the unless-stopped restart policy is not supported")
@@ -459,38 +377,6 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
return options, nil
}
-// CreatePortBindings iterates ports mappings and exposed ports into a format CNI understands
-func (c *CreateConfig) CreatePortBindings() ([]ocicni.PortMapping, error) {
- return NatToOCIPortBindings(c.PortBindings)
-}
-
-// NatToOCIPortBindings iterates a nat.portmap slice and creates []ocicni portmapping slice
-func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) {
- var portBindings []ocicni.PortMapping
- for containerPb, hostPb := range ports {
- var pm ocicni.PortMapping
- pm.ContainerPort = int32(containerPb.Int())
- for _, i := range hostPb {
- var hostPort int
- var err error
- pm.HostIP = i.HostIP
- if i.HostPort == "" {
- hostPort = containerPb.Int()
- } else {
- hostPort, err = strconv.Atoi(i.HostPort)
- if err != nil {
- return nil, errors.Wrapf(err, "unable to convert host port to integer")
- }
- }
-
- pm.HostPort = int32(hostPort)
- pm.Protocol = containerPb.Proto()
- portBindings = append(portBindings, pm)
- }
- }
- return portBindings, nil
-}
-
// AddPrivilegedDevices iterates through host devices and adds all
// host devices to the spec
func (c *CreateConfig) AddPrivilegedDevices(g *generate.Generator) error {
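getContainerCreateOptions now only concatenates the options produced by each sub-config; the per-namespace logic moves into the new pkg/spec/namespaces.go and pkg/spec/security.go below. A condensed, purely illustrative version of the new flow (collectOptions is not a name from this patch):

func collectOptions(c *createconfig.CreateConfig, runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
	var options []libpod.CtrCreateOption

	secOpts, err := c.Security.ToCreateOptions()
	if err != nil {
		return nil, err
	}
	options = append(options, secOpts...)

	cgOpts, err := c.Cgroup.ToCreateOptions(runtime)
	if err != nil {
		return nil, err
	}
	options = append(options, cgOpts...)

	netOpts, err := c.Network.ToCreateOptions(runtime, &c.User)
	if err != nil {
		return nil, err
	}
	options = append(options, netOpts...)

	// Ipc, Pid, User and Uts (which additionally takes the pod) follow the same shape.
	return options, nil
}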
diff --git a/pkg/spec/namespaces.go b/pkg/spec/namespaces.go
new file mode 100644
index 000000000..a45137416
--- /dev/null
+++ b/pkg/spec/namespaces.go
@@ -0,0 +1,433 @@
+package createconfig
+
+import (
+ "net"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/go-connections/nat"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+func (c *NetworkConfig) ToCreateOptions(runtime *libpod.Runtime, userns *UserConfig) ([]libpod.CtrCreateOption, error) {
+ var portBindings []ocicni.PortMapping
+ var err error
+ if len(c.PortBindings) > 0 {
+ portBindings, err = NatToOCIPortBindings(c.PortBindings)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to create port bindings")
+ }
+ }
+
+ options := make([]libpod.CtrCreateOption, 0)
+ userNetworks := c.NetMode.UserDefined()
+ networks := make([]string, 0)
+
+ if IsPod(userNetworks) {
+ userNetworks = ""
+ }
+ if userNetworks != "" {
+ for _, netName := range strings.Split(userNetworks, ",") {
+ if netName == "" {
+ return nil, errors.Errorf("container networks %q invalid", userNetworks)
+ }
+ networks = append(networks, netName)
+ }
+ }
+
+ if c.NetMode.IsNS() {
+ ns := c.NetMode.NS()
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined network namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ } else if c.NetMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.NetMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.NetMode.Container())
+ }
+ options = append(options, libpod.WithNetNSFrom(connectedCtr))
+ } else if !c.NetMode.IsHost() && !c.NetMode.IsNone() {
+ postConfigureNetNS := userns.getPostConfigureNetNS()
+ options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(c.NetMode), networks))
+ }
+
+ if len(c.DNSSearch) > 0 {
+ options = append(options, libpod.WithDNSSearch(c.DNSSearch))
+ }
+ if len(c.DNSServers) > 0 {
+ if len(c.DNSServers) == 1 && strings.ToLower(c.DNSServers[0]) == "none" {
+ options = append(options, libpod.WithUseImageResolvConf())
+ } else {
+ options = append(options, libpod.WithDNS(c.DNSServers))
+ }
+ }
+ if len(c.DNSOpt) > 0 {
+ options = append(options, libpod.WithDNSOption(c.DNSOpt))
+ }
+ if c.IPAddress != "" {
+ ip := net.ParseIP(c.IPAddress)
+ if ip == nil {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as IP address", c.IPAddress)
+ } else if ip.To4() == nil {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "%s is not an IPv4 address", c.IPAddress)
+ }
+ options = append(options, libpod.WithStaticIP(ip))
+ }
+
+ if c.MacAddress != "" {
+ mac, err := net.ParseMAC(c.MacAddress)
+ if err != nil {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as MAC address: %v", c.MacAddress, err)
+ }
+ options = append(options, libpod.WithStaticMAC(mac))
+ }
+
+ return options, nil
+}
+
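The new MAC validation relies on net.ParseMAC, which accepts the usual colon, hyphen and dot notations, while the IP check additionally rejects anything that is not IPv4. A quick standard-library-only illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	mac, err := net.ParseMAC("92:d0:c6:0a:29:33")
	fmt.Println(mac, err) // 92:d0:c6:0a:29:33 <nil>

	ip := net.ParseIP("10.88.0.10")
	fmt.Println(ip != nil, ip.To4() != nil) // true true: accepted

	ip6 := net.ParseIP("fd00::10")
	fmt.Println(ip6 != nil, ip6.To4() != nil) // true false: rejected, not IPv4
}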
+func (c *NetworkConfig) ConfigureGenerator(g *generate.Generator) error {
+ netMode := c.NetMode
+ if netMode.IsHost() {
+ logrus.Debug("Using host netmode")
+ if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
+ return err
+ }
+ } else if netMode.IsNone() {
+ logrus.Debug("Using none netmode")
+ } else if netMode.IsBridge() {
+ logrus.Debug("Using bridge netmode")
+ } else if netCtr := netMode.Container(); netCtr != "" {
+ logrus.Debugf("using container %s netmode", netCtr)
+ } else if IsNS(string(netMode)) {
+ logrus.Debug("Using ns netmode")
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), NS(string(netMode))); err != nil {
+ return err
+ }
+ } else if IsPod(string(netMode)) {
+ logrus.Debug("Using pod netmode, unless pod is not sharing")
+ } else if netMode.IsSlirp4netns() {
+ logrus.Debug("Using slirp4netns netmode")
+ } else if netMode.IsUserDefined() {
+ logrus.Debug("Using user defined netmode")
+ } else {
+ return errors.Errorf("unknown network mode")
+ }
+
+ if c.HTTPProxy {
+ for _, envSpec := range []string{
+ "http_proxy",
+ "HTTP_PROXY",
+ "https_proxy",
+ "HTTPS_PROXY",
+ "ftp_proxy",
+ "FTP_PROXY",
+ "no_proxy",
+ "NO_PROXY",
+ } {
+ envVal := os.Getenv(envSpec)
+ if envVal != "" {
+ g.AddProcessEnv(envSpec, envVal)
+ }
+ }
+ }
+
+ if g.Config.Annotations == nil {
+ g.Config.Annotations = make(map[string]string)
+ }
+
+ if c.PublishAll {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseTrue
+ } else {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseFalse
+ }
+
+ return nil
+}
+
+// NatToOCIPortBindings iterates a nat.PortMap and creates an []ocicni.PortMapping slice
+func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) {
+ var portBindings []ocicni.PortMapping
+ for containerPb, hostPb := range ports {
+ var pm ocicni.PortMapping
+ pm.ContainerPort = int32(containerPb.Int())
+ for _, i := range hostPb {
+ var hostPort int
+ var err error
+ pm.HostIP = i.HostIP
+ if i.HostPort == "" {
+ hostPort = containerPb.Int()
+ } else {
+ hostPort, err = strconv.Atoi(i.HostPort)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to convert host port to integer")
+ }
+ }
+
+ pm.HostPort = int32(hostPort)
+ pm.Protocol = containerPb.Proto()
+ portBindings = append(portBindings, pm)
+ }
+ }
+ return portBindings, nil
+}
+
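NatToOCIPortBindings moves here unchanged from createconfig.go. A short usage sketch within this package (the nat and ocicni types are the ones imported above, plus fmt for the printout); note that an empty HostPort falls back to the container port:

ports := nat.PortMap{
	"8080/tcp": []nat.PortBinding{
		{HostIP: "127.0.0.1", HostPort: "80"},
		{HostIP: "", HostPort: ""}, // empty host port falls back to 8080
	},
}
pms, err := NatToOCIPortBindings(ports)
if err != nil {
	// only reachable for a malformed, non-numeric host port
}
for _, pm := range pms {
	fmt.Printf("%s %s:%d -> %d\n", pm.Protocol, pm.HostIP, pm.HostPort, pm.ContainerPort)
}
// tcp 127.0.0.1:80 -> 8080
// tcp :8080 -> 8080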
+func (c *CgroupConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.CgroupMode.IsNS() {
+ ns := c.CgroupMode.NS()
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined cgroup namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ } else if c.CgroupMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.CgroupMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.CgroupMode.Container())
+ }
+ options = append(options, libpod.WithCgroupNSFrom(connectedCtr))
+ }
+
+ if c.CgroupParent != "" {
+ options = append(options, libpod.WithCgroupParent(c.CgroupParent))
+ }
+
+ if c.Cgroups == "disabled" {
+ options = append(options, libpod.WithNoCgroups())
+ }
+
+ return options, nil
+}
+
+func (c *UserConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.UsernsMode.IsNS() {
+ ns := c.UsernsMode.NS()
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined user namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, libpod.WithIDMappings(*c.IDMappings))
+ } else if c.UsernsMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.UsernsMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.UsernsMode.Container())
+ }
+ options = append(options, libpod.WithUserNSFrom(connectedCtr))
+ } else {
+ options = append(options, libpod.WithIDMappings(*c.IDMappings))
+ }
+
+ options = append(options, libpod.WithUser(c.User))
+ options = append(options, libpod.WithGroups(c.GroupAdd))
+
+ return options, nil
+}
+
+func (c *UserConfig) ConfigureGenerator(g *generate.Generator) error {
+ if IsNS(string(c.UsernsMode)) {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), NS(string(c.UsernsMode))); err != nil {
+ return err
+ }
+ // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
+ g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
+ g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
+ }
+
+ if (len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0) && !c.UsernsMode.IsHost() {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
+ return err
+ }
+ }
+ for _, uidmap := range c.IDMappings.UIDMap {
+ g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
+ }
+ for _, gidmap := range c.IDMappings.GIDMap {
+ g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
+ }
+ return nil
+}
+
+func (c *UserConfig) getPostConfigureNetNS() bool {
+ hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0
+ postConfigureNetNS := hasUserns && !c.UsernsMode.IsHost()
+ return postConfigureNetNS
+}
+
+func (c *UserConfig) InNS(isRootless bool) bool {
+ hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0
+ return isRootless || (hasUserns && !c.UsernsMode.IsHost())
+}
+
+func (c *IpcConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.IpcMode.IsHost() {
+ options = append(options, libpod.WithShmDir("/dev/shm"))
+ } else if c.IpcMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.IpcMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.IpcMode.Container())
+ }
+
+ options = append(options, libpod.WithIPCNSFrom(connectedCtr))
+ options = append(options, libpod.WithShmDir(connectedCtr.ShmDir()))
+ }
+
+ return options, nil
+}
+
+func (c *IpcConfig) ConfigureGenerator(g *generate.Generator) error {
+ ipcMode := c.IpcMode
+ if IsNS(string(ipcMode)) {
+ return g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), NS(string(ipcMode)))
+ }
+ if ipcMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.IPCNamespace))
+ }
+ if ipcCtr := ipcMode.Container(); ipcCtr != "" {
+ logrus.Debugf("Using container %s ipcmode", ipcCtr)
+ }
+
+ return nil
+}
+
+func (c *CgroupConfig) ConfigureGenerator(g *generate.Generator) error {
+ cgroupMode := c.CgroupMode
+ if cgroupMode.IsDefaultValue() {
+ // If the value is not specified, default to "private" on cgroups v2 and "host" on cgroups v1.
+ unified, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return err
+ }
+ if unified {
+ cgroupMode = "private"
+ } else {
+ cgroupMode = "host"
+ }
+ }
+ if cgroupMode.IsNS() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), NS(string(cgroupMode)))
+ }
+ if cgroupMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.CgroupNamespace))
+ }
+ if cgroupMode.IsPrivate() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), "")
+ }
+ if cgCtr := cgroupMode.Container(); cgCtr != "" {
+ logrus.Debugf("Using container %s cgroup mode", cgCtr)
+ }
+ return nil
+}
+
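The default-value branch is the behavioural subtlety here: with no explicit cgroup namespace mode the container gets a private cgroup namespace on a cgroups v2 host and shares the host's on v1. A sketch of that decision in isolation, reusing the cgroups helper imported above (defaultCgroupMode is an illustrative name only):

func defaultCgroupMode() (string, error) {
	unified, err := cgroups.IsCgroup2UnifiedMode()
	if err != nil {
		return "", err
	}
	if unified {
		return "private", nil // cgroups v2: own cgroup namespace
	}
	return "host", nil // cgroups v1: stay in the host's
}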
+func (c *PidConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.PidMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.PidMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.PidMode.Container())
+ }
+
+ options = append(options, libpod.WithPIDNSFrom(connectedCtr))
+ }
+
+ return options, nil
+}
+
+func (c *PidConfig) ConfigureGenerator(g *generate.Generator) error {
+ pidMode := c.PidMode
+ if IsNS(string(pidMode)) {
+ return g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), NS(string(pidMode)))
+ }
+ if pidMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.PIDNamespace))
+ }
+ if pidCtr := pidMode.Container(); pidCtr != "" {
+ logrus.Debugf("using container %s pidmode", pidCtr)
+ }
+ if IsPod(string(pidMode)) {
+ logrus.Debug("using pod pidmode")
+ }
+ return nil
+}
+
+func (c *UtsConfig) ToCreateOptions(runtime *libpod.Runtime, pod *libpod.Pod) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if IsPod(string(c.UtsMode)) {
+ options = append(options, libpod.WithUTSNSFromPod(pod))
+ }
+ if c.UtsMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.UtsMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.UtsMode.Container())
+ }
+
+ options = append(options, libpod.WithUTSNSFrom(connectedCtr))
+ }
+ if c.NoHosts {
+ options = append(options, libpod.WithUseImageHosts())
+ }
+ if len(c.HostAdd) > 0 && !c.NoHosts {
+ options = append(options, libpod.WithHosts(c.HostAdd))
+ }
+
+ return options, nil
+}
+
+func (c *UtsConfig) ConfigureGenerator(g *generate.Generator, net *NetworkConfig, runtime *libpod.Runtime) error {
+ hostname := c.Hostname
+ var err error
+ if hostname == "" {
+ if utsCtrID := c.UtsMode.Container(); utsCtrID != "" {
+ utsCtr, err := runtime.GetContainer(utsCtrID)
+ if err != nil {
+ return errors.Wrapf(err, "unable to retrieve hostname from dependency container %s", utsCtrID)
+ }
+ hostname = utsCtr.Hostname()
+ } else if net.NetMode.IsHost() || c.UtsMode.IsHost() {
+ hostname, err = os.Hostname()
+ if err != nil {
+ return errors.Wrap(err, "unable to retrieve hostname of the host")
+ }
+ } else {
+ logrus.Debug("No hostname set; container's hostname will default to runtime default")
+ }
+ }
+ g.RemoveHostname()
+ if c.Hostname != "" || !c.UtsMode.IsHost() {
+ // Set the hostname in the OCI configuration only
+ // if specified by the user or if we are creating
+ // a new UTS namespace.
+ g.SetHostname(hostname)
+ }
+ g.AddProcessEnv("HOSTNAME", hostname)
+
+ utsMode := c.UtsMode
+ if IsNS(string(utsMode)) {
+ return g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), NS(string(utsMode)))
+ }
+ if utsMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.UTSNamespace))
+ }
+ if utsCtr := utsMode.Container(); utsCtr != "" {
+ logrus.Debugf("using container %s utsmode", utsCtr)
+ }
+ return nil
+}
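Hostname resolution moves out of spec.go wholesale (see the removal further down): an explicit hostname wins, then the hostname of the UTS-source container, then the host's own name when the host UTS or network namespace is shared, and otherwise the runtime default. A condensed sketch of that precedence with the container lookup elided (resolveHostname is an illustrative name only):

func resolveHostname(explicit, utsCtrHostname string, sharesHostUTSOrNet bool) (string, error) {
	if explicit != "" {
		return explicit, nil
	}
	if utsCtrHostname != "" {
		return utsCtrHostname, nil // inherited from the --uts=container:<id> target
	}
	if sharesHostUTSOrNet {
		return os.Hostname() // sharing the host namespace: use the host's name
	}
	return "", nil // empty: the runtime falls back to its own default
}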
diff --git a/pkg/spec/security.go b/pkg/spec/security.go
new file mode 100644
index 000000000..05ed94e66
--- /dev/null
+++ b/pkg/spec/security.go
@@ -0,0 +1,172 @@
+package createconfig
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/docker/docker/oci/caps"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+)
+
+func (c *SecurityConfig) ToCreateOptions() ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ options = append(options, libpod.WithSecLabels(c.LabelOpts))
+ options = append(options, libpod.WithPrivileged(c.Privileged))
+ return options, nil
+}
+
+func (c *SecurityConfig) SetLabelOpts(runtime *libpod.Runtime, pidConfig *PidConfig, ipcConfig *IpcConfig) error {
+ if c.Privileged {
+ c.LabelOpts = label.DisableSecOpt()
+ return nil
+ }
+
+ var labelOpts []string
+ if pidConfig.PidMode.IsHost() {
+ labelOpts = append(labelOpts, label.DisableSecOpt()...)
+ } else if pidConfig.PidMode.IsContainer() {
+ ctr, err := runtime.LookupContainer(pidConfig.PidMode.Container())
+ if err != nil {
+ return errors.Wrapf(err, "container %q not found", pidConfig.PidMode.Container())
+ }
+ secopts, err := label.DupSecOpt(ctr.ProcessLabel())
+ if err != nil {
+ return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ }
+ labelOpts = append(labelOpts, secopts...)
+ }
+
+ if ipcConfig.IpcMode.IsHost() {
+ labelOpts = append(labelOpts, label.DisableSecOpt()...)
+ } else if ipcConfig.IpcMode.IsContainer() {
+ ctr, err := runtime.LookupContainer(ipcConfig.IpcMode.Container())
+ if err != nil {
+ return errors.Wrapf(err, "container %q not found", ipcConfig.IpcMode.Container())
+ }
+ secopts, err := label.DupSecOpt(ctr.ProcessLabel())
+ if err != nil {
+ return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ }
+ labelOpts = append(labelOpts, secopts...)
+ }
+
+ c.LabelOpts = append(c.LabelOpts, labelOpts...)
+ return nil
+}
+
+func (c *SecurityConfig) SetSecurityOpts(runtime *libpod.Runtime, securityOpts []string) error {
+ for _, opt := range securityOpts {
+ if opt == "no-new-privileges" {
+ c.NoNewPrivs = true
+ } else {
+ con := strings.SplitN(opt, "=", 2)
+ if len(con) != 2 {
+ return fmt.Errorf("invalid --security-opt format: %q", opt)
+ }
+
+ switch con[0] {
+ case "label":
+ c.LabelOpts = append(c.LabelOpts, con[1])
+ case "apparmor":
+ c.ApparmorProfile = con[1]
+ case "seccomp":
+ c.SeccompProfilePath = con[1]
+ default:
+ return fmt.Errorf("invalid --security-opt option: %q", opt)
+ }
+ }
+ }
+
+ if c.SeccompProfilePath == "" {
+ var err error
+ c.SeccompProfilePath, err = libpod.DefaultSeccompPath()
+ if err != nil {
+ return err
+ }
+ }
+ c.SecurityOpts = securityOpts
+ return nil
+}
+
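SetSecurityOpts takes the raw --security-opt values: the bare no-new-privileges token is accepted as-is, everything else must be key=value with a label, apparmor or seccomp key, and an unset seccomp path is filled in from libpod.DefaultSeccompPath. An illustrative call (runtime stands for a *libpod.Runtime already in scope):

var sec createconfig.SecurityConfig
opts := []string{
	"no-new-privileges",
	"label=type:my_container_t",
	"seccomp=/usr/share/containers/seccomp.json",
}
if err := sec.SetSecurityOpts(runtime, opts); err != nil {
	// a bare unknown token or an unknown key such as "foo=bar" ends up here
}
// sec.NoNewPrivs == true
// sec.LabelOpts contains "type:my_container_t"
// sec.SeccompProfilePath == "/usr/share/containers/seccomp.json"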
+func (c *SecurityConfig) ConfigureGenerator(g *generate.Generator, user *UserConfig) error {
+ // HANDLE CAPABILITIES
+ // NOTE: Must happen before SECCOMP
+ if c.Privileged {
+ g.SetupPrivileged(true)
+ }
+
+ useNotRoot := func(user string) bool {
+ if user == "" || user == "root" || user == "0" {
+ return false
+ }
+ return true
+ }
+
+ configSpec := g.Config
+ var err error
+ var caplist []string
+ bounding := configSpec.Process.Capabilities.Bounding
+ if useNotRoot(user.User) {
+ configSpec.Process.Capabilities.Bounding = caplist
+ }
+ caplist, err = caps.TweakCapabilities(configSpec.Process.Capabilities.Bounding, c.CapAdd, c.CapDrop, nil, false)
+ if err != nil {
+ return err
+ }
+
+ configSpec.Process.Capabilities.Bounding = caplist
+ configSpec.Process.Capabilities.Permitted = caplist
+ configSpec.Process.Capabilities.Inheritable = caplist
+ configSpec.Process.Capabilities.Effective = caplist
+ configSpec.Process.Capabilities.Ambient = caplist
+ if useNotRoot(user.User) {
+ caplist, err = caps.TweakCapabilities(bounding, c.CapAdd, c.CapDrop, nil, false)
+ if err != nil {
+ return err
+ }
+ }
+ configSpec.Process.Capabilities.Bounding = caplist
+
+ // HANDLE SECCOMP
+ if c.SeccompProfilePath != "unconfined" {
+ seccompConfig, err := getSeccompConfig(c, configSpec)
+ if err != nil {
+ return err
+ }
+ configSpec.Linux.Seccomp = seccompConfig
+ }
+
+ // Clear default Seccomp profile from Generator for privileged containers
+ if c.SeccompProfilePath == "unconfined" || c.Privileged {
+ configSpec.Linux.Seccomp = nil
+ }
+
+ for _, opt := range c.SecurityOpts {
+ // Split on both : and =
+ splitOpt := strings.Split(opt, "=")
+ if len(splitOpt) == 1 {
+ splitOpt = strings.Split(opt, ":")
+ }
+ if len(splitOpt) < 2 {
+ continue
+ }
+ switch splitOpt[0] {
+ case "label":
+ configSpec.Annotations[libpod.InspectAnnotationLabel] = splitOpt[1]
+ case "seccomp":
+ configSpec.Annotations[libpod.InspectAnnotationSeccomp] = splitOpt[1]
+ case "apparmor":
+ configSpec.Annotations[libpod.InspectAnnotationApparmor] = splitOpt[1]
+ }
+ }
+
+ g.SetRootReadonly(c.ReadOnlyRootfs)
+ for sysctlKey, sysctlVal := range c.Sysctl {
+ g.AddLinuxSysctl(sysctlKey, sysctlVal)
+ }
+
+ return nil
+}
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index 33e9ec076..7a220012f 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -1,7 +1,6 @@
package createconfig
import (
- "os"
"strings"
"github.com/containers/libpod/libpod"
@@ -10,13 +9,11 @@ import (
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/sysinfo"
- "github.com/docker/docker/oci/caps"
"github.com/docker/go-units"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const cpuPeriod = 100000
@@ -47,14 +44,13 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
canMountSys := true
isRootless := rootless.IsRootless()
- hasUserns := config.UsernsMode.IsContainer() || config.UsernsMode.IsNS() || len(config.IDMappings.UIDMap) > 0 || len(config.IDMappings.GIDMap) > 0
- inUserNS := isRootless || (hasUserns && !config.UsernsMode.IsHost())
+ inUserNS := config.User.InNS(isRootless)
- if inUserNS && config.NetMode.IsHost() {
+ if inUserNS && config.Network.NetMode.IsHost() {
canMountSys = false
}
- if config.Privileged && canMountSys {
+ if config.Security.Privileged && canMountSys {
cgroupPerm = "rw"
g.RemoveMount("/sys")
sysMnt := spec.Mount{
@@ -68,7 +64,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
addCgroup = false
g.RemoveMount("/sys")
r := "ro"
- if config.Privileged {
+ if config.Security.Privileged {
r = "rw"
}
sysMnt := spec.Mount{
@@ -78,7 +74,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
}
g.AddMount(sysMnt)
- if !config.Privileged && isRootless {
+ if !config.Security.Privileged && isRootless {
g.AddLinuxMaskedPaths("/sys/kernel")
}
}
@@ -92,9 +88,9 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// When using a different user namespace, check that the GID 5 is mapped inside
// the container.
- if gid5Available && len(config.IDMappings.GIDMap) > 0 {
+ if gid5Available && len(config.User.IDMappings.GIDMap) > 0 {
mappingFound := false
- for _, r := range config.IDMappings.GIDMap {
+ for _, r := range config.User.IDMappings.GIDMap {
if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
mappingFound = true
break
@@ -117,7 +113,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
g.AddMount(devPts)
}
- if inUserNS && config.IpcMode.IsHost() {
+ if inUserNS && config.Ipc.IpcMode.IsHost() {
g.RemoveMount("/dev/mqueue")
devMqueue := spec.Mount{
Destination: "/dev/mqueue",
@@ -127,7 +123,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
g.AddMount(devMqueue)
}
- if inUserNS && config.PidMode.IsHost() {
+ if inUserNS && config.Pid.PidMode.IsHost() {
g.RemoveMount("/proc")
procMount := spec.Mount{
Destination: "/proc",
@@ -154,55 +150,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
for key, val := range config.Annotations {
g.AddAnnotation(key, val)
}
- g.SetRootReadonly(config.ReadOnlyRootfs)
-
- if config.HTTPProxy {
- for _, envSpec := range []string{
- "http_proxy",
- "HTTP_PROXY",
- "https_proxy",
- "HTTPS_PROXY",
- "ftp_proxy",
- "FTP_PROXY",
- "no_proxy",
- "NO_PROXY",
- } {
- envVal := os.Getenv(envSpec)
- if envVal != "" {
- g.AddProcessEnv(envSpec, envVal)
- }
- }
- }
-
- hostname := config.Hostname
- if hostname == "" {
- if utsCtrID := config.UtsMode.Container(); utsCtrID != "" {
- utsCtr, err := runtime.GetContainer(utsCtrID)
- if err != nil {
- return nil, errors.Wrapf(err, "unable to retrieve hostname from dependency container %s", utsCtrID)
- }
- hostname = utsCtr.Hostname()
- } else if config.NetMode.IsHost() || config.UtsMode.IsHost() {
- hostname, err = os.Hostname()
- if err != nil {
- return nil, errors.Wrap(err, "unable to retrieve hostname of the host")
- }
- } else {
- logrus.Debug("No hostname set; container's hostname will default to runtime default")
- }
- }
- g.RemoveHostname()
- if config.Hostname != "" || !config.UtsMode.IsHost() {
- // Set the hostname in the OCI configuration only
- // if specified by the user or if we are creating
- // a new UTS namespace.
- g.SetHostname(hostname)
- }
- g.AddProcessEnv("HOSTNAME", hostname)
-
- for sysctlKey, sysctlVal := range config.Sysctl {
- g.AddLinuxSysctl(sysctlKey, sysctlVal)
- }
g.AddProcessEnv("container", "podman")
addedResources := false
@@ -272,7 +219,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// Devices
- if config.Privileged {
+ if config.Security.Privileged {
// If privileged, we need to add all the host devices to the
// spec. We do not add the user provided ones because we are
// already adding them all.
@@ -287,17 +234,11 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
}
- for _, uidmap := range config.IDMappings.UIDMap {
- g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
- }
- for _, gidmap := range config.IDMappings.GIDMap {
- g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
- }
// SECURITY OPTS
- g.SetProcessNoNewPrivileges(config.NoNewPrivs)
+ g.SetProcessNoNewPrivileges(config.Security.NoNewPrivs)
- if !config.Privileged {
- g.SetProcessApparmorProfile(config.ApparmorProfile)
+ if !config.Security.Privileged {
+ g.SetProcessApparmorProfile(config.Security.ApparmorProfile)
}
blockAccessToKernelFilesystems(config, &g)
@@ -341,54 +282,35 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
return nil, err
}
- if err := addPidNS(config, &g); err != nil {
+ // NAMESPACES
+
+ if err := config.Pid.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addUserNS(config, &g); err != nil {
+ if err := config.User.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addNetNS(config, &g); err != nil {
+ if err := config.Network.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addUTSNS(config, &g); err != nil {
+ if err := config.Uts.ConfigureGenerator(&g, &config.Network, runtime); err != nil {
return nil, err
}
- if err := addIpcNS(config, &g); err != nil {
+ if err := config.Ipc.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addCgroupNS(config, &g); err != nil {
+ if err := config.Cgroup.ConfigureGenerator(&g); err != nil {
return nil, err
}
configSpec := g.Config
- // HANDLE CAPABILITIES
- // NOTE: Must happen before SECCOMP
- if !config.Privileged {
- if err := setupCapabilities(config, configSpec); err != nil {
- return nil, err
- }
- } else {
- g.SetupPrivileged(true)
- }
-
- // HANDLE SECCOMP
-
- if config.SeccompProfilePath != "unconfined" {
- seccompConfig, err := getSeccompConfig(config, configSpec)
- if err != nil {
- return nil, err
- }
- configSpec.Linux.Seccomp = seccompConfig
- }
-
- // Clear default Seccomp profile from Generator for privileged containers
- if config.SeccompProfilePath == "unconfined" || config.Privileged {
- configSpec.Linux.Seccomp = nil
+ if err := config.Security.ConfigureGenerator(&g, &config.User); err != nil {
+ return nil, err
}
// BIND MOUNTS
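createConfigToOCISpec now delegates all namespace handling to the sub-configs' ConfigureGenerator methods in a fixed order (Pid, User, Network, Uts, Ipc, Cgroup) and then applies the security settings. If one wanted the ordering to be explicit in a single place, the same calls could be driven from a slice, purely as an illustration and not part of this patch:

steps := []func() error{
	func() error { return config.Pid.ConfigureGenerator(&g) },
	func() error { return config.User.ConfigureGenerator(&g) },
	func() error { return config.Network.ConfigureGenerator(&g) },
	func() error { return config.Uts.ConfigureGenerator(&g, &config.Network, runtime) },
	func() error { return config.Ipc.ConfigureGenerator(&g) },
	func() error { return config.Cgroup.ConfigureGenerator(&g) },
	func() error { return config.Security.ConfigureGenerator(&g, &config.User) },
}
for _, step := range steps {
	if err := step(); err != nil {
		return nil, err
	}
}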
@@ -430,7 +352,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
}
- switch config.Cgroups {
+ switch config.Cgroup.Cgroups {
case "disabled":
if addedResources {
return nil, errors.New("cannot specify resource limits when cgroups are disabled is specified")
@@ -461,48 +383,23 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
configSpec.Annotations[libpod.InspectAnnotationVolumesFrom] = strings.Join(config.VolumesFrom, ",")
}
- if config.Privileged {
+ if config.Security.Privileged {
configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseTrue
} else {
configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseFalse
}
- if config.PublishAll {
- configSpec.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseTrue
- } else {
- configSpec.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseFalse
- }
-
if config.Init {
configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseTrue
} else {
configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseFalse
}
- for _, opt := range config.SecurityOpts {
- // Split on both : and =
- splitOpt := strings.Split(opt, "=")
- if len(splitOpt) == 1 {
- splitOpt = strings.Split(opt, ":")
- }
- if len(splitOpt) < 2 {
- continue
- }
- switch splitOpt[0] {
- case "label":
- configSpec.Annotations[libpod.InspectAnnotationLabel] = splitOpt[1]
- case "seccomp":
- configSpec.Annotations[libpod.InspectAnnotationSeccomp] = splitOpt[1]
- case "apparmor":
- configSpec.Annotations[libpod.InspectAnnotationApparmor] = splitOpt[1]
- }
- }
-
return configSpec, nil
}
func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
- if !config.Privileged {
+ if !config.Security.Privileged {
for _, mp := range []string{
"/proc/acpi",
"/proc/kcore",
@@ -518,7 +415,7 @@ func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator)
g.AddLinuxMaskedPaths(mp)
}
- if config.PidMode.IsHost() && rootless.IsRootless() {
+ if config.Pid.PidMode.IsHost() && rootless.IsRootless() {
return
}
@@ -535,130 +432,6 @@ func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator)
}
}
-func addPidNS(config *CreateConfig, g *generate.Generator) error {
- pidMode := config.PidMode
- if IsNS(string(pidMode)) {
- return g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), NS(string(pidMode)))
- }
- if pidMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.PIDNamespace))
- }
- if pidCtr := pidMode.Container(); pidCtr != "" {
- logrus.Debugf("using container %s pidmode", pidCtr)
- }
- if IsPod(string(pidMode)) {
- logrus.Debug("using pod pidmode")
- }
- return nil
-}
-
-func addUserNS(config *CreateConfig, g *generate.Generator) error {
- if IsNS(string(config.UsernsMode)) {
- if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), NS(string(config.UsernsMode))); err != nil {
- return err
- }
- // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
- g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
- g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
- }
-
- if (len(config.IDMappings.UIDMap) > 0 || len(config.IDMappings.GIDMap) > 0) && !config.UsernsMode.IsHost() {
- if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
- return err
- }
- }
- return nil
-}
-
-func addNetNS(config *CreateConfig, g *generate.Generator) error {
- netMode := config.NetMode
- if netMode.IsHost() {
- logrus.Debug("Using host netmode")
- return g.RemoveLinuxNamespace(string(spec.NetworkNamespace))
- } else if netMode.IsNone() {
- logrus.Debug("Using none netmode")
- return nil
- } else if netMode.IsBridge() {
- logrus.Debug("Using bridge netmode")
- return nil
- } else if netCtr := netMode.Container(); netCtr != "" {
- logrus.Debugf("using container %s netmode", netCtr)
- return nil
- } else if IsNS(string(netMode)) {
- logrus.Debug("Using ns netmode")
- return g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), NS(string(netMode)))
- } else if IsPod(string(netMode)) {
- logrus.Debug("Using pod netmode, unless pod is not sharing")
- return nil
- } else if netMode.IsSlirp4netns() {
- logrus.Debug("Using slirp4netns netmode")
- return nil
- } else if netMode.IsUserDefined() {
- logrus.Debug("Using user defined netmode")
- return nil
- }
- return errors.Errorf("unknown network mode")
-}
-
-func addUTSNS(config *CreateConfig, g *generate.Generator) error {
- utsMode := config.UtsMode
- if IsNS(string(utsMode)) {
- return g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), NS(string(utsMode)))
- }
- if utsMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.UTSNamespace))
- }
- if utsCtr := utsMode.Container(); utsCtr != "" {
- logrus.Debugf("using container %s utsmode", utsCtr)
- }
- return nil
-}
-
-func addIpcNS(config *CreateConfig, g *generate.Generator) error {
- ipcMode := config.IpcMode
- if IsNS(string(ipcMode)) {
- return g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), NS(string(ipcMode)))
- }
- if ipcMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.IPCNamespace))
- }
- if ipcCtr := ipcMode.Container(); ipcCtr != "" {
- logrus.Debugf("Using container %s ipcmode", ipcCtr)
- }
-
- return nil
-}
-
-func addCgroupNS(config *CreateConfig, g *generate.Generator) error {
- cgroupMode := config.CgroupMode
-
- if cgroupMode.IsDefaultValue() {
- // If the value is not specified, default to "private" on cgroups v2 and "host" on cgroups v1.
- unified, err := cgroups.IsCgroup2UnifiedMode()
- if err != nil {
- return err
- }
- if unified {
- cgroupMode = "private"
- } else {
- cgroupMode = "host"
- }
- }
- if cgroupMode.IsNS() {
- return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), NS(string(cgroupMode)))
- }
- if cgroupMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.CgroupNamespace))
- }
- if cgroupMode.IsPrivate() {
- return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), "")
- }
- if cgCtr := cgroupMode.Container(); cgCtr != "" {
- logrus.Debugf("Using container %s cgroup mode", cgCtr)
- }
- return nil
-}
-
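The removed addPidNS/addUserNS/addNetNS/addUTSNS/addIpcNS/addCgroupNS helpers all reduce to the same generator operations (join a namespace by path, drop a namespace to share the host's, or defer to another container), and the per-namespace config fields used elsewhere in this diff (config.Pid.PidMode, config.Cgroup.Cgroups) suggest that logic now lives next to those structs. A minimal, self-contained sketch of the underlying generator calls, assuming the generate.New("linux") constructor from opencontainers/runtime-tools; the joined namespace path is illustrative only:

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	// generate.New pre-populates a Linux spec with the default (private)
	// namespaces; the removed helpers mutated exactly this object.
	g, err := generate.New("linux")
	if err != nil {
		panic(err)
	}

	// "host" mode: remove the namespace so the container shares the host's.
	if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
		panic(err)
	}

	// "ns:<path>" mode: join an existing namespace by path (path illustrative).
	if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), "/proc/1/ns/pid"); err != nil {
		panic(err)
	}

	fmt.Printf("namespaces: %+v\n", g.Config.Linux.Namespaces)
}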
func addRlimits(config *CreateConfig, g *generate.Generator) error {
var (
kernelMax uint64 = 1048576
@@ -702,37 +475,3 @@ func addRlimits(config *CreateConfig, g *generate.Generator) error {
return nil
}
-
-func setupCapabilities(config *CreateConfig, configSpec *spec.Spec) error {
- useNotRoot := func(user string) bool {
- if user == "" || user == "root" || user == "0" {
- return false
- }
- return true
- }
-
- var err error
- var caplist []string
- bounding := configSpec.Process.Capabilities.Bounding
- if useNotRoot(config.User) {
- configSpec.Process.Capabilities.Bounding = caplist
- }
- caplist, err = caps.TweakCapabilities(configSpec.Process.Capabilities.Bounding, config.CapAdd, config.CapDrop, nil, false)
- if err != nil {
- return err
- }
-
- configSpec.Process.Capabilities.Bounding = caplist
- configSpec.Process.Capabilities.Permitted = caplist
- configSpec.Process.Capabilities.Inheritable = caplist
- configSpec.Process.Capabilities.Effective = caplist
- configSpec.Process.Capabilities.Ambient = caplist
- if useNotRoot(config.User) {
- caplist, err = caps.TweakCapabilities(bounding, config.CapAdd, config.CapDrop, nil, false)
- if err != nil {
- return err
- }
- }
- configSpec.Process.Capabilities.Bounding = caplist
- return nil
-}
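setupCapabilities above is the other half of what the new config.Security.ConfigureGenerator call replaces: resolve the final capability list with caps.TweakCapabilities, apply it to all five capability sets, and keep the bounding set derived from the full default set when the container user is not root. A condensed sketch of that same logic; the package name is illustrative and the caps import path is assumed to be docker's oci/caps helper that the removed code calls:

package capsketch // illustrative package name

import (
	"github.com/docker/docker/oci/caps" // assumed import path of the caps helper used above
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// applyCapabilities mirrors the removed setupCapabilities. For non-root users
// the effective/permitted/inheritable/ambient sets start from an empty base
// (only explicit --cap-add entries survive), while the bounding set is still
// computed from the spec's default bounding set.
func applyCapabilities(s *spec.Spec, user string, capAdd, capDrop []string) error {
	notRoot := user != "" && user != "root" && user != "0"

	defaultBounding := s.Process.Capabilities.Bounding
	base := defaultBounding
	if notRoot {
		base = nil
	}

	caplist, err := caps.TweakCapabilities(base, capAdd, capDrop, nil, false)
	if err != nil {
		return err
	}
	s.Process.Capabilities.Bounding = caplist
	s.Process.Capabilities.Permitted = caplist
	s.Process.Capabilities.Inheritable = caplist
	s.Process.Capabilities.Effective = caplist
	s.Process.Capabilities.Ambient = caplist

	if notRoot {
		bounded, err := caps.TweakCapabilities(defaultBounding, capAdd, capDrop, nil, false)
		if err != nil {
			return err
		}
		s.Process.Capabilities.Bounding = bounded
	}
	return nil
}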
diff --git a/pkg/spec/spec_test.go b/pkg/spec/spec_test.go
index 2f91e1b21..0f63b2bbc 100644
--- a/pkg/spec/spec_test.go
+++ b/pkg/spec/spec_test.go
@@ -21,9 +21,9 @@ var (
func makeTestCreateConfig() *CreateConfig {
cc := new(CreateConfig)
cc.Resources = CreateResourceConfig{}
- cc.IDMappings = new(storage.IDMappingOptions)
- cc.IDMappings.UIDMap = []idtools.IDMap{}
- cc.IDMappings.GIDMap = []idtools.IDMap{}
+ cc.User.IDMappings = new(storage.IDMappingOptions)
+ cc.User.IDMappings.UIDMap = []idtools.IDMap{}
+ cc.User.IDMappings.GIDMap = []idtools.IDMap{}
return cc
}
diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go
index 095534589..79c065b5d 100644
--- a/pkg/spec/storage.go
+++ b/pkg/spec/storage.go
@@ -160,7 +160,7 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
}
// If requested, add tmpfs filesystems for read-only containers.
- if config.ReadOnlyRootfs && config.ReadOnlyTmpfs {
+ if config.Security.ReadOnlyRootfs && config.Security.ReadOnlyTmpfs {
readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"}
options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
for _, dest := range readonlyTmpfs {
@@ -514,11 +514,17 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
Source: TypeTmpfs,
}
- var setDest, setRORW, setSuid, setDev, setExec bool
+ var setDest, setRORW, setSuid, setDev, setExec, setTmpcopyup bool
for _, val := range args {
kv := strings.Split(val, "=")
switch kv[0] {
+ case "tmpcopyup", "notmpcopyup":
+ if setTmpcopyup {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'tmpcopyup' and 'notmpcopyup' options more than once")
+ }
+ setTmpcopyup = true
+ newMount.Options = append(newMount.Options, kv[0])
case "ro", "rw":
if setRORW {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
@@ -801,7 +807,7 @@ func (config *CreateConfig) addContainerInitBinary(path string) (spec.Mount, err
if path == "" {
return mount, fmt.Errorf("please specify a path to the container-init binary")
}
- if !config.PidMode.IsPrivate() {
+ if !config.Pid.PidMode.IsPrivate() {
return mount, fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
}
if config.Systemd {
diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go
index 670daeaf9..d21800bc3 100644
--- a/pkg/util/mountOpts.go
+++ b/pkg/util/mountOpts.go
@@ -30,6 +30,8 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool
)
+ var newOptions []string
+
for _, opt := range options {
// Some options have parameters - size, mode
splitOpt := strings.SplitN(opt, "=", 2)
@@ -80,9 +82,19 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts")
}
if foundCopyUp {
- return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' option can only be set once")
+ return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once")
+ }
+ foundCopyUp = true
+ case "notmpcopyup":
+ if !isTmpfs {
+ return nil, errors.Wrapf(ErrBadMntOption, "the 'notmpcopyup' option is only allowed with tmpfs mounts")
+ }
+ if foundCopyUp {
+ return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once")
}
foundCopyUp = true
+ // do not propagate notmpcopyup to the OCI runtime
+ continue
case "bind", "rbind":
if isTmpfs {
return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts")
@@ -101,29 +113,30 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
default:
return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt)
}
+ newOptions = append(newOptions, opt)
}
if !foundWrite {
- options = append(options, "rw")
+ newOptions = append(newOptions, "rw")
}
if !foundProp {
- options = append(options, "rprivate")
+ newOptions = append(newOptions, "rprivate")
}
if !foundExec && (defaults == nil || defaults.Noexec) {
- options = append(options, "noexec")
+ newOptions = append(newOptions, "noexec")
}
if !foundSuid && (defaults == nil || defaults.Nosuid) {
- options = append(options, "nosuid")
+ newOptions = append(newOptions, "nosuid")
}
if !foundDev && (defaults == nil || defaults.Nodev) {
- options = append(options, "nodev")
+ newOptions = append(newOptions, "nodev")
}
if isTmpfs && !foundCopyUp {
- options = append(options, "tmpcopyup")
+ newOptions = append(newOptions, "tmpcopyup")
}
if !isTmpfs && !foundBind {
- options = append(options, "rbind")
+ newOptions = append(newOptions, "rbind")
}
- return options, nil
+ return newOptions, nil
}
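The rewrite above builds a fresh newOptions slice instead of appending to the caller's slice, and treats notmpcopyup as a preference that suppresses the default tmpcopyup without being forwarded to the OCI runtime. A small usage sketch, assuming pkg/util is importable as github.com/containers/libpod/pkg/util:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/util" // assumed import path for pkg/util
)

func main() {
	// tmpfs mount with an explicit "notmpcopyup": the flag records that a
	// copy-up preference was given, so no default "tmpcopyup" is added, and
	// the flag itself never reaches the runtime.
	opts, err := util.ProcessOptions([]string{"ro", "notmpcopyup"}, true, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(opts) // [ro rprivate noexec nosuid nodev]

	// Passing both copy-up options is a duplicate-option error.
	if _, err := util.ProcessOptions([]string{"tmpcopyup", "notmpcopyup"}, true, nil); err != nil {
		fmt.Println("expected error:", err)
	}
}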
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 4b43ceb30..2d3efcbef 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -334,6 +334,10 @@ var _ = Describe("Podman checkpoint", func() {
IPBefore.WaitWithDefaultTimeout()
Expect(IPBefore.ExitCode()).To(Equal(0))
+ MACBefore := podmanTest.Podman([]string{"inspect", "-l", "--format={{.NetworkSettings.MacAddress}}"})
+ MACBefore.WaitWithDefaultTimeout()
+ Expect(MACBefore.ExitCode()).To(Equal(0))
+
result := podmanTest.Podman([]string{"container", "checkpoint", "test_name"})
result.WaitWithDefaultTimeout()
@@ -348,9 +352,16 @@ var _ = Describe("Podman checkpoint", func() {
IPAfter.WaitWithDefaultTimeout()
Expect(IPAfter.ExitCode()).To(Equal(0))
+ MACAfter := podmanTest.Podman([]string{"inspect", "-l", "--format={{.NetworkSettings.MacAddress}}"})
+ MACAfter.WaitWithDefaultTimeout()
+ Expect(MACAfter.ExitCode()).To(Equal(0))
+
// Check that IP address did not change between checkpointing and restoring
Expect(IPBefore.OutputToString()).To(Equal(IPAfter.OutputToString()))
+ // Check that MAC address did not change between checkpointing and restoring
+ Expect(MACBefore.OutputToString()).To(Equal(MACAfter.OutputToString()))
+
Expect(result.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go
new file mode 100644
index 000000000..6460659f7
--- /dev/null
+++ b/test/e2e/create_staticmac_test.go
@@ -0,0 +1,46 @@
+// +build !remoteclient
+
+package integration
+
+import (
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman run with --mac-address flag", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ SkipIfRootless()
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+ // Cleanup the CNI networks used by the tests
+ os.RemoveAll("/var/lib/cni/networks/podman")
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+
+ })
+
+ It("Podman run --mac-address", func() {
+ result := podmanTest.Podman([]string{"run", "--mac-address", "92:d0:c6:0a:29:34", ALPINE, "ip", "addr"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:0a:29:34"))
+ })
+})
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 5d3b1238a..603edbe6b 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -208,6 +208,39 @@ var _ = Describe("Podman generate kube", func() {
Expect(psOut).To(ContainSubstring("test2"))
})
+ It("podman generate with user and reimport kube on pod", func() {
+ podName := "toppod"
+ _, rc, _ := podmanTest.CreatePod(podName)
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"create", "--pod", podName, "--name", "test1", "--user", "100:200", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring("100:200"))
+
+ outputFile := filepath.Join(podmanTest.RunRoot, "pod.yaml")
+ kube := podmanTest.Podman([]string{"generate", "kube", "-f", outputFile, podName})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "rm", "-af"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"play", "kube", outputFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"})
+ inspect1.WaitWithDefaultTimeout()
+ Expect(inspect1.ExitCode()).To(Equal(0))
+ Expect(inspect1.OutputToString()).To(ContainSubstring(inspect.OutputToString()))
+ })
+
It("podman generate kube with volume", func() {
vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
err := os.MkdirAll(vol1, 0755)
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 8e5de85e4..0c2389e40 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -116,6 +116,28 @@ var _ = Describe("Podman run with volumes", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("/run/test rw,nosuid,nodev,noexec,relatime - tmpfs"))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,tmpcopyup", ALPINE, "ls", "/etc/ssl"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("certs"))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,tmpcopyup,notmpcopyup", ALPINE, "ls", "/etc/ssl"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=bind,src=/tmp,target=/tmp,tmpcopyup", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=bind,src=/tmp,target=/tmp,notmpcopyup", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,notmpcopyup", ALPINE, "ls", "/etc/ssl"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Not(ContainSubstring("certs")))
})
It("podman run with conflicting volumes errors", func() {
diff --git a/test/e2e/test.yaml b/test/e2e/test.yaml
index 319d6a4a0..98d2c91df 100644
--- a/test/e2e/test.yaml
+++ b/test/e2e/test.yaml
@@ -24,6 +24,9 @@ spec:
name: test
resources: {}
securityContext:
+ runAsUser: 1000
+ runAsGroup: 3000
+ fsGroup: 2000
allowPrivilegeEscalation: true
capabilities: {}
privileged: false
diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
deleted file mode 100644
index 7647734de..000000000
--- a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
+++ /dev/null
@@ -1,943 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-mksyscall_windows generates windows system call bodies
-
-It parses all files specified on command line containing function
-prototypes (like syscall_windows.go) and prints system call bodies
-to standard output.
-
-The prototypes are marked by lines beginning with "//sys" and read
-like func declarations if //sys is replaced by func, but:
-
-* The parameter lists must give a name for each argument. This
- includes return parameters.
-
-* The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
-
-* If the return parameter is an error number, it must be named err.
-
-* If go func name needs to be different from it's winapi dll name,
- the winapi name could be specified at the end, after "=" sign, like
- //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
-
-* Each function that returns err needs to supply a condition, that
- return value of winapi will be tested against to detect failure.
- This would set err to windows "last-error", otherwise it will be nil.
- The value can be provided at end of //sys declaration, like
- //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
- and is [failretval==0] by default.
-
-Usage:
- mksyscall_windows [flags] [path ...]
-
-The flags are:
- -output
- Specify output file name (outputs to console if blank).
- -trace
- Generate print statement after every syscall.
-*/
-package main
-
-import (
- "bufio"
- "bytes"
- "errors"
- "flag"
- "fmt"
- "go/format"
- "go/parser"
- "go/token"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "text/template"
-)
-
-var (
- filename = flag.String("output", "", "output file name (standard output if omitted)")
- printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
- systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
- winio = flag.Bool("winio", false, "import go-winio")
-)
-
-func trim(s string) string {
- return strings.Trim(s, " \t")
-}
-
-var packageName string
-
-func packagename() string {
- return packageName
-}
-
-func syscalldot() string {
- if packageName == "syscall" {
- return ""
- }
- return "syscall."
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
- fn *Fn
- tmpVarIdx int
-}
-
-// tmpVar returns temp variable name that will be used to represent p during syscall.
-func (p *Param) tmpVar() string {
- if p.tmpVarIdx < 0 {
- p.tmpVarIdx = p.fn.curTmpVarIdx
- p.fn.curTmpVarIdx++
- }
- return fmt.Sprintf("_p%d", p.tmpVarIdx)
-}
-
-// BoolTmpVarCode returns source code for bool temp variable.
-func (p *Param) BoolTmpVarCode() string {
- const code = `var %s uint32
- if %s {
- %s = 1
- } else {
- %s = 0
- }`
- tmp := p.tmpVar()
- return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
-}
-
-// SliceTmpVarCode returns source code for slice temp variable.
-func (p *Param) SliceTmpVarCode() string {
- const code = `var %s *%s
- if len(%s) > 0 {
- %s = &%s[0]
- }`
- tmp := p.tmpVar()
- return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
-}
-
-// StringTmpVarCode returns source code for string temp variable.
-func (p *Param) StringTmpVarCode() string {
- errvar := p.fn.Rets.ErrorVarName()
- if errvar == "" {
- errvar = "_"
- }
- tmp := p.tmpVar()
- const code = `var %s %s
- %s, %s = %s(%s)`
- s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
- if errvar == "-" {
- return s
- }
- const morecode = `
- if %s != nil {
- return
- }`
- return s + fmt.Sprintf(morecode, errvar)
-}
-
-// TmpVarCode returns source code for temp variable.
-func (p *Param) TmpVarCode() string {
- switch {
- case p.Type == "bool":
- return p.BoolTmpVarCode()
- case strings.HasPrefix(p.Type, "[]"):
- return p.SliceTmpVarCode()
- default:
- return ""
- }
-}
-
-// TmpVarHelperCode returns source code for helper's temp variable.
-func (p *Param) TmpVarHelperCode() string {
- if p.Type != "string" {
- return ""
- }
- return p.StringTmpVarCode()
-}
-
-// SyscallArgList returns source code fragments representing p parameter
-// in syscall. Slices are translated into 2 syscall parameters: pointer to
-// the first element and length.
-func (p *Param) SyscallArgList() []string {
- t := p.HelperType()
- var s string
- switch {
- case t[0] == '*':
- s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
- case t == "bool":
- s = p.tmpVar()
- case strings.HasPrefix(t, "[]"):
- return []string{
- fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
- fmt.Sprintf("uintptr(len(%s))", p.Name),
- }
- default:
- s = p.Name
- }
- return []string{fmt.Sprintf("uintptr(%s)", s)}
-}
-
-// IsError determines if p parameter is used to return error.
-func (p *Param) IsError() bool {
- return p.Name == "err" && p.Type == "error"
-}
-
-// HelperType returns type of parameter p used in helper function.
-func (p *Param) HelperType() string {
- if p.Type == "string" {
- return p.fn.StrconvType()
- }
- return p.Type
-}
-
-// join concatenates parameters ps into a string with sep separator.
-// Each parameter is converted into string by applying fn to it
-// before conversion.
-func join(ps []*Param, fn func(*Param) string, sep string) string {
- if len(ps) == 0 {
- return ""
- }
- a := make([]string, 0)
- for _, p := range ps {
- a = append(a, fn(p))
- }
- return strings.Join(a, sep)
-}
-
-// Rets describes function return parameters.
-type Rets struct {
- Name string
- Type string
- ReturnsError bool
- FailCond string
-}
-
-// ErrorVarName returns error variable name for r.
-func (r *Rets) ErrorVarName() string {
- if r.ReturnsError {
- return "err"
- }
- if r.Type == "error" {
- return r.Name
- }
- return ""
-}
-
-// ToParams converts r into slice of *Param.
-func (r *Rets) ToParams() []*Param {
- ps := make([]*Param, 0)
- if len(r.Name) > 0 {
- ps = append(ps, &Param{Name: r.Name, Type: r.Type})
- }
- if r.ReturnsError {
- ps = append(ps, &Param{Name: "err", Type: "error"})
- }
- return ps
-}
-
-// List returns source code of syscall return parameters.
-func (r *Rets) List() string {
- s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
- if len(s) > 0 {
- s = "(" + s + ")"
- }
- return s
-}
-
-// PrintList returns source code of trace printing part correspondent
-// to syscall return values.
-func (r *Rets) PrintList() string {
- return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
-}
-
-// SetReturnValuesCode returns source code that accepts syscall return values.
-func (r *Rets) SetReturnValuesCode() string {
- if r.Name == "" && !r.ReturnsError {
- return ""
- }
- retvar := "r0"
- if r.Name == "" {
- retvar = "r1"
- }
- errvar := "_"
- if r.ReturnsError {
- errvar = "e1"
- }
- return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
-}
-
-func (r *Rets) useLongHandleErrorCode(retvar string) string {
- const code = `if %s {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = %sEINVAL
- }
- }`
- cond := retvar + " == 0"
- if r.FailCond != "" {
- cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
- }
- return fmt.Sprintf(code, cond, syscalldot())
-}
-
-// SetErrorCode returns source code that sets return parameters.
-func (r *Rets) SetErrorCode() string {
- const code = `if r0 != 0 {
- %s = %sErrno(r0)
- }`
- const hrCode = `if int32(r0) < 0 {
- if r0&0x1fff0000 == 0x00070000 {
- r0 &= 0xffff
- }
- %s = %sErrno(r0)
- }`
- if r.Name == "" && !r.ReturnsError {
- return ""
- }
- if r.Name == "" {
- return r.useLongHandleErrorCode("r1")
- }
- if r.Type == "error" {
- if r.Name == "hr" {
- return fmt.Sprintf(hrCode, r.Name, syscalldot())
- } else {
- return fmt.Sprintf(code, r.Name, syscalldot())
- }
- }
- s := ""
- switch {
- case r.Type[0] == '*':
- s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
- case r.Type == "bool":
- s = fmt.Sprintf("%s = r0 != 0", r.Name)
- default:
- s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
- }
- if !r.ReturnsError {
- return s
- }
- return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
-}
-
-// Fn describes syscall function.
-type Fn struct {
- Name string
- Params []*Param
- Rets *Rets
- PrintTrace bool
- confirmproc bool
- dllname string
- dllfuncname string
- src string
- // TODO: get rid of this field and just use parameter index instead
- curTmpVarIdx int // insure tmp variables have uniq names
-}
-
-// extractParams parses s to extract function parameters.
-func extractParams(s string, f *Fn) ([]*Param, error) {
- s = trim(s)
- if s == "" {
- return nil, nil
- }
- a := strings.Split(s, ",")
- ps := make([]*Param, len(a))
- for i := range ps {
- s2 := trim(a[i])
- b := strings.Split(s2, " ")
- if len(b) != 2 {
- b = strings.Split(s2, "\t")
- if len(b) != 2 {
- return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
- }
- }
- ps[i] = &Param{
- Name: trim(b[0]),
- Type: trim(b[1]),
- fn: f,
- tmpVarIdx: -1,
- }
- }
- return ps, nil
-}
-
-// extractSection extracts text out of string s starting after start
-// and ending just before end. found return value will indicate success,
-// and prefix, body and suffix will contain correspondent parts of string s.
-func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
- s = trim(s)
- if strings.HasPrefix(s, string(start)) {
- // no prefix
- body = s[1:]
- } else {
- a := strings.SplitN(s, string(start), 2)
- if len(a) != 2 {
- return "", "", s, false
- }
- prefix = a[0]
- body = a[1]
- }
- a := strings.SplitN(body, string(end), 2)
- if len(a) != 2 {
- return "", "", "", false
- }
- return prefix, a[0], a[1], true
-}
-
-// newFn parses string s and return created function Fn.
-func newFn(s string) (*Fn, error) {
- s = trim(s)
- f := &Fn{
- Rets: &Rets{},
- src: s,
- PrintTrace: *printTraceFlag,
- }
- // function name and args
- prefix, body, s, found := extractSection(s, '(', ')')
- if !found || prefix == "" {
- return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
- }
- f.Name = prefix
- var err error
- f.Params, err = extractParams(body, f)
- if err != nil {
- return nil, err
- }
- // return values
- _, body, s, found = extractSection(s, '(', ')')
- if found {
- r, err := extractParams(body, f)
- if err != nil {
- return nil, err
- }
- switch len(r) {
- case 0:
- case 1:
- if r[0].IsError() {
- f.Rets.ReturnsError = true
- } else {
- f.Rets.Name = r[0].Name
- f.Rets.Type = r[0].Type
- }
- case 2:
- if !r[1].IsError() {
- return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
- }
- f.Rets.ReturnsError = true
- f.Rets.Name = r[0].Name
- f.Rets.Type = r[0].Type
- default:
- return nil, errors.New("Too many return values in \"" + f.src + "\"")
- }
- }
- // fail condition
- _, body, s, found = extractSection(s, '[', ']')
- if found {
- f.Rets.FailCond = body
- }
- // dll and dll function names
- s = trim(s)
- if s == "" {
- return f, nil
- }
- if !strings.HasPrefix(s, "=") {
- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
- }
- s = trim(s[1:])
- a := strings.Split(s, ".")
- switch len(a) {
- case 1:
- f.dllfuncname = a[0]
- case 2:
- f.dllname = a[0]
- f.dllfuncname = a[1]
- default:
- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
- }
- if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
- f.confirmproc = true
- f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
- }
- return f, nil
-}
-
-// DLLName returns DLL name for function f.
-func (f *Fn) DLLName() string {
- if f.dllname == "" {
- return "kernel32"
- }
- return f.dllname
-}
-
-// DLLName returns DLL function name for function f.
-func (f *Fn) DLLFuncName() string {
- if f.dllfuncname == "" {
- return f.Name
- }
- return f.dllfuncname
-}
-
-func (f *Fn) ConfirmProc() bool {
- return f.confirmproc
-}
-
-// ParamList returns source code for function f parameters.
-func (f *Fn) ParamList() string {
- return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
-}
-
-// HelperParamList returns source code for helper function f parameters.
-func (f *Fn) HelperParamList() string {
- return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
-}
-
-// ParamPrintList returns source code of trace printing part correspondent
-// to syscall input parameters.
-func (f *Fn) ParamPrintList() string {
- return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
-}
-
-// ParamCount return number of syscall parameters for function f.
-func (f *Fn) ParamCount() int {
- n := 0
- for _, p := range f.Params {
- n += len(p.SyscallArgList())
- }
- return n
-}
-
-// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
-// to use. It returns parameter count for correspondent SyscallX function.
-func (f *Fn) SyscallParamCount() int {
- n := f.ParamCount()
- switch {
- case n <= 3:
- return 3
- case n <= 6:
- return 6
- case n <= 9:
- return 9
- case n <= 12:
- return 12
- case n <= 15:
- return 15
- default:
- panic("too many arguments to system call")
- }
-}
-
-// Syscall determines which SyscallX function to use for function f.
-func (f *Fn) Syscall() string {
- c := f.SyscallParamCount()
- if c == 3 {
- return syscalldot() + "Syscall"
- }
- return syscalldot() + "Syscall" + strconv.Itoa(c)
-}
-
-// SyscallParamList returns source code for SyscallX parameters for function f.
-func (f *Fn) SyscallParamList() string {
- a := make([]string, 0)
- for _, p := range f.Params {
- a = append(a, p.SyscallArgList()...)
- }
- for len(a) < f.SyscallParamCount() {
- a = append(a, "0")
- }
- return strings.Join(a, ", ")
-}
-
-// HelperCallParamList returns source code of call into function f helper.
-func (f *Fn) HelperCallParamList() string {
- a := make([]string, 0, len(f.Params))
- for _, p := range f.Params {
- s := p.Name
- if p.Type == "string" {
- s = p.tmpVar()
- }
- a = append(a, s)
- }
- return strings.Join(a, ", ")
-}
-
-// IsUTF16 is true, if f is W (utf16) function. It is false
-// for all A (ascii) functions.
-func (_ *Fn) IsUTF16() bool {
- return true
-}
-
-// StrconvFunc returns name of Go string to OS string function for f.
-func (f *Fn) StrconvFunc() string {
- if f.IsUTF16() {
- return syscalldot() + "UTF16PtrFromString"
- }
- return syscalldot() + "BytePtrFromString"
-}
-
-// StrconvType returns Go type name used for OS string for f.
-func (f *Fn) StrconvType() string {
- if f.IsUTF16() {
- return "*uint16"
- }
- return "*byte"
-}
-
-// HasStringParam is true, if f has at least one string parameter.
-// Otherwise it is false.
-func (f *Fn) HasStringParam() bool {
- for _, p := range f.Params {
- if p.Type == "string" {
- return true
- }
- }
- return false
-}
-
-var uniqDllFuncName = make(map[string]bool)
-
-// IsNotDuplicate is true if f is not a duplicated function
-func (f *Fn) IsNotDuplicate() bool {
- funcName := f.DLLFuncName()
- if uniqDllFuncName[funcName] == false {
- uniqDllFuncName[funcName] = true
- return true
- }
- return false
-}
-
-// HelperName returns name of function f helper.
-func (f *Fn) HelperName() string {
- if !f.HasStringParam() {
- return f.Name
- }
- return "_" + f.Name
-}
-
-// Source files and functions.
-type Source struct {
- Funcs []*Fn
- Files []string
- StdLibImports []string
- ExternalImports []string
-}
-
-func (src *Source) Import(pkg string) {
- src.StdLibImports = append(src.StdLibImports, pkg)
- sort.Strings(src.StdLibImports)
-}
-
-func (src *Source) ExternalImport(pkg string) {
- src.ExternalImports = append(src.ExternalImports, pkg)
- sort.Strings(src.ExternalImports)
-}
-
-// ParseFiles parses files listed in fs and extracts all syscall
-// functions listed in sys comments. It returns source files
-// and functions collection *Source if successful.
-func ParseFiles(fs []string) (*Source, error) {
- src := &Source{
- Funcs: make([]*Fn, 0),
- Files: make([]string, 0),
- StdLibImports: []string{
- "unsafe",
- },
- ExternalImports: make([]string, 0),
- }
- for _, file := range fs {
- if err := src.ParseFile(file); err != nil {
- return nil, err
- }
- }
- return src, nil
-}
-
-// DLLs return dll names for a source set src.
-func (src *Source) DLLs() []string {
- uniq := make(map[string]bool)
- r := make([]string, 0)
- for _, f := range src.Funcs {
- name := f.DLLName()
- if _, found := uniq[name]; !found {
- uniq[name] = true
- r = append(r, name)
- }
- }
- return r
-}
-
-// ParseFile adds additional file path to a source set src.
-func (src *Source) ParseFile(path string) error {
- file, err := os.Open(path)
- if err != nil {
- return err
- }
- defer file.Close()
-
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := trim(s.Text())
- if len(t) < 7 {
- continue
- }
- if !strings.HasPrefix(t, "//sys") {
- continue
- }
- t = t[5:]
- if !(t[0] == ' ' || t[0] == '\t') {
- continue
- }
- f, err := newFn(t[1:])
- if err != nil {
- return err
- }
- src.Funcs = append(src.Funcs, f)
- }
- if err := s.Err(); err != nil {
- return err
- }
- src.Files = append(src.Files, path)
-
- // get package name
- fset := token.NewFileSet()
- _, err = file.Seek(0, 0)
- if err != nil {
- return err
- }
- pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
- if err != nil {
- return err
- }
- packageName = pkg.Name.Name
-
- return nil
-}
-
-// IsStdRepo returns true if src is part of standard library.
-func (src *Source) IsStdRepo() (bool, error) {
- if len(src.Files) == 0 {
- return false, errors.New("no input files provided")
- }
- abspath, err := filepath.Abs(src.Files[0])
- if err != nil {
- return false, err
- }
- goroot := runtime.GOROOT()
- if runtime.GOOS == "windows" {
- abspath = strings.ToLower(abspath)
- goroot = strings.ToLower(goroot)
- }
- sep := string(os.PathSeparator)
- if !strings.HasSuffix(goroot, sep) {
- goroot += sep
- }
- return strings.HasPrefix(abspath, goroot), nil
-}
-
-// Generate output source file from a source set src.
-func (src *Source) Generate(w io.Writer) error {
- const (
- pkgStd = iota // any package in std library
- pkgXSysWindows // x/sys/windows package
- pkgOther
- )
- isStdRepo, err := src.IsStdRepo()
- if err != nil {
- return err
- }
- var pkgtype int
- switch {
- case isStdRepo:
- pkgtype = pkgStd
- case packageName == "windows":
- // TODO: this needs better logic than just using package name
- pkgtype = pkgXSysWindows
- default:
- pkgtype = pkgOther
- }
- if *systemDLL {
- switch pkgtype {
- case pkgStd:
- src.Import("internal/syscall/windows/sysdll")
- case pkgXSysWindows:
- default:
- src.ExternalImport("golang.org/x/sys/windows")
- }
- }
- if *winio {
- src.ExternalImport("github.com/Microsoft/go-winio")
- }
- if packageName != "syscall" {
- src.Import("syscall")
- }
- funcMap := template.FuncMap{
- "packagename": packagename,
- "syscalldot": syscalldot,
- "newlazydll": func(dll string) string {
- arg := "\"" + dll + ".dll\""
- if !*systemDLL {
- return syscalldot() + "NewLazyDLL(" + arg + ")"
- }
- if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") {
- arg = strings.Replace(arg, "_", "-", -1)
- }
- switch pkgtype {
- case pkgStd:
- return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
- case pkgXSysWindows:
- return "NewLazySystemDLL(" + arg + ")"
- default:
- return "windows.NewLazySystemDLL(" + arg + ")"
- }
- },
- }
- t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
- err = t.Execute(w, src)
- if err != nil {
- return errors.New("Failed to execute template: " + err.Error())
- }
- return nil
-}
-
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
- flag.PrintDefaults()
- os.Exit(1)
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- src, err := ParseFiles(flag.Args())
- if err != nil {
- log.Fatal(err)
- }
-
- var buf bytes.Buffer
- if err := src.Generate(&buf); err != nil {
- log.Fatal(err)
- }
-
- data, err := format.Source(buf.Bytes())
- if err != nil {
- log.Fatal(err)
- }
- if *filename == "" {
- _, err = os.Stdout.Write(data)
- } else {
- err = ioutil.WriteFile(*filename, data, 0644)
- }
- if err != nil {
- log.Fatal(err)
- }
-}
-
-// TODO: use println instead to print in the following template
-const srcTemplate = `
-
-{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT
-
-package {{packagename}}
-
-import (
-{{range .StdLibImports}}"{{.}}"
-{{end}}
-
-{{range .ExternalImports}}"{{.}}"
-{{end}}
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
- errnoERROR_IO_PENDING = 997
-)
-
-var (
- errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e {{syscalldot}}Errno) error {
- switch e {
- case 0:
- return nil
- case errnoERROR_IO_PENDING:
- return errERROR_IO_PENDING
- }
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
- return e
-}
-
-var (
-{{template "dlls" .}}
-{{template "funcnames" .}})
-{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
-{{end}}
-
-{{/* help functions */}}
-
-{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
-{{end}}{{end}}
-
-{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
-{{end}}{{end}}
-
-{{define "helperbody"}}
-func {{.Name}}({{.ParamList}}) {{template "results" .}}{
-{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
-}
-{{end}}
-
-{{define "funcbody"}}
-func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
-{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
-{{template "seterror" .}}{{template "printtrace" .}} return
-}
-{{end}}
-
-{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
-{{end}}{{end}}{{end}}
-
-{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
-{{end}}{{end}}{{end}}
-
-{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
-
-{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
-
-{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
- return
-}
-{{end}}{{end}}
-
-
-{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
-{{end}}{{end}}
-
-{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
-{{end}}{{end}}
-
-`
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
index 0f14d3427..22b111742 100644
--- a/vendor/github.com/containernetworking/cni/libcni/api.go
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -25,6 +25,7 @@ import (
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/utils"
"github.com/containernetworking/cni/pkg/version"
)
@@ -32,6 +33,10 @@ var (
CacheDir = "/var/lib/cni"
)
+const (
+ CNICacheV1 = "cniCacheV1"
+)
+
// A RuntimeConf holds the arguments to one invocation of a CNI plugin
// excepting the network configuration, with the nested exception that
// the `runtimeConfig` from the network configuration is included
@@ -48,7 +53,7 @@ type RuntimeConf struct {
// to the plugin
CapabilityArgs map[string]interface{}
- // A cache directory in which to library data. Defaults to CacheDir
+ // DEPRECATED. Will be removed in a future release.
CacheDir string
}
@@ -70,19 +75,22 @@ type CNI interface {
CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
}
type CNIConfig struct {
- Path []string
- exec invoke.Exec
+ Path []string
+ exec invoke.Exec
+ cacheDir string
}
// CNIConfig implements the CNI interface
@@ -92,9 +100,18 @@ var _ CNI = &CNIConfig{}
// in the given paths and use the given exec interface to run those plugins,
// or if the exec interface is not given, will use a default exec handler.
func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+ return NewCNIConfigWithCacheDir(path, "", exec)
+}
+
+// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins
+// in the given paths use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+// The given cache directory will be used for temporary data storage when needed.
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig {
return &CNIConfig{
- Path: path,
- exec: exec,
+ Path: path,
+ cacheDir: cacheDir,
+ exec: exec,
}
}
@@ -165,33 +182,122 @@ func (c *CNIConfig) ensureExec() invoke.Exec {
return c.exec
}
-func getResultCacheFilePath(netName string, rt *RuntimeConf) string {
- cacheDir := rt.CacheDir
- if cacheDir == "" {
- cacheDir = CacheDir
+type cachedInfo struct {
+ Kind string `json:"kind"`
+ ContainerID string `json:"containerId"`
+ Config []byte `json:"config"`
+ IfName string `json:"ifName"`
+ NetworkName string `json:"networkName"`
+ CniArgs [][2]string `json:"cniArgs,omitempty"`
+ CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"`
+ RawResult map[string]interface{} `json:"result,omitempty"`
+ Result types.Result `json:"-"`
+}
+
+// getCacheDir returns the cache directory in this order:
+// 1) global cacheDir from CNIConfig object
+// 2) deprecated cacheDir from RuntimeConf object
+// 3) fall back to default cache directory
+func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string {
+ if c.cacheDir != "" {
+ return c.cacheDir
+ }
+ if rt.CacheDir != "" {
+ return rt.CacheDir
+ }
+ return CacheDir
+}
+
+func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) {
+ if netName == "" || rt.ContainerID == "" || rt.IfName == "" {
+ return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName)
}
- return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName))
+ return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil
}
-func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error {
+func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error {
+ cached := cachedInfo{
+ Kind: CNICacheV1,
+ ContainerID: rt.ContainerID,
+ Config: config,
+ IfName: rt.IfName,
+ NetworkName: netName,
+ CniArgs: rt.Args,
+ CapabilityArgs: rt.CapabilityArgs,
+ }
+
+ // We need to get type.Result into cachedInfo as JSON map
+ // Marshal to []byte, then Unmarshal into cached.RawResult
data, err := json.Marshal(result)
if err != nil {
return err
}
- fname := getResultCacheFilePath(netName, rt)
+
+ err = json.Unmarshal(data, &cached.RawResult)
+ if err != nil {
+ return err
+ }
+
+ newBytes, err := json.Marshal(&cached)
+ if err != nil {
+ return err
+ }
+
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return err
+ }
if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
return err
}
- return ioutil.WriteFile(fname, data, 0600)
+
+ return ioutil.WriteFile(fname, newBytes, 0600)
}
-func delCachedResult(netName string, rt *RuntimeConf) error {
- fname := getResultCacheFilePath(netName, rt)
+func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ // Ignore error
+ return nil
+ }
return os.Remove(fname)
}
-func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
- fname := getResultCacheFilePath(netName, rt)
+func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ var bytes []byte
+
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, nil, err
+ }
+ bytes, err = ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil, nil
+ }
+
+ unmarshaled := cachedInfo{}
+ if err := json.Unmarshal(bytes, &unmarshaled); err != nil {
+ return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %v", netName, err)
+ }
+ if unmarshaled.Kind != CNICacheV1 {
+ return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind)
+ }
+
+ newRt := *rt
+ if unmarshaled.CniArgs != nil {
+ newRt.Args = unmarshaled.CniArgs
+ }
+ newRt.CapabilityArgs = unmarshaled.CapabilityArgs
+
+ return unmarshaled.Config, &newRt, nil
+}
+
+func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, err
+ }
data, err := ioutil.ReadFile(fname)
if err != nil {
// Ignore read errors; the cached result may not exist on-disk
@@ -222,16 +328,73 @@ func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result,
return result, err
}
+func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, err
+ }
+ fdata, err := ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil
+ }
+
+ cachedInfo := cachedInfo{}
+ if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 {
+ return c.getLegacyCachedResult(netName, cniVersion, rt)
+ }
+
+ newBytes, err := json.Marshal(&cachedInfo.RawResult)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal cached network %q config: %v", netName, err)
+ }
+
+ // Read the version of the cached result
+ decoder := version.ConfigDecoder{}
+ resultCniVersion, err := decoder.Decode(newBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure we can understand the result
+ result, err := version.NewResult(resultCniVersion, newBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert to the config version to ensure plugins get prevResult
+ // in the same version as the config. The cached result version
+ // should match the config version unless the config was changed
+ // while the container was running.
+ result, err = result.GetAsVersion(cniVersion)
+ if err != nil && resultCniVersion != cniVersion {
+ return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err)
+ }
+ return result, err
+}
+
// GetNetworkListCachedResult returns the cached Result of the previous
-// previous AddNetworkList() operation for a network list, or an error.
+// AddNetworkList() operation for a network list, or an error.
func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
- return getCachedResult(list.Name, list.CNIVersion, rt)
+ return c.getCachedResult(list.Name, list.CNIVersion, rt)
}
// GetNetworkCachedResult returns the cached Result of the previous
-// previous AddNetwork() operation for a network, or an error.
+// AddNetwork() operation for a network, or an error.
func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
- return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+}
+
+// GetNetworkListCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ return c.getCachedConfig(list.Name, rt)
+}
+
+// GetNetworkCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ return c.getCachedConfig(net.Network.Name, rt)
}
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
@@ -240,6 +403,12 @@ func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net
if err != nil {
return nil, err
}
+ if err := utils.ValidateContainerID(rt.ContainerID); err != nil {
+ return nil, err
+ }
+ if err := utils.ValidateNetworkName(name); err != nil {
+ return nil, err
+ }
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
if err != nil {
@@ -260,7 +429,7 @@ func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList,
}
}
- if err = setCachedResult(result, list.Name, rt); err != nil {
+ if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil {
return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err)
}
@@ -295,7 +464,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis
return nil
}
- cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
+ cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
}
@@ -332,7 +501,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
return err
} else if gtet {
- cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt)
+ cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
}
@@ -344,7 +513,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
return err
}
}
- _ = delCachedResult(list.Name, rt)
+ _ = c.cacheDel(list.Name, rt)
return nil
}
@@ -356,7 +525,7 @@ func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
return nil, err
}
- if err = setCachedResult(result, net.Network.Name, rt); err != nil {
+ if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil {
return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err)
}
@@ -372,7 +541,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru
return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
}
- cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
}
@@ -387,7 +556,7 @@ func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
return err
} else if gtet {
- cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
}
@@ -396,7 +565,7 @@ func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
return err
}
- _ = delCachedResult(net.Network.Name, rt)
+ _ = c.cacheDel(net.Network.Name, rt)
return nil
}
@@ -455,7 +624,8 @@ func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]
// validatePlugin checks that an individual plugin's configuration is sane
func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
- pluginPath, err := invoke.FindInPath(pluginName, c.Path)
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(pluginName, c.Path)
if err != nil {
return err
}
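The new cachedInfo record stores the network config bytes and the runtime arguments next to the result, so a runtime can replay CHECK/DEL after a restart without re-reading the original on-disk config. A sketch of the caller-facing additions; the plugin path, cache directory, container ID and network name below are illustrative placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// NewCNIConfigWithCacheDir pins all cache files under one directory,
	// replacing the now-deprecated RuntimeConf.CacheDir field.
	cni := libcni.NewCNIConfigWithCacheDir(
		[]string{"/opt/cni/bin"}, // plugin search path
		"/var/lib/cni",           // cache directory
		nil,                      // nil exec falls back to the default handler
	)

	rt := &libcni.RuntimeConf{
		ContainerID: "example-ctr",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
	}
	list := &libcni.NetworkConfigList{Name: "podman"} // normally loaded via libcni.ConfListFromFile

	// After AddNetworkList has run once, the cache holds both the exact
	// config bytes and the CNI_ARGS/capability args recorded at ADD time.
	confBytes, cachedRT, err := cni.GetNetworkListCachedConfig(list, rt)
	if err != nil {
		log.Fatal(err)
	}
	if cachedRT == nil {
		fmt.Println("nothing cached yet for this container/interface")
		return
	}
	fmt.Printf("cached config: %d bytes, cached CNI_ARGS: %v\n", len(confBytes), cachedRT.Args)
}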
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
index ea56c509d..d8920cf8c 100644
--- a/vendor/github.com/containernetworking/cni/libcni/conf.go
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -114,11 +114,11 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
for i, conf := range plugins {
newBytes, err := json.Marshal(conf)
if err != nil {
- return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err)
+ return nil, fmt.Errorf("failed to marshal plugin config %d: %v", i, err)
}
netConf, err := ConfFromBytes(newBytes)
if err != nil {
- return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err)
+ return nil, fmt.Errorf("failed to parse plugin config %d: %v", i, err)
}
list.Plugins = append(list.Plugins, netConf)
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
index 913528c1d..d31a44e87 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
@@ -32,7 +32,7 @@ type inherited struct{}
var inheritArgsFromEnv inherited
-func (_ *inherited) AsEnv() []string {
+func (*inherited) AsEnv() []string {
return nil
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go
index bd8640fc9..4eac64899 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/args.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go
@@ -36,7 +36,7 @@ func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
case "0", "false":
*b = false
default:
- return fmt.Errorf("Boolean unmarshal error: invalid input %s", s)
+ return fmt.Errorf("boolean unmarshal error: invalid input %s", s)
}
return nil
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
index d0d11006a..3e185c1ce 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -16,7 +16,6 @@ package types
import (
"encoding/json"
- "errors"
"fmt"
"io"
"net"
@@ -134,9 +133,16 @@ func (r *Route) String() string {
// Well known error codes
// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
const (
- ErrUnknown uint = iota // 0
- ErrIncompatibleCNIVersion // 1
- ErrUnsupportedField // 2
+ ErrUnknown uint = iota // 0
+ ErrIncompatibleCNIVersion // 1
+ ErrUnsupportedField // 2
+ ErrUnknownContainer // 3
+ ErrInvalidEnvironmentVariables // 4
+ ErrIOFailure // 5
+ ErrDecodingFailure // 6
+ ErrInvalidNetworkConfig // 7
+ ErrTryAgainLater uint = 11
+ ErrInternal uint = 999
)
type Error struct {
@@ -145,6 +151,14 @@ type Error struct {
Details string `json:"details,omitempty"`
}
+func NewError(code uint, msg, details string) *Error {
+ return &Error{
+ Code: code,
+ Msg: msg,
+ Details: details,
+ }
+}
+
func (e *Error) Error() string {
details := ""
if e.Details != "" {
@@ -194,6 +208,3 @@ func prettyPrint(obj interface{}) error {
_, err = os.Stdout.Write(data)
return err
}
-
-// NotImplementedError is used to indicate that a method is not implemented for the given platform
-var NotImplementedError = errors.New("Not Implemented")
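
The expanded well-known error codes and the new NewError constructor give plugins and runtimes a structured way to report and classify failures instead of bare strings. A minimal consumption sketch, assuming only the API added above (the message and details values are illustrative):

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	// Build a structured CNI error with one of the new well-known codes;
	// the message and details here are made up for the example.
	err := types.NewError(types.ErrUnknownContainer, "container not found", "id=example")

	// *types.Error implements the error interface.
	fmt.Println(err.Error())

	// Callers can branch on the numeric code instead of parsing strings.
	if err.Code == types.ErrUnknownContainer {
		fmt.Println("plugin reported an unknown container")
	}
}
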
diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
new file mode 100644
index 000000000..324c40dea
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
@@ -0,0 +1,51 @@
+// Copyright 2019 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "regexp"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+// cniValidNameChars is the regexp used to validate the characters allowed in
+// containerID and networkName
+const cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]`
+
+var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`)
+
+// ValidateContainerID will validate that the supplied containerID is not empty and does not contain invalid characters
+func ValidateContainerID(containerID string) *types.Error {
+
+ if containerID == "" {
+ return types.NewError(types.ErrUnknownContainer, "missing containerID", "")
+ }
+ if !cniReg.MatchString(containerID) {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID)
+ }
+ return nil
+}
+
+// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters
+func ValidateNetworkName(networkName string) *types.Error {
+
+ if networkName == "" {
+ return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "")
+ }
+ if !cniReg.MatchString(networkName) {
+ return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName)
+ }
+ return nil
+}
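
The new utils package gives runtimes one place to reject malformed container IDs and network names before any plugin is invoked, returning the structured *types.Error introduced above. A hedged usage sketch (the sample ID and name are made up):

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/utils"
)

func main() {
	// IDs and names must be non-empty, start with an alphanumeric
	// character, and contain only [a-zA-Z0-9_.-].
	if err := utils.ValidateContainerID("3f2a9c"); err != nil {
		fmt.Println("bad container ID:", err.Error())
	}
	if err := utils.ValidateNetworkName("pod network"); err != nil {
		// The embedded space makes this name invalid.
		fmt.Println("bad network name:", err.Error())
	}
}
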
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
deleted file mode 100644
index 70f9c5564..000000000
--- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/containers/storage/pkg/archive"
- "github.com/sirupsen/logrus"
-)
-
-var (
- flDebug = flag.Bool("D", false, "debugging output")
- flNewDir = flag.String("newdir", "", "")
- flOldDir = flag.String("olddir", "", "")
- log = logrus.New()
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
- fmt.Printf("%s [OPTIONS]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- log.Out = os.Stderr
- if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
- logrus.SetLevel(logrus.DebugLevel)
- }
- var newDir, oldDir string
-
- if len(*flNewDir) == 0 {
- var err error
- newDir, err = ioutil.TempDir("", "storage-test-newDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(newDir)
- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
- log.Fatal(err)
- }
- } else {
- newDir = *flNewDir
- }
-
- if len(*flOldDir) == 0 {
- oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(oldDir)
- } else {
- oldDir = *flOldDir
- }
-
- changes, err := archive.ChangesDirs(newDir, oldDir)
- if err != nil {
- log.Fatal(err)
- }
-
- a, err := archive.ExportChanges(newDir, changes)
- if err != nil {
- log.Fatal(err)
- }
- defer a.Close()
-
- i, err := io.Copy(os.Stdout, a)
- if err != nil && err != io.EOF {
- log.Fatal(err)
- }
- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
- fileData := []byte("fooo")
- for n := 0; n < numberOfFiles; n++ {
- fileName := fmt.Sprintf("file-%d", n)
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
- return 0, err
- }
- if makeLinks {
- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
- return 0, err
- }
- }
- }
- totalSize := numberOfFiles * len(fileData)
- return totalSize, nil
-}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index 8743abc56..0cdbf14b7 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -2,11 +2,14 @@ package ocicni
import (
"context"
+ "encoding/json"
"errors"
"fmt"
+ "io/ioutil"
"net"
"os"
"path"
+ "path/filepath"
"sort"
"strings"
"sync"
@@ -21,10 +24,11 @@ import (
)
type cniNetworkPlugin struct {
+ cniConfig *libcni.CNIConfig
loNetwork *cniNetwork
sync.RWMutex
- defaultNetName string
+ defaultNetName netName
networks map[string]*cniNetwork
nsManager *nsManager
@@ -47,11 +51,15 @@ type cniNetworkPlugin struct {
cacheDir string
}
+type netName struct {
+ name string
+ changeable bool
+}
+
type cniNetwork struct {
- name string
- filePath string
- NetworkConfig *libcni.NetworkConfigList
- CNIConfig *libcni.CNIConfig
+ name string
+ filePath string
+ config *libcni.NetworkConfigList
}
var errMissingDefaultNetwork = errors.New("Missing CNI default network")
@@ -186,6 +194,8 @@ func (plugin *cniNetworkPlugin) monitorConfDir(start *sync.WaitGroup) {
// If defaultNetName is not empty, a CNI config with that network name will
// be used as the default CNI network, and container network operations will
// fail until that network config is present and valid.
+// If defaultNetName is empty, CNI config files should be reloaded in real time and
+// defaultNetName should be changeable, determined by the sorted order of the config files.
func InitCNI(defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) {
return initCNI(nil, "", defaultNetName, confDir, binDirs...)
}
@@ -198,17 +208,24 @@ func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir strin
if len(binDirs) == 0 {
binDirs = []string{DefaultBinDir}
}
+
plugin := &cniNetworkPlugin{
- defaultNetName: defaultNetName,
- networks: make(map[string]*cniNetwork),
- loNetwork: getLoNetwork(exec, binDirs),
- confDir: confDir,
- binDirs: binDirs,
- shutdownChan: make(chan struct{}),
- done: &sync.WaitGroup{},
- pods: make(map[string]*podLock),
- exec: exec,
- cacheDir: cacheDir,
+ cniConfig: libcni.NewCNIConfig(binDirs, exec),
+ defaultNetName: netName{
+ name: defaultNetName,
+ // If defaultNetName is not assigned in initialization,
+ // it should be changeable
+ changeable: defaultNetName == "",
+ },
+ networks: make(map[string]*cniNetwork),
+ loNetwork: getLoNetwork(),
+ confDir: confDir,
+ binDirs: binDirs,
+ shutdownChan: make(chan struct{}),
+ done: &sync.WaitGroup{},
+ pods: make(map[string]*podLock),
+ exec: exec,
+ cacheDir: cacheDir,
}
if exec == nil {
@@ -246,7 +263,7 @@ func (plugin *cniNetworkPlugin) Shutdown() error {
return nil
}
-func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[string]*cniNetwork, string, error) {
+func loadNetworks(confDir string, cni *libcni.CNIConfig) (map[string]*cniNetwork, string, error) {
files, err := libcni.ConfFiles(confDir, []string{".conf", ".conflist", ".json"})
if err != nil {
return nil, "", err
@@ -284,17 +301,30 @@ func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[st
logrus.Warningf("CNI config list %s has no networks, skipping", confFile)
continue
}
+
+ // Validate the CNI config up front to pre-check that the required
+ // plugins are present.
+ if _, err := cni.ValidateNetworkList(context.TODO(), confList); err != nil {
+ logrus.Warningf("Error validating CNI config file %s: %v", confFile, err)
+ continue
+ }
+
if confList.Name == "" {
confList.Name = path.Base(confFile)
}
+ cniNet := &cniNetwork{
+ name: confList.Name,
+ filePath: confFile,
+ config: confList,
+ }
+
logrus.Infof("Found CNI network %s (type=%v) at %s", confList.Name, confList.Plugins[0].Network.Type, confFile)
- networks[confList.Name] = &cniNetwork{
- name: confList.Name,
- filePath: confFile,
- NetworkConfig: confList,
- CNIConfig: libcni.NewCNIConfig(binDirs, exec),
+ if _, ok := networks[confList.Name]; !ok {
+ networks[confList.Name] = cniNet
+ } else {
+ logrus.Infof("Ignore CNI network %s (type=%v) at %s because already exists", confList.Name, confList.Plugins[0].Network.Type, confFile)
}
if defaultNetName == "" {
@@ -305,39 +335,49 @@ func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[st
return networks, defaultNetName, nil
}
-func getLoNetwork(exec cniinvoke.Exec, binDirs []string) *cniNetwork {
- loConfig, err := libcni.ConfListFromBytes([]byte(`{
- "cniVersion": "0.2.0",
- "name": "cni-loopback",
+const (
+ loIfname string = "lo"
+ loNetname string = "cni-loopback"
+)
+
+func getLoNetwork() *cniNetwork {
+ loConfig, err := libcni.ConfListFromBytes([]byte(fmt.Sprintf(`{
+ "cniVersion": "0.3.1",
+ "name": "%s",
"plugins": [{
"type": "loopback"
}]
-}`))
+}`, loNetname)))
if err != nil {
// The hardcoded config above should always be valid and unit tests will
// catch this
panic(err)
}
loNetwork := &cniNetwork{
- name: "lo",
- NetworkConfig: loConfig,
- CNIConfig: libcni.NewCNIConfig(binDirs, exec),
+ name: loIfname,
+ config: loConfig,
}
return loNetwork
}
func (plugin *cniNetworkPlugin) syncNetworkConfig() error {
- networks, defaultNetName, err := loadNetworks(plugin.exec, plugin.confDir, plugin.binDirs)
+ networks, defaultNetName, err := loadNetworks(plugin.confDir, plugin.cniConfig)
if err != nil {
return err
}
plugin.Lock()
defer plugin.Unlock()
- if plugin.defaultNetName == "" {
- plugin.defaultNetName = defaultNetName
+
+ // Update defaultNetName if it is changeable
+ if plugin.defaultNetName.changeable {
+ plugin.defaultNetName.name = defaultNetName
+ logrus.Infof("Update default CNI network name to %s", defaultNetName)
+ } else {
+ logrus.Warnf("Default CNI network name %s is unchangeable", plugin.defaultNetName.name)
}
+
plugin.networks = networks
return nil
@@ -356,7 +396,7 @@ func (plugin *cniNetworkPlugin) getNetwork(name string) (*cniNetwork, error) {
func (plugin *cniNetworkPlugin) GetDefaultNetworkName() string {
plugin.RLock()
defer plugin.RUnlock()
- return plugin.defaultNetName
+ return plugin.defaultNetName.name
}
func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
@@ -382,27 +422,120 @@ func (plugin *cniNetworkPlugin) Name() string {
return CNIPluginName
}
-func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFunc func(*cniNetwork, string, *PodNetwork, RuntimeConfig) error) error {
+func (plugin *cniNetworkPlugin) loadNetworkFromCache(name string, rt *libcni.RuntimeConf) (*cniNetwork, *libcni.RuntimeConf, error) {
+ cniNet := &cniNetwork{
+ name: name,
+ config: &libcni.NetworkConfigList{
+ Name: name,
+ },
+ }
+
+ var confBytes []byte
+ var err error
+ confBytes, rt, err = plugin.cniConfig.GetNetworkListCachedConfig(cniNet.config, rt)
+ if err != nil {
+ return nil, nil, err
+ } else if confBytes == nil {
+ return nil, nil, fmt.Errorf("network %q not found in CNI cache", name)
+ }
+
+ cniNet.config, err = libcni.ConfListFromBytes(confBytes)
+ if err != nil {
+ // Might be a plain NetworkConfig
+ netConf, err := libcni.ConfFromBytes(confBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Up-convert to a NetworkConfigList
+ cniNet.config, err = libcni.ConfListFromConf(netConf)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return cniNet, rt, nil
+}
+
+type forEachNetworkFn func(*cniNetwork, *PodNetwork, *libcni.RuntimeConf) error
+
+func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache bool, actionFn forEachNetworkFn) error {
networks := podNetwork.Networks
if len(networks) == 0 {
- networks = append(networks, plugin.GetDefaultNetworkName())
+ networks = append(networks, NetAttachment{
+ Name: plugin.GetDefaultNetworkName(),
+ })
}
- for i, netName := range networks {
- // Interface names start at "eth0" and count up for each network
- ifName := fmt.Sprintf("eth%d", i)
- network, err := plugin.getNetwork(netName)
+
+ allIfNames := make(map[string]bool)
+ for _, req := range networks {
+ if req.Ifname != "" {
+ // Make sure the requested name isn't already assigned
+ if allIfNames[req.Ifname] {
+ return fmt.Errorf("network %q requested interface name %q already assigned", req.Name, req.Ifname)
+ }
+ allIfNames[req.Ifname] = true
+ }
+ }
+
+ for _, network := range networks {
+ ifName := network.Ifname
+ if ifName == "" {
+ for i := 0; i < 10000; i++ {
+ candidate := fmt.Sprintf("eth%d", i)
+ if !allIfNames[candidate] {
+ allIfNames[candidate] = true
+ ifName = candidate
+ break
+ }
+ }
+ if ifName == "" {
+ return fmt.Errorf("failed to find free interface name for network %q", network.Name)
+ }
+ }
+
+ rt, err := buildCNIRuntimeConf(plugin.cacheDir, podNetwork, ifName, podNetwork.RuntimeConfig[network.Name])
if err != nil {
- logrus.Errorf(err.Error())
+ logrus.Errorf("error building CNI runtime config: %v", err)
return err
}
- if err := forEachFunc(network, ifName, podNetwork, podNetwork.RuntimeConfig[netName]); err != nil {
+
+ var cniNet *cniNetwork
+ if fromCache {
+ var newRt *libcni.RuntimeConf
+ cniNet, newRt, err = plugin.loadNetworkFromCache(network.Name, rt)
+ if err != nil {
+ logrus.Errorf("error loading cached network config: %v", err)
+ // fall back to loading from existing plugins on disk
+ } else {
+ // Use the updated RuntimeConf
+ rt = newRt
+ }
+ }
+ if cniNet == nil {
+ cniNet, err = plugin.getNetwork(network.Name)
+ if err != nil {
+ logrus.Errorf(err.Error())
+ return err
+ }
+ }
+
+ if err := actionFn(cniNet, podNetwork, rt); err != nil {
return err
}
}
return nil
}
-func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Result, error) {
+func buildLoopbackRuntimeConf(cacheDir string, podNetwork *PodNetwork) *libcni.RuntimeConf {
+ return &libcni.RuntimeConf{
+ ContainerID: podNetwork.ID,
+ NetNS: podNetwork.NetNS,
+ CacheDir: cacheDir,
+ IfName: loIfname,
+ }
+}
+
+func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]NetResult, error) {
if err := plugin.networksAvailable(&podNetwork); err != nil {
return nil, err
}
@@ -410,20 +543,26 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- _, err := plugin.loNetwork.addToNetwork(plugin.cacheDir, &podNetwork, "lo", RuntimeConfig{})
- if err != nil {
+ loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork)
+ if _, err := plugin.loNetwork.addToNetwork(loRt, plugin.cniConfig); err != nil {
logrus.Errorf("Error while adding to cni lo network: %s", err)
return nil, err
}
- results := make([]cnitypes.Result, 0)
- if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
- result, err := network.addToNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig)
+ results := make([]NetResult, 0)
+ if err := plugin.forEachNetwork(&podNetwork, false, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
+ result, err := network.addToNetwork(rt, plugin.cniConfig)
if err != nil {
logrus.Errorf("Error while adding pod to CNI network %q: %s", network.name, err)
return err
}
- results = append(results, result)
+ results = append(results, NetResult{
+ Result: result,
+ NetAttachment: NetAttachment{
+ Name: network.name,
+ Ifname: rt.IfName,
+ },
+ })
return nil
}); err != nil {
return nil, err
@@ -432,16 +571,99 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu
return results, nil
}
+func (plugin *cniNetworkPlugin) getCachedNetworkInfo(containerID string) ([]NetAttachment, error) {
+ cacheDir := libcni.CacheDir
+ if plugin.cacheDir != "" {
+ cacheDir = plugin.cacheDir
+ }
+
+ dirPath := filepath.Join(cacheDir, "results")
+ entries, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ fileNames := make([]string, 0, len(entries))
+ for _, e := range entries {
+ fileNames = append(fileNames, e.Name())
+ }
+ sort.Strings(fileNames)
+
+ attachments := []NetAttachment{}
+ for _, fname := range fileNames {
+ part := fmt.Sprintf("-%s-", containerID)
+ pos := strings.Index(fname, part)
+ if pos <= 0 || pos+len(part) >= len(fname) {
+ continue
+ }
+
+ cacheFile := filepath.Join(dirPath, fname)
+ bytes, err := ioutil.ReadFile(cacheFile)
+ if err != nil {
+ logrus.Warningf("failed to read CNI cache file %s: %v", cacheFile, err)
+ continue
+ }
+
+ cachedInfo := struct {
+ Kind string `json:"kind"`
+ IfName string `json:"ifName"`
+ ContainerID string `json:"containerID"`
+ NetName string `json:"networkName"`
+ }{}
+
+ if err := json.Unmarshal(bytes, &cachedInfo); err != nil {
+ logrus.Warningf("failed to unmarshal CNI cache file %s: %v", cacheFile, err)
+ continue
+ }
+ if cachedInfo.Kind != libcni.CNICacheV1 {
+ logrus.Warningf("unknown CNI cache file %s kind %q", cacheFile, cachedInfo.Kind)
+ continue
+ }
+ if cachedInfo.ContainerID != containerID {
+ continue
+ }
+ // Ignore the loopback interface; it's handled separately
+ if cachedInfo.IfName == loIfname && cachedInfo.NetName == loNetname {
+ continue
+ }
+ if cachedInfo.IfName == "" || cachedInfo.NetName == "" {
+ logrus.Warningf("missing CNI cache file %s ifname %q or netname %q", cacheFile, cachedInfo.IfName, cachedInfo.NetName)
+ continue
+ }
+
+ attachments = append(attachments, NetAttachment{
+ Name: cachedInfo.NetName,
+ Ifname: cachedInfo.IfName,
+ })
+ }
+ return attachments, nil
+}
+
+// TearDownPod tears down pod networks. Prefers cached pod attachment information
+// but falls back to given network attachment information.
func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
+ if len(podNetwork.Networks) == 0 {
+ attachments, err := plugin.getCachedNetworkInfo(podNetwork.ID)
+ if err == nil && len(attachments) > 0 {
+ podNetwork.Networks = attachments
+ }
+ }
+
if err := plugin.networksAvailable(&podNetwork); err != nil {
return err
}
+ loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork)
+ if err := plugin.loNetwork.deleteFromNetwork(loRt, plugin.cniConfig); err != nil {
+ logrus.Errorf("Error while removing pod from CNI lo network: %v", err)
+ // Loopback teardown errors are not fatal
+ }
+
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- return plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
- if err := network.deleteFromNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig); err != nil {
+ return plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
+ if err := network.deleteFromNetwork(rt, plugin.cniConfig); err != nil {
logrus.Errorf("Error while removing pod from CNI network %q: %s", network.name, err)
return err
}
@@ -451,19 +673,25 @@ func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
// GetPodNetworkStatus returns IP addressing and interface details for all
// networks attached to the pod.
-func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cnitypes.Result, error) {
+func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]NetResult, error) {
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- results := make([]cnitypes.Result, 0)
- if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
- result, err := network.checkNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig, plugin.nsManager)
+ results := make([]NetResult, 0)
+ if err := plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
+ result, err := network.checkNetwork(rt, plugin.cniConfig, plugin.nsManager, podNetwork.NetNS)
if err != nil {
logrus.Errorf("Error while checking pod to CNI network %q: %s", network.name, err)
return err
}
if result != nil {
- results = append(results, result)
+ results = append(results, NetResult{
+ Result: result,
+ NetAttachment: NetAttachment{
+ Name: network.name,
+ Ifname: rt.IfName,
+ },
+ })
}
return nil
}); err != nil {
@@ -473,16 +701,9 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cn
return results, nil
}
-func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (cnitypes.Result, error) {
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
- if err != nil {
- logrus.Errorf("Error adding network: %v", err)
- return nil, err
- }
-
- netconf, cninet := network.NetworkConfig, network.CNIConfig
- logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
- res, err := cninet.AddNetworkList(context.Background(), netconf, rt)
+func (network *cniNetwork) addToNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) (cnitypes.Result, error) {
+ logrus.Infof("About to add CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
+ res, err := cni.AddNetworkList(context.Background(), network.config, rt)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
@@ -491,18 +712,10 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork,
return res, nil
}
-func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig, nsManager *nsManager) (cnitypes.Result, error) {
-
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
- if err != nil {
- logrus.Errorf("Error checking network: %v", err)
- return nil, err
- }
-
- netconf, cninet := network.NetworkConfig, network.CNIConfig
- logrus.Infof("About to check CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
+func (network *cniNetwork) checkNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig, nsManager *nsManager, netns string) (cnitypes.Result, error) {
+ logrus.Infof("About to check CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
- gtet, err := cniversion.GreaterThanOrEqualTo(netconf.CNIVersion, "0.4.0")
+ gtet, err := cniversion.GreaterThanOrEqualTo(network.config.CNIVersion, "0.4.0")
if err != nil {
return nil, err
}
@@ -511,15 +724,15 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork,
// When CNIVersion supports Check, use it. Otherwise fall back on what was done initially.
if gtet {
- err = cninet.CheckNetworkList(context.Background(), netconf, rt)
- logrus.Infof("Checking CNI network %s (config version=%v)", netconf.Name, netconf.CNIVersion)
+ err = cni.CheckNetworkList(context.Background(), network.config, rt)
+ logrus.Infof("Checking CNI network %s (config version=%v)", network.name, network.config.CNIVersion)
if err != nil {
logrus.Errorf("Error checking network: %v", err)
return nil, err
}
}
- result, err = cninet.GetNetworkListCachedResult(netconf, rt)
+ result, err = cni.GetNetworkListCachedResult(network.config, rt)
if err != nil {
logrus.Errorf("Error GetNetworkListCachedResult: %v", err)
return nil, err
@@ -528,19 +741,19 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork,
}
// result doesn't exist, create one
- logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", netconf.Name, netconf.CNIVersion, nsManager)
+ logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", network.name, network.config.CNIVersion, nsManager)
var cniInterface *cnicurrent.Interface
ips := []*cnicurrent.IPConfig{}
errs := []error{}
for _, version := range []string{"4", "6"} {
- ip, mac, err := getContainerDetails(nsManager, podNetwork.NetNS, ifName, "-"+version)
+ ip, mac, err := getContainerDetails(nsManager, netns, rt.IfName, "-"+version)
if err == nil {
if cniInterface == nil {
cniInterface = &cnicurrent.Interface{
- Name: ifName,
+ Name: rt.IfName,
Mac: mac.String(),
- Sandbox: podNetwork.NetNS,
+ Sandbox: netns,
}
}
ips = append(ips, &cnicurrent.IPConfig{
@@ -557,25 +770,23 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork,
}
result = &cnicurrent.Result{
- CNIVersion: netconf.CNIVersion,
+ CNIVersion: network.config.CNIVersion,
Interfaces: []*cnicurrent.Interface{cniInterface},
IPs: ips,
}
- return result, nil
-}
-
-func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) error {
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
+ // Result must be the same CNIVersion as the CNI config
+ converted, err := result.GetAsVersion(network.config.CNIVersion)
if err != nil {
- logrus.Errorf("Error deleting network: %v", err)
- return err
+ return nil, err
}
- netconf, cninet := network.NetworkConfig, network.CNIConfig
- logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
- err = cninet.DelNetworkList(context.Background(), netconf, rt)
- if err != nil {
+ return converted, nil
+}
+
+func (network *cniNetwork) deleteFromNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) error {
+ logrus.Infof("About to del CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
+ if err := cni.DelNetworkList(context.Background(), network.config, rt); err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
}
@@ -608,6 +819,16 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string,
rt.Args = append(rt.Args, [2]string{"IP", ip})
}
+ // Add the requested static MAC to CNI_ARGS
+ mac := runtimeConfig.MAC
+ if mac != "" {
+ _, err := net.ParseMAC(mac)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse MAC address %q: %v", mac, err)
+ }
+ rt.Args = append(rt.Args, [2]string{"MAC", mac})
+ }
+
// Set PortMappings in Capabilities
if len(runtimeConfig.PortMappings) != 0 {
rt.CapabilityArgs["portMappings"] = runtimeConfig.PortMappings
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
index 8709711e0..717ecda33 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
@@ -44,6 +44,9 @@ type RuntimeConfig struct {
// with the hostlocal IP allocator. If left unset, an IP will be
// dynamically allocated.
IP string
+ // MAC is a static MAC address to be assigned to the network interface.
+ // If left unset, a MAC will be dynamically allocated.
+ MAC string
// PortMappings is the port mapping of the sandbox.
PortMappings []PortMapping
// Bandwidth is the bandwidth limiting of the pod
@@ -75,9 +78,10 @@ type PodNetwork struct {
// NetNS is the network namespace path of the sandbox.
NetNS string
- // Networks is a list of CNI network names to attach to the sandbox
- // Leave this list empty to attach the default network to the sandbox
- Networks []string
+ // Networks is a list of CNI network names (and optional interface
+ // names) to attach to the sandbox. Leave this list empty to attach the
+ // default network to the sandbox
+ Networks []NetAttachment
// NetworkConfig is configuration specific to a single CNI network.
// It is optional, and can be omitted for some or all specified networks
@@ -85,6 +89,24 @@ type PodNetwork struct {
RuntimeConfig map[string]RuntimeConfig
}
+// NetAttachment describes a container network attachment
+type NetAttachment struct {
+ // NetName contains the name of the CNI network to which the container
+ // should be or is attached
+ Name string
+ // Ifname contains the optional interface name of the attachment
+ Ifname string
+}
+
+// NetResult contains the result of the network attachment operation
+type NetResult struct {
+ // Result is the CNI Result
+ Result types.Result
+ // NetAttachment contains the network and interface names of this
+ // network attachment
+ NetAttachment
+}
+
// CNIPlugin is the interface that needs to be implemented by a plugin
type CNIPlugin interface {
// Name returns the plugin's name. This will be used when searching
@@ -98,13 +120,13 @@ type CNIPlugin interface {
// SetUpPod is the method called after the sandbox container of
// the pod has been created but before the other containers of the
// pod are launched.
- SetUpPod(network PodNetwork) ([]types.Result, error)
+ SetUpPod(network PodNetwork) ([]NetResult, error)
// TearDownPod is the method called before a pod's sandbox container will be deleted
TearDownPod(network PodNetwork) error
// Status is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox
- GetPodNetworkStatus(network PodNetwork) ([]types.Result, error)
+ GetPodNetworkStatus(network PodNetwork) ([]NetResult, error)
// NetworkStatus returns error if the network plugin is in error state
Status() error
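
Together with the ocicni.go changes above, these types reshape the caller-facing API: PodNetwork.Networks now carries NetAttachment values (network name plus optional interface name), and SetUpPod / GetPodNetworkStatus return NetResult so callers learn which interface each result belongs to. A hedged sketch of the new shape; the config and plugin directories are typical Podman locations rather than anything mandated by this change, and all literal values are illustrative:

package main

import (
	"fmt"

	"github.com/cri-o/ocicni/pkg/ocicni"
)

func main() {
	plugin, err := ocicni.InitCNI("podman", "/etc/cni/net.d", "/usr/libexec/cni")
	if err != nil {
		panic(err)
	}

	podNet := ocicni.PodNetwork{
		ID:    "0123456789abcdef",
		NetNS: "/var/run/netns/mypod",
		// Name the attachment explicitly; leaving Ifname empty lets
		// ocicni assign the next free ethN interface name.
		Networks: []ocicni.NetAttachment{{Name: "podman", Ifname: "eth0"}},
		RuntimeConfig: map[string]ocicni.RuntimeConfig{
			"podman": {IP: "10.88.0.5", MAC: "0a:1b:2c:3d:4e:5f"},
		},
	}

	results, err := plugin.SetUpPod(podNet)
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		// NetResult embeds NetAttachment, so the network and interface
		// names ride along with the CNI result.
		fmt.Printf("network %s attached on %s\n", r.Name, r.Ifname)
	}
}
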
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
deleted file mode 100644
index 495db809e..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/docker/docker/pkg/archive"
- "github.com/sirupsen/logrus"
-)
-
-var (
- flDebug = flag.Bool("D", false, "debugging output")
- flNewDir = flag.String("newdir", "", "")
- flOldDir = flag.String("olddir", "", "")
- log = logrus.New()
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
- fmt.Printf("%s [OPTIONS]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- log.Out = os.Stderr
- if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
- logrus.SetLevel(logrus.DebugLevel)
- }
- var newDir, oldDir string
-
- if len(*flNewDir) == 0 {
- var err error
- newDir, err = ioutil.TempDir("", "docker-test-newDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(newDir)
- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
- log.Fatal(err)
- }
- } else {
- newDir = *flNewDir
- }
-
- if len(*flOldDir) == 0 {
- oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(oldDir)
- } else {
- oldDir = *flOldDir
- }
-
- changes, err := archive.ChangesDirs(newDir, oldDir)
- if err != nil {
- log.Fatal(err)
- }
-
- a, err := archive.ExportChanges(newDir, changes)
- if err != nil {
- log.Fatal(err)
- }
- defer a.Close()
-
- i, err := io.Copy(os.Stdout, a)
- if err != nil && err != io.EOF {
- log.Fatal(err)
- }
- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
- fileData := []byte("fooo")
- for n := 0; n < numberOfFiles; n++ {
- fileName := fmt.Sprintf("file-%d", n)
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
- return 0, err
- }
- if makeLinks {
- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
- return 0, err
- }
- }
- }
- totalSize := numberOfFiles * len(fileData)
- return totalSize, nil
-}
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
index 95ae54fbf..29b31cf78 100644
--- a/vendor/github.com/json-iterator/go/iter.go
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -74,6 +74,7 @@ type Iterator struct {
buf []byte
head int
tail int
+ depth int
captureStartedAt int
captured []byte
Error error
@@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator {
buf: nil,
head: 0,
tail: 0,
+ depth: 0,
}
}
@@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
buf: make([]byte, bufSize),
head: 0,
tail: 0,
+ depth: 0,
}
}
@@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator {
buf: input,
head: 0,
tail: len(input),
+ depth: 0,
}
}
@@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator {
iter.reader = reader
iter.head = 0
iter.tail = 0
+ iter.depth = 0
return iter
}
@@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator {
iter.buf = input
iter.head = 0
iter.tail = len(input)
+ iter.depth = 0
return iter
}
@@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} {
return nil
}
}
+
+// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
+const maxDepth = 10000
+
+func (iter *Iterator) incrementDepth() (success bool) {
+ iter.depth++
+ if iter.depth <= maxDepth {
+ return true
+ }
+ iter.ReportError("incrementDepth", "exceeded max depth")
+ return false
+}
+
+func (iter *Iterator) decrementDepth() (success bool) {
+ iter.depth--
+ if iter.depth >= 0 {
+ return true
+ }
+ iter.ReportError("decrementDepth", "unexpected negative nesting")
+ return false
+}
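
The new depth counter bounds how deeply nested a document may be before the iterator reports an error, protecting Unmarshal from unbounded recursion on hostile input; RFC 7159 explicitly permits parsers to cap nesting like this. A small sketch of the expected behaviour (hedged: the exact outcome depends on the decoder path taken, but array reads go through ReadArrayCB and should hit the guard):

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Build a document nested one level past the new maxDepth of 10000.
	depth := 10001
	doc := strings.Repeat("[", depth) + strings.Repeat("]", depth)

	var v interface{}
	err := jsoniter.Unmarshal([]byte(doc), &v)
	// With the depth counter in place this should fail with an
	// "exceeded max depth" error instead of recursing without bound.
	fmt.Println(err)
}
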
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
index 6188cb457..204fe0e09 100644
--- a/vendor/github.com/json-iterator/go/iter_array.go
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) {
func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
c := iter.nextToken()
if c == '[' {
+ if !iter.incrementDepth() {
+ return false
+ }
c = iter.nextToken()
if c != ']' {
iter.unreadByte()
if !callback(iter) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
for c == ',' {
if !callback(iter) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != ']' {
iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+ iter.decrementDepth()
return false
}
- return true
+ return iter.decrementDepth()
}
- return true
+ return iter.decrementDepth()
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
index 1c5757671..b65137114 100644
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
var field string
if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
@@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
@@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadObjectCB", `object not ended with }`)
+ iter.decrementDepth()
return false
}
- return true
+ return iter.decrementDepth()
}
if c == '}' {
- return true
+ return iter.decrementDepth()
}
iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if c == 'n' {
@@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
field := iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
@@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
field = iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadMapCB", `object not ended with }`)
+ iter.decrementDepth()
return false
}
- return true
+ return iter.decrementDepth()
}
if c == '}' {
- return true
+ return iter.decrementDepth()
}
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if c == 'n' {
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
index 8fcdc3b69..9303de41e 100644
--- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() {
func (iter *Iterator) skipArray() {
level := 1
+ if !iter.incrementDepth() {
+ return
+ }
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
@@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() {
i = iter.head - 1 // it will be i++ soon
case '[': // If open symbol, increase level
level++
+ if !iter.incrementDepth() {
+ return
+ }
case ']': // If close symbol, decrease level
level--
+ if !iter.decrementDepth() {
+ return
+ }
// If we have returned to the original level, we're done
if level == 0 {
@@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() {
func (iter *Iterator) skipObject() {
level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
@@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() {
i = iter.head - 1 // it will be i++ soon
case '{': // If open symbol, increase level
level++
+ if !iter.incrementDepth() {
+ return
+ }
case '}': // If close symbol, decrease level
level--
+ if !iter.decrementDepth() {
+ return
+ }
// If we have returned to the original level, we're done
if level == 0 {
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
index 4459e203f..74974ba74 100644
--- a/vendor/github.com/json-iterator/go/reflect.go
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx {
// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
+ depth := iter.depth
cacheKey := reflect2.RTypeOf(obj)
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
@@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) {
return
}
decoder.Decode(ptr, iter)
+ if iter.depth != depth {
+ iter.ReportError("ReadVal", "unexpected mismatched nesting")
+ return
+ }
}
// WriteVal copy the go interface into underlying JSON, same as json.Marshal
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
index 05e8fbf1f..e27e8d191 100644
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
- tagParts := strings.Split(tag, ",")
if tag == "-" {
continue
}
+ tagParts := strings.Split(tag, ",")
if field.Anonymous() && (tag == "" || tagParts[0] == "") {
if field.Type().Kind() == reflect.Struct {
structDescriptor := describeStruct(ctx, field.Type())
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
index 547b4421e..08e9a3912 100644
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -249,6 +249,10 @@ type mapEncoder struct {
}
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
stream.WriteObjectStart()
iter := encoder.mapType.UnsafeIterate(ptr)
for i := 0; iter.HasNext(); i++ {
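
The practical effect of the added nil check is that a nil map now encodes as null, matching encoding/json, where the encoder previously emitted an empty object. A minimal sketch:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var m map[string]int // nil map, never initialized

	out, err := jsoniter.Marshal(m)
	if err != nil {
		panic(err)
	}
	// With the nil check in mapEncoder.Encode this prints "null";
	// before the change it produced "{}".
	fmt.Println(string(out))
}
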
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
index fea50719d..3e21f3756 100644
--- a/vendor/github.com/json-iterator/go/reflect_marshaler.go
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -3,8 +3,9 @@ package jsoniter
import (
"encoding"
"encoding/json"
- "github.com/modern-go/reflect2"
"unsafe"
+
+ "github.com/modern-go/reflect2"
)
var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
@@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteNil()
return
}
- bytes, err := json.Marshal(obj)
+ marshaler := obj.(json.Marshaler)
+ bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
+ // html escape was already done by jsoniter,
+ // but the extra '\n' should be trimmed
+ l := len(bytes)
+ if l > 0 && bytes[l-1] == '\n' {
+ bytes = bytes[:l-1]
+ }
stream.Write(bytes)
}
}
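
marshalerEncoder now calls the value's MarshalJSON directly instead of round-tripping it through json.Marshal, and trims a single trailing newline from the output. A hedged sketch with a hypothetical json.Marshaler whose output ends in '\n' (the type and its payload are made up for illustration):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// rawDoc is a hypothetical json.Marshaler whose output ends in a newline.
type rawDoc struct{ body string }

func (r rawDoc) MarshalJSON() ([]byte, error) {
	return []byte(r.body + "\n"), nil
}

func main() {
	out, err := jsoniter.Marshal(rawDoc{body: `{"a":1}`})
	if err != nil {
		panic(err)
	}
	// The encoder invokes MarshalJSON directly and drops the trailing
	// newline, so the embedded document round-trips unchanged.
	fmt.Println(string(out))
}
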
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index 932641ac4..5ad5cc561 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
var c byte
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
@@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if c != '}' {
iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
}
+ iter.decrementDepth()
}
func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
@@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
if iter.readFieldHash() == decoder.fieldHash {
decoder.fieldDecoder.Decode(ptr, iter)
@@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type twoFieldsStructDecoder struct {
@@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type threeFieldsStructDecoder struct {
@@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type fourFieldsStructDecoder struct {
@@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type fiveFieldsStructDecoder struct {
@@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type sixFieldsStructDecoder struct {
@@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type sevenFieldsStructDecoder struct {
@@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type eightFieldsStructDecoder struct {
@@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type nineFieldsStructDecoder struct {
@@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type tenFieldsStructDecoder struct {
@@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type structFieldDecoder struct {
diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go
deleted file mode 100644
index 154c89a48..000000000
--- a/vendor/github.com/klauspost/compress/flate/gen.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// This program generates fixedhuff.go
-// Invoke as
-//
-// go run gen.go -output fixedhuff.go
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io/ioutil"
- "log"
-)
-
-var filename = flag.String("output", "fixedhuff.go", "output file name")
-
-const maxCodeLen = 16
-
-// Note: the definition of the huffmanDecoder struct is copied from
-// inflate.go, as it is private to the implementation.
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
- huffmanChunkBits = 9
- huffmanNumChunks = 1 << huffmanChunkBits
- huffmanCountMask = 15
- huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
- min int // the minimum code length
- chunks [huffmanNumChunks]uint32 // chunks as described above
- links [][]uint32 // overflow links
- linkMask uint32 // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(bits []int) bool {
- // Sanity enables additional runtime tests during Huffman
- // table construction. It's intended to be used during
- // development to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if h.min != 0 {
- *h = huffmanDecoder{}
- }
-
- // Count number of codes of each length,
- // compute min and max length.
- var count [maxCodeLen]int
- var min, max int
- for _, n := range bits {
- if n == 0 {
- continue
- }
- if min == 0 || n < min {
- min = n
- }
- if n > max {
- max = n
- }
- count[n]++
- }
-
- // Empty tree. The decompressor.huffSym function will fail later if the tree
- // is used. Technically, an empty tree is only valid for the HDIST tree and
- // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
- // is guaranteed to fail since it will attempt to use the tree to decode the
- // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
- // guaranteed to fail later since the compressed data section must be
- // composed of at least one symbol (the end-of-block marker).
- if max == 0 {
- return true
- }
-
- code := 0
- var nextcode [maxCodeLen]int
- for i := min; i <= max; i++ {
- code <<= 1
- nextcode[i] = code
- code += count[i]
- }
-
- // Check that the coding is complete (i.e., that we've
- // assigned all 2-to-the-max possible bit sequences).
- // Exception: To be compatible with zlib, we also need to
- // accept degenerate single-code codings. See also
- // TestDegenerateHuffmanCoding.
- if code != 1<<uint(max) && !(code == 1 && max == 1) {
- return false
- }
-
- h.min = min
- if max > huffmanChunkBits {
- numLinks := 1 << (uint(max) - huffmanChunkBits)
- h.linkMask = uint32(numLinks - 1)
-
- // create link tables
- link := nextcode[huffmanChunkBits+1] >> 1
- h.links = make([][]uint32, huffmanNumChunks-link)
- for j := uint(link); j < huffmanNumChunks; j++ {
- reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
- reverse >>= uint(16 - huffmanChunkBits)
- off := j - uint(link)
- if sanity && h.chunks[reverse] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
- h.links[off] = make([]uint32, numLinks)
- }
- }
-
- for i, n := range bits {
- if n == 0 {
- continue
- }
- code := nextcode[n]
- nextcode[n]++
- chunk := uint32(i<<huffmanValueShift | n)
- reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
- reverse >>= uint(16 - n)
- if n <= huffmanChunkBits {
- for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
- // We should never need to overwrite
- // an existing chunk. Also, 0 is
- // never a valid chunk, because the
- // lower 4 "count" bits should be
- // between 1 and 15.
- if sanity && h.chunks[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[off] = chunk
- }
- } else {
- j := reverse & (huffmanNumChunks - 1)
- if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
- // Longer codes should have been
- // associated with a link table above.
- panic("impossible: not an indirect chunk")
- }
- value := h.chunks[j] >> huffmanValueShift
- linktab := h.links[value]
- reverse >>= huffmanChunkBits
- for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
- if sanity && linktab[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- linktab[off] = chunk
- }
- }
- }
-
- if sanity {
- // Above we've sanity checked that we never overwrote
- // an existing entry. Here we additionally check that
- // we filled the tables completely.
- for i, chunk := range h.chunks {
- if chunk == 0 {
- // As an exception, in the degenerate
- // single-code case, we allow odd
- // chunks to be missing.
- if code == 1 && i%2 == 1 {
- continue
- }
- panic("impossible: missing chunk")
- }
- }
- for _, linktab := range h.links {
- for _, chunk := range linktab {
- if chunk == 0 {
- panic("impossible: missing chunk")
- }
- }
- }
- }
-
- return true
-}
-
-func main() {
- flag.Parse()
-
- var h huffmanDecoder
- var bits [288]int
- initReverseByte()
- for i := 0; i < 144; i++ {
- bits[i] = 8
- }
- for i := 144; i < 256; i++ {
- bits[i] = 9
- }
- for i := 256; i < 280; i++ {
- bits[i] = 7
- }
- for i := 280; i < 288; i++ {
- bits[i] = 8
- }
- h.init(bits[:])
- if h.links != nil {
- log.Fatal("Unexpected links table in fixed Huffman decoder")
- }
-
- var buf bytes.Buffer
-
- fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.`+"\n\n")
-
- fmt.Fprintln(&buf, "package flate")
- fmt.Fprintln(&buf)
- fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
- fmt.Fprintln(&buf)
- fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
- fmt.Fprintf(&buf, "\t%d,\n", h.min)
- fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
- for i := 0; i < huffmanNumChunks; i++ {
- if i&7 == 0 {
- fmt.Fprintf(&buf, "\t\t")
- } else {
- fmt.Fprintf(&buf, " ")
- }
- fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
- if i&7 == 7 {
- fmt.Fprintln(&buf)
- }
- }
- fmt.Fprintln(&buf, "\t},")
- fmt.Fprintln(&buf, "\tnil, 0,")
- fmt.Fprintln(&buf, "}")
-
- data, err := format.Source(buf.Bytes())
- if err != nil {
- log.Fatal(err)
- }
- err = ioutil.WriteFile(*filename, data, 0644)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-var reverseByte [256]byte
-
-func initReverseByte() {
- for x := 0; x < 256; x++ {
- var result byte
- for i := uint(0); i < 8; i++ {
- result |= byte(((x >> i) & 1) << (7 - i))
- }
- reverseByte[x] = result
- }
-}
diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go
deleted file mode 100644
index 437333d29..000000000
--- a/vendor/github.com/klauspost/cpuid/private-gen.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/printer"
- "go/token"
- "io"
- "io/ioutil"
- "log"
- "os"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-var inFiles = []string{"cpuid.go", "cpuid_test.go"}
-var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
-var fileSet = token.NewFileSet()
-var reWrites = []rewrite{
- initRewrite("CPUInfo -> cpuInfo"),
- initRewrite("Vendor -> vendor"),
- initRewrite("Flags -> flags"),
- initRewrite("Detect -> detect"),
- initRewrite("CPU -> cpu"),
-}
-var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
- // cpuid_test.go
- "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
-}
-
-var excludePrefixes = []string{"test", "benchmark"}
-
-func main() {
- Package := "private"
- parserMode := parser.ParseComments
- exported := make(map[string]rewrite)
- for _, file := range inFiles {
- in, err := os.Open(file)
- if err != nil {
- log.Fatalf("opening input", err)
- }
-
- src, err := ioutil.ReadAll(in)
- if err != nil {
- log.Fatalf("reading input", err)
- }
-
- astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
- if err != nil {
- log.Fatalf("parsing input", err)
- }
-
- for _, rw := range reWrites {
- astfile = rw(astfile)
- }
-
- // Inspect the AST and print all identifiers and literals.
- var startDecl token.Pos
- var endDecl token.Pos
- ast.Inspect(astfile, func(n ast.Node) bool {
- var s string
- switch x := n.(type) {
- case *ast.Ident:
- if x.IsExported() {
- t := strings.ToLower(x.Name)
- for _, pre := range excludePrefixes {
- if strings.HasPrefix(t, pre) {
- return true
- }
- }
- if excludeNames[t] != true {
- //if x.Pos() > startDecl && x.Pos() < endDecl {
- exported[x.Name] = initRewrite(x.Name + " -> " + t)
- }
- }
-
- case *ast.GenDecl:
- if x.Tok == token.CONST && x.Lparen > 0 {
- startDecl = x.Lparen
- endDecl = x.Rparen
- // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
- }
- }
- if s != "" {
- fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
- }
- return true
- })
-
- for _, rw := range exported {
- astfile = rw(astfile)
- }
-
- var buf bytes.Buffer
-
- printer.Fprint(&buf, fileSet, astfile)
-
- // Remove package documentation and insert information
- s := buf.String()
- ind := strings.Index(buf.String(), "\npackage cpuid")
- s = s[ind:]
- s = "// Generated, DO NOT EDIT,\n" +
- "// but copy it to your own project and rename the package.\n" +
- "// See more at http://github.com/klauspost/cpuid\n" +
- s
-
- outputName := Package + string(os.PathSeparator) + file
-
- err = ioutil.WriteFile(outputName, []byte(s), 0644)
- if err != nil {
- log.Fatalf("writing output: %s", err)
- }
- log.Println("Generated", outputName)
- }
-
- for _, file := range copyFiles {
- dst := ""
- if strings.HasPrefix(file, "cpuid") {
- dst = Package + string(os.PathSeparator) + file
- } else {
- dst = Package + string(os.PathSeparator) + "cpuid_" + file
- }
- err := copyFile(file, dst)
- if err != nil {
- log.Fatalf("copying file: %s", err)
- }
- log.Println("Copied", dst)
- }
-}
-
-// CopyFile copies a file from src to dst. If src and dst files exist, and are
-// the same, then return success. Copy the file contents from src to dst.
-func copyFile(src, dst string) (err error) {
- sfi, err := os.Stat(src)
- if err != nil {
- return
- }
- if !sfi.Mode().IsRegular() {
- // cannot copy non-regular files (e.g., directories,
- // symlinks, devices, etc.)
- return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
- }
- dfi, err := os.Stat(dst)
- if err != nil {
- if !os.IsNotExist(err) {
- return
- }
- } else {
- if !(dfi.Mode().IsRegular()) {
- return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
- }
- if os.SameFile(sfi, dfi) {
- return
- }
- }
- err = copyFileContents(src, dst)
- return
-}
-
-// copyFileContents copies the contents of the file named src to the file named
-// by dst. The file will be created if it does not already exist. If the
-// destination file exists, all its contents will be replaced by the contents
-// of the source file.
-func copyFileContents(src, dst string) (err error) {
- in, err := os.Open(src)
- if err != nil {
- return
- }
- defer in.Close()
- out, err := os.Create(dst)
- if err != nil {
- return
- }
- defer func() {
- cerr := out.Close()
- if err == nil {
- err = cerr
- }
- }()
- if _, err = io.Copy(out, in); err != nil {
- return
- }
- err = out.Sync()
- return
-}
-
-type rewrite func(*ast.File) *ast.File
-
-// Mostly copied from gofmt
-func initRewrite(rewriteRule string) rewrite {
- f := strings.Split(rewriteRule, "->")
- if len(f) != 2 {
- fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
- os.Exit(2)
- }
- pattern := parseExpr(f[0], "pattern")
- replace := parseExpr(f[1], "replacement")
- return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
-}
-
-// parseExpr parses s as an expression.
-// It might make sense to expand this to allow statement patterns,
-// but there are problems with preserving formatting and also
-// with what a wildcard for a statement looks like.
-func parseExpr(s, what string) ast.Expr {
- x, err := parser.ParseExpr(s)
- if err != nil {
- fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
- os.Exit(2)
- }
- return x
-}
-
-// Keep this function for debugging.
-/*
-func dump(msg string, val reflect.Value) {
- fmt.Printf("%s:\n", msg)
- ast.Print(fileSet, val.Interface())
- fmt.Println()
-}
-*/
-
-// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
-func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
- cmap := ast.NewCommentMap(fileSet, p, p.Comments)
- m := make(map[string]reflect.Value)
- pat := reflect.ValueOf(pattern)
- repl := reflect.ValueOf(replace)
-
- var rewriteVal func(val reflect.Value) reflect.Value
- rewriteVal = func(val reflect.Value) reflect.Value {
- // don't bother if val is invalid to start with
- if !val.IsValid() {
- return reflect.Value{}
- }
- for k := range m {
- delete(m, k)
- }
- val = apply(rewriteVal, val)
- if match(m, pat, val) {
- val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
- }
- return val
- }
-
- r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
- r.Comments = cmap.Filter(r).Comments() // recreate comments list
- return r
-}
-
-// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
-func set(x, y reflect.Value) {
- // don't bother if x cannot be set or y is invalid
- if !x.CanSet() || !y.IsValid() {
- return
- }
- defer func() {
- if x := recover(); x != nil {
- if s, ok := x.(string); ok &&
- (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
- // x cannot be set to y - ignore this rewrite
- return
- }
- panic(x)
- }
- }()
- x.Set(y)
-}
-
-// Values/types for special cases.
-var (
- objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
- scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
-
- identType = reflect.TypeOf((*ast.Ident)(nil))
- objectPtrType = reflect.TypeOf((*ast.Object)(nil))
- positionType = reflect.TypeOf(token.NoPos)
- callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
- scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
-)
-
-// apply replaces each AST field x in val with f(x), returning val.
-// To avoid extra conversions, f operates on the reflect.Value form.
-func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
- if !val.IsValid() {
- return reflect.Value{}
- }
-
- // *ast.Objects introduce cycles and are likely incorrect after
- // rewrite; don't follow them but replace with nil instead
- if val.Type() == objectPtrType {
- return objectPtrNil
- }
-
- // similarly for scopes: they are likely incorrect after a rewrite;
- // replace them with nil
- if val.Type() == scopePtrType {
- return scopePtrNil
- }
-
- switch v := reflect.Indirect(val); v.Kind() {
- case reflect.Slice:
- for i := 0; i < v.Len(); i++ {
- e := v.Index(i)
- set(e, f(e))
- }
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- e := v.Field(i)
- set(e, f(e))
- }
- case reflect.Interface:
- e := v.Elem()
- set(v, f(e))
- }
- return val
-}
-
-func isWildcard(s string) bool {
- rune, size := utf8.DecodeRuneInString(s)
- return size == len(s) && unicode.IsLower(rune)
-}
-
-// match returns true if pattern matches val,
-// recording wildcard submatches in m.
-// If m == nil, match checks whether pattern == val.
-func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
- // Wildcard matches any expression. If it appears multiple
- // times in the pattern, it must match the same expression
- // each time.
- if m != nil && pattern.IsValid() && pattern.Type() == identType {
- name := pattern.Interface().(*ast.Ident).Name
- if isWildcard(name) && val.IsValid() {
- // wildcards only match valid (non-nil) expressions.
- if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
- if old, ok := m[name]; ok {
- return match(nil, old, val)
- }
- m[name] = val
- return true
- }
- }
- }
-
- // Otherwise, pattern and val must match recursively.
- if !pattern.IsValid() || !val.IsValid() {
- return !pattern.IsValid() && !val.IsValid()
- }
- if pattern.Type() != val.Type() {
- return false
- }
-
- // Special cases.
- switch pattern.Type() {
- case identType:
- // For identifiers, only the names need to match
- // (and none of the other *ast.Object information).
- // This is a common case, handle it all here instead
- // of recursing down any further via reflection.
- p := pattern.Interface().(*ast.Ident)
- v := val.Interface().(*ast.Ident)
- return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
- case objectPtrType, positionType:
- // object pointers and token positions always match
- return true
- case callExprType:
- // For calls, the Ellipsis fields (token.Position) must
- // match since that is how f(x) and f(x...) are different.
- // Check them here but fall through for the remaining fields.
- p := pattern.Interface().(*ast.CallExpr)
- v := val.Interface().(*ast.CallExpr)
- if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
- return false
- }
- }
-
- p := reflect.Indirect(pattern)
- v := reflect.Indirect(val)
- if !p.IsValid() || !v.IsValid() {
- return !p.IsValid() && !v.IsValid()
- }
-
- switch p.Kind() {
- case reflect.Slice:
- if p.Len() != v.Len() {
- return false
- }
- for i := 0; i < p.Len(); i++ {
- if !match(m, p.Index(i), v.Index(i)) {
- return false
- }
- }
- return true
-
- case reflect.Struct:
- for i := 0; i < p.NumField(); i++ {
- if !match(m, p.Field(i), v.Field(i)) {
- return false
- }
- }
- return true
-
- case reflect.Interface:
- return match(m, p.Elem(), v.Elem())
- }
-
- // Handle token integers, etc.
- return p.Interface() == v.Interface()
-}
-
-// subst returns a copy of pattern with values from m substituted in place
-// of wildcards and pos used as the position of tokens from the pattern.
-// if m == nil, subst returns a copy of pattern and doesn't change the line
-// number information.
-func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
- if !pattern.IsValid() {
- return reflect.Value{}
- }
-
- // Wildcard gets replaced with map value.
- if m != nil && pattern.Type() == identType {
- name := pattern.Interface().(*ast.Ident).Name
- if isWildcard(name) {
- if old, ok := m[name]; ok {
- return subst(nil, old, reflect.Value{})
- }
- }
- }
-
- if pos.IsValid() && pattern.Type() == positionType {
- // use new position only if old position was valid in the first place
- if old := pattern.Interface().(token.Pos); !old.IsValid() {
- return pattern
- }
- return pos
- }
-
- // Otherwise copy.
- switch p := pattern; p.Kind() {
- case reflect.Slice:
- v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
- for i := 0; i < p.Len(); i++ {
- v.Index(i).Set(subst(m, p.Index(i), pos))
- }
- return v
-
- case reflect.Struct:
- v := reflect.New(p.Type()).Elem()
- for i := 0; i < p.NumField(); i++ {
- v.Field(i).Set(subst(m, p.Field(i), pos))
- }
- return v
-
- case reflect.Ptr:
- v := reflect.New(p.Type()).Elem()
- if elem := p.Elem(); elem.IsValid() {
- v.Set(subst(m, elem, pos).Addr())
- }
- return v
-
- case reflect.Interface:
- v := reflect.New(p.Type()).Elem()
- if elem := p.Elem(); elem.IsValid() {
- v.Set(subst(m, elem, pos))
- }
- return v
- }
-
- return pattern
-}
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index f67074016..ecbdd2734 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.7.1
+
+### Fixes
+- Bump go-yaml version to cover fixed ddos heuristic (#362) [95e431e]
+
## 1.7.0
### Features
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 65eedf696..177a541c4 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -11,5 +11,6 @@ require (
golang.org/x/text v0.3.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.1
+ gopkg.in/yaml.v2 v2.2.4
)
+
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index b23f6ef02..bbcc05d3e 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -20,5 +20,5 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index b145768cf..85505f2ec 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.7.0"
+const GOMEGA_VERSION = "1.7.1"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
index 0d7bdd9ab..e81cc8805 100644
--- a/vendor/github.com/uber/jaeger-client-go/.travis.yml
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -7,21 +7,22 @@ dist: trusty
matrix:
include:
- - go: 1.12.x
+ - go: 1.13.x
env:
- TESTS=true
- USE_DEP=true
- COVERAGE=true
- - go: 1.12.x
+ - go: 1.13.x
env:
- USE_DEP=true
- CROSSDOCK=true
- - go: 1.12.x
+ - go: 1.13.x
env:
- TESTS=true
- USE_DEP=false
- USE_GLIDE=true
- - go: 1.11.x
+ # test with previous version of Go
+ - go: 1.12.x
env:
- TESTS=true
- USE_DEP=true
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 31b22e40c..c4590bf93 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,6 +1,45 @@
Changes by Version
==================
+2.20.0 (2019-11-06)
+-------------------
+
+## New Features
+
+- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
+
+ Sampling state is shared between all spans of the trace that are still in memory.
+ This allows implementation of delayed sampling decisions (see below).
+
+- Support delayed sampling decisions (#449) -- Yuri Shkuro
+
+ This is a large structural change to how the samplers work.
+ It allows some samplers to be executed multiple times on different
+ span events (like setting a tag) and make a positive sampling decision
+ later in the span life cycle, or even based on children spans.
+ See [README](./README.md#delayed-sampling) for more details.
+
+ There is a related minor change in behavior of the adaptive (per-operation) sampler,
+ which will no longer re-sample the trace when `span.SetOperationName()` is called, i.e. the
+ operation used to make the sampling decision is always the one provided at span creation.
+
+- Add experimental tag matching sampler (#452) -- Yuri Shkuro
+
+ A sampler that can sample a trace based on a certain tag added to the root
+ span or one of its local (in-process) children. The sampler can be used with
+ another experimental `PrioritySampler` that allows multiple samplers to try
+ to make a sampling decision, in a certain priority order.
+
+- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
+- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
+
+## Minor patches
+
+- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
+- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
+- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
+- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
+
2.19.0 (2019-09-23)
-------------------
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
index 1ed86f4a7..5a42ebf16 100644
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -2,6 +2,14 @@
[[projects]]
+ digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
+ name = "github.com/BurntSushi/toml"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
+ version = "v0.3.1"
+
+[[projects]]
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
@@ -138,14 +146,6 @@
version = "v1.4.0"
[[projects]]
- digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81"
- name = "github.com/uber-go/atomic"
- packages = ["."]
- pruneopts = "UT"
- revision = "df976f2515e274675050de7b3f42545de80594fd"
- version = "v1.4.0"
-
-[[projects]]
digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
name = "github.com/uber/jaeger-lib"
packages = [
@@ -158,23 +158,31 @@
version = "v2.2.0"
[[projects]]
- digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81"
+ digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600"
name = "go.uber.org/atomic"
packages = ["."]
pruneopts = "UT"
- revision = "df976f2515e274675050de7b3f42545de80594fd"
- version = "v1.4.0"
+ revision = "9dc4df04d0d1c39369750a9f6c32c39560672089"
+ version = "v1.5.0"
[[projects]]
- digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a"
+ digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902"
name = "go.uber.org/multierr"
packages = ["."]
pruneopts = "UT"
- revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
- version = "v1.1.0"
+ revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27"
+ version = "v1.3.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4"
+ name = "go.uber.org/tools"
+ packages = ["update-license"]
+ pruneopts = "UT"
+ revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98"
[[projects]]
- digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e"
+ digest = "1:6be13632ab4bd5842a097abb3aabac045a8601e19a10da4239e7d8bd83d4b83c"
name = "go.uber.org/zap"
packages = [
".",
@@ -185,8 +193,19 @@
"zapcore",
]
pruneopts = "UT"
- revision = "27376062155ad36be76b0f12cf1572a221d3a48c"
- version = "v1.10.0"
+ revision = "a6015e13fab9b744d96085308ce4e8f11bad1996"
+ version = "v1.12.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
+ name = "golang.org/x/lint"
+ packages = [
+ ".",
+ "golint",
+ ]
+ pruneopts = "UT"
+ revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
[[projects]]
branch = "master"
@@ -197,23 +216,81 @@
"context/ctxhttp",
]
pruneopts = "UT"
- revision = "aa69164e4478b84860dc6769c710c699c67058a3"
+ revision = "0deb6923b6d97481cb43bc1043fe5b72a0143032"
[[projects]]
branch = "master"
- digest = "1:712252802d318c8107d8f2136b99aa10feb17eca715245ed915199fbfc260155"
+ digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a"
name = "golang.org/x/sys"
packages = ["windows"]
pruneopts = "UT"
- revision = "0a153f010e6963173baba2306531d173aa843137"
+ revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611"
+
+[[projects]]
+ branch = "master"
+ digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f"
+ name = "golang.org/x/tools"
+ packages = [
+ "go/analysis",
+ "go/analysis/passes/inspect",
+ "go/ast/astutil",
+ "go/ast/inspector",
+ "go/buildutil",
+ "go/gcexportdata",
+ "go/internal/gcimporter",
+ "go/internal/packagesdriver",
+ "go/packages",
+ "go/types/objectpath",
+ "go/types/typeutil",
+ "internal/fastwalk",
+ "internal/gopathwalk",
+ "internal/semver",
+ "internal/span",
+ ]
+ pruneopts = "UT"
+ revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6"
[[projects]]
- digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
+ digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
- revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
- version = "v2.2.2"
+ revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5"
+ version = "v2.2.4"
+
+[[projects]]
+ digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
+ name = "honnef.co/go/tools"
+ packages = [
+ "arg",
+ "cmd/staticcheck",
+ "config",
+ "deprecated",
+ "facts",
+ "functions",
+ "go/types/typeutil",
+ "internal/cache",
+ "internal/passes/buildssa",
+ "internal/renameio",
+ "internal/sharedcheck",
+ "lint",
+ "lint/lintdsl",
+ "lint/lintutil",
+ "lint/lintutil/format",
+ "loader",
+ "printf",
+ "simple",
+ "ssa",
+ "ssautil",
+ "staticcheck",
+ "staticcheck/vrp",
+ "stylecheck",
+ "unused",
+ "version",
+ ]
+ pruneopts = "UT"
+ revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
+ version = "2019.2.3"
[solve-meta]
analyzer-name = "dep"
@@ -229,10 +306,10 @@
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
- "github.com/uber-go/atomic",
"github.com/uber/jaeger-lib/metrics",
"github.com/uber/jaeger-lib/metrics/metricstest",
"github.com/uber/jaeger-lib/metrics/prometheus",
+ "go.uber.org/atomic",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
]
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
index 3e6ac35ae..1fed7f814 100644
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -15,7 +15,7 @@
version = "^1.1.3"
[[constraint]]
- name = "github.com/uber-go/atomic"
+ name = "go.uber.org/atomic"
version = "^1"
[[constraint]]
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index 74e11787a..0cfe6a5f6 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -1,5 +1,5 @@
PROJECT_ROOT=github.com/uber/jaeger-client-go
-PACKAGES := $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
+PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
# all .go files that don't exist in hidden directories
ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
-e ".*/\..*" \
@@ -125,3 +125,4 @@ ifeq ($(CI_SKIP_LINT),true)
else
make lint
endif
+
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 604d4b571..a3366114d 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -182,6 +182,29 @@ are available:
1. `RateLimitingSampler` can be used to allow only a certain fixed
number of traces to be sampled per second.
+#### Delayed sampling
+
+Version 2.20 introduced the ability to delay sampling decisions in the life cycle
+of the root span. It involves several features and architectural changes:
+ * **Shared sampling state**: the sampling state is shared across all local
+ (i.e. in-process) spans for a given trace.
+ * **New `SamplerV2` API** allows the sampler to be called at multiple points
+ in the life cycle of a span:
+ * on span creation
+ * on overwriting span operation name
+ * on setting span tags
+ * on finishing the span
+ * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
+ to indicate if the negative sampling decision is final or not (positive sampling
+ decisions are always final). If the decision is not final, the sampler will be
+ called again on further span life cycle events, like setting tags.
+
+These new features are used in the experimental `x.TagMatchingSampler`, which
+can sample a trace based on a certain tag added to the root
+span or one of its local (in-process) children. The sampler can be used with
+another experimental `x.PrioritySampler` that allows multiple samplers to try
+to make a sampling decision, in a certain priority order.
+
### Baggage Injection
The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
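To make the delayed-sampling hooks described in the README section above concrete, here is a minimal sketch (not part of this commit) of a sampler that defers its decision until a hypothetical "debug" tag is set on the span. It only assumes the hook names and `SamplingDecision` fields visible in the sampler.go changes further down; the experimental `x.TagMatchingSampler` mentioned above implements this idea for real.

```go
package tracingdemo

import jaeger "github.com/uber/jaeger-client-go"

// debugTagSampler defers its sampling decision until a (hypothetical)
// "debug" tag shows up on the span.
type debugTagSampler struct{}

func (debugTagSampler) OnCreateSpan(*jaeger.Span) jaeger.SamplingDecision {
	// Undecided yet: keep the decision retryable so the span stays writeable.
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (debugTagSampler) OnSetOperationName(*jaeger.Span, string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (debugTagSampler) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
	if key == "debug" {
		// Positive decisions are always final.
		return jaeger.SamplingDecision{Sample: true, Retryable: false}
	}
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (debugTagSampler) OnFinishSpan(*jaeger.Span) jaeger.SamplingDecision {
	// Last event in the span life cycle: make the negative decision final.
	return jaeger.SamplingDecision{Sample: false, Retryable: false}
}

func (debugTagSampler) Close() {}
```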
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index 6bce1b3b0..965f7c3ee 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -86,6 +86,9 @@ type SamplerConfig struct {
// jaeger-agent for the appropriate sampling strategy.
// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
+
+ // Options can be used to programmatically pass additional options to the Remote sampler.
+ Options []jaeger.SamplerOption
}
// ReporterConfig configures the reporter. All fields are optional.
@@ -357,6 +360,7 @@ func (sc *SamplerConfig) NewSampler(
if sc.SamplingRefreshInterval != 0 {
options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
}
+ options = append(options, sc.Options...)
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
}
return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
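A short usage sketch (not from this commit) of the new `SamplerConfig.Options` field, using the `jaeger.SamplerOptions` factory already referenced in `NewSampler` above; the service name and option values are hypothetical.

```go
package main

import (
	"log"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := &config.Configuration{
		ServiceName: "my-service", // hypothetical name
		Sampler: &config.SamplerConfig{
			Type:  jaeger.SamplerTypeRemote,
			Param: 0.001,
			// New in this change: extra options forwarded to the remote sampler.
			Options: []jaeger.SamplerOption{
				jaeger.SamplerOptions.MaxOperations(500),
			},
		},
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()
	_ = tracer
}
```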
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
index 14d69b11d..a729bd8fe 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -52,7 +52,11 @@ const (
// FromEnv uses environment variables to set the tracer's Configuration
func FromEnv() (*Configuration, error) {
c := &Configuration{}
+ return c.FromEnv()
+}
+// FromEnv uses environment variables and overrides existing tracer's Configuration
+func (c *Configuration) FromEnv() (*Configuration, error) {
if e := os.Getenv(envServiceName); e != "" {
c.ServiceName = e
}
@@ -77,13 +81,21 @@ func FromEnv() (*Configuration, error) {
c.Tags = parseTags(e)
}
- if s, err := samplerConfigFromEnv(); err == nil {
+ if c.Sampler == nil {
+ c.Sampler = &SamplerConfig{}
+ }
+
+ if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
c.Sampler = s
} else {
return nil, errors.Wrap(err, "cannot obtain sampler config from env")
}
- if r, err := reporterConfigFromEnv(); err == nil {
+ if c.Reporter == nil {
+ c.Reporter = &ReporterConfig{}
+ }
+
+ if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
c.Reporter = r
} else {
return nil, errors.Wrap(err, "cannot obtain reporter config from env")
@@ -93,9 +105,7 @@ func FromEnv() (*Configuration, error) {
}
// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
-func samplerConfigFromEnv() (*SamplerConfig, error) {
- sc := &SamplerConfig{}
-
+func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
if e := os.Getenv(envSamplerType); e != "" {
sc.Type = e
}
@@ -135,9 +145,7 @@ func samplerConfigFromEnv() (*SamplerConfig, error) {
}
// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
-func reporterConfigFromEnv() (*ReporterConfig, error) {
- rc := &ReporterConfig{}
-
+func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
if e := os.Getenv(envReporterMaxQueueSize); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
rc.QueueSize = int(value)
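The method form of `FromEnv` introduced above lets an application pre-populate a configuration in code and then let environment variables (`JAEGER_SERVICE_NAME`, `JAEGER_SAMPLER_TYPE`, ...) override it. A minimal sketch, not part of this commit, with a hypothetical fallback service name:

```go
package main

import (
	"log"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	// Defaults set in code; environment variables take precedence.
	cfg := &config.Configuration{
		ServiceName: "fallback-service", // hypothetical default
	}
	cfg, err := cfg.FromEnv()
	if err != nil {
		log.Fatal(err)
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()
	_ = tracer
}
```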
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index e95b2ba09..0da47b02f 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@ import (
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.19.0"
+ JaegerClientVersion = "Go-2.20.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
index 6ce1caf87..f0f1afe2f 100644
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -35,7 +35,7 @@ func BuildJaegerThrift(span *Span) *j.Span {
SpanId: int64(span.context.spanID),
ParentSpanId: int64(span.context.parentID),
OperationName: span.operationName,
- Flags: int32(span.context.flags),
+ Flags: int32(span.context.samplingState.flags()),
StartTime: startTime,
Duration: duration,
Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
index e56db9b73..50e4e22d6 100644
--- a/vendor/github.com/uber/jaeger-client-go/metrics.go
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -26,6 +26,9 @@ type Metrics struct {
// Number of traces started by this tracer as not sampled
TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+ // Number of traces started by this tracer with delayed sampling
+ TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"`
+
// Number of externally started sampled traces this tracer joined
TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
@@ -33,13 +36,22 @@ type Metrics struct {
TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
// Number of sampled spans started by this tracer
- SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"`
+ SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
+
+ // Number of not sampled spans started by this tracer
+ SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
+
+ // Number of spans with delayed sampling started by this tracer
+ SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
- // Number of unsampled spans started by this tracer
- SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"`
+ // Number of spans finished by this tracer
+ SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
+
+ // Number of spans finished by this tracer
+ SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
// Number of spans finished by this tracer
- SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"`
+ SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
// Number of errors decoding tracing context
DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
index 5b50cfb71..42fd64b58 100644
--- a/vendor/github.com/uber/jaeger-client-go/propagation.go
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -193,7 +193,7 @@ func (p *BinaryPropagator) Inject(
if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
return err
}
- if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
+ if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
return err
}
@@ -222,6 +222,7 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
return emptyContext, opentracing.ErrInvalidCarrier
}
var ctx SpanContext
+ ctx.samplingState = &samplingState{}
if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
@@ -232,9 +233,12 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
- if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
+
+ var flags byte
+ if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
+ ctx.samplingState.setFlags(flags)
// Handle the baggage items
var numBaggage int32
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
index 27163ebe4..0b78cec20 100644
--- a/vendor/github.com/uber/jaeger-client-go/reporter.go
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -28,6 +28,8 @@ import (
// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
type Reporter interface {
// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+ // If the reporter is processing Span asynchronously then it needs to Retain() the span,
+ // and then Release() it when no longer needed, to avoid span data corruption.
Report(span *Span)
// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
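A sketch (not part of this commit) of a reporter that follows the asynchronous contract documented above. It assumes the `Span.Retain()`/`Span.Release()` reference-counting helpers the comment refers to, and a simplified shutdown in which Report is never called after Close.

```go
package tracingdemo

import jaeger "github.com/uber/jaeger-client-go"

// asyncReporter buffers spans and submits them from a background goroutine.
type asyncReporter struct {
	spans chan *jaeger.Span
}

func newAsyncReporter() *asyncReporter {
	r := &asyncReporter{spans: make(chan *jaeger.Span, 100)}
	go r.loop()
	return r
}

func (r *asyncReporter) Report(span *jaeger.Span) {
	span.Retain() // keep the span's data alive while it sits in the queue
	select {
	case r.spans <- span:
	default:
		span.Release() // queue full: drop the span and release it right away
	}
}

func (r *asyncReporter) loop() {
	for span := range r.spans {
		// ... serialize and send the span to a collector here ...
		span.Release()
	}
}

func (r *asyncReporter) Close() {
	close(r.spans)
}
```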
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
index ea6984e02..6195d59c5 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -17,19 +17,14 @@ package jaeger
import (
"fmt"
"math"
- "net/url"
"sync"
- "sync/atomic"
- "time"
- "github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
"github.com/uber/jaeger-client-go/utils"
)
const (
- defaultSamplingRefreshInterval = time.Minute
- defaultMaxOperations = 2000
+ defaultMaxOperations = 2000
)
// Sampler decides whether a new trace should be sampled or not.
@@ -47,9 +42,7 @@ type Sampler interface {
// Equal checks if the `other` sampler is functionally equivalent
// to this sampler.
- // TODO remove this function. This function is used to determine if 2 samplers are equivalent
- // which does not bode well with the adaptive sampler which has to create all the composite samplers
- // for the comparison to occur. This is expensive to do if only one sampler has changed.
+ // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
Equal(other Sampler) bool
}
@@ -57,17 +50,23 @@ type Sampler interface {
// ConstSampler is a sampler that always makes the same decision.
type ConstSampler struct {
+ legacySamplerV1Base
Decision bool
tags []Tag
}
// NewConstSampler creates a ConstSampler.
-func NewConstSampler(sample bool) Sampler {
+func NewConstSampler(sample bool) *ConstSampler {
tags := []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeConst},
{key: SamplerParamTagKey, value: sample},
}
- return &ConstSampler{Decision: sample, tags: tags}
+ s := &ConstSampler{
+ Decision: sample,
+ tags: tags,
+ }
+ s.delegate = s.IsSampled
+ return s
}
// IsSampled implements IsSampled() of Sampler.
@@ -88,11 +87,17 @@ func (s *ConstSampler) Equal(other Sampler) bool {
return false
}
+// String is used to log sampler details.
+func (s *ConstSampler) String() string {
+ return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
+}
+
// -----------------------
// ProbabilisticSampler is a sampler that randomly samples a certain percentage
// of traces.
type ProbabilisticSampler struct {
+ legacySamplerV1Base
samplingRate float64
samplingBoundary uint64
tags []Tag
@@ -114,16 +119,19 @@ func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error
}
func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
- samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
- tags := []Tag{
+ s := new(ProbabilisticSampler)
+ s.delegate = s.IsSampled
+ return s.init(samplingRate)
+}
+
+func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
+ s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+ s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
+ s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
- {key: SamplerParamTagKey, value: samplingRate},
- }
- return &ProbabilisticSampler{
- samplingRate: samplingRate,
- samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
- tags: tags,
+ {key: SamplerParamTagKey, value: s.samplingRate},
}
+ return s
}
// SamplingRate returns the sampling probability this sampler was constructed with.
@@ -149,65 +157,104 @@ func (s *ProbabilisticSampler) Equal(other Sampler) bool {
return false
}
+// Update modifies in-place the sampling rate. Locking must be done externally.
+func (s *ProbabilisticSampler) Update(samplingRate float64) error {
+ if samplingRate < 0.0 || samplingRate > 1.0 {
+ return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+ }
+ s.init(samplingRate)
+ return nil
+}
+
+// String is used to log sampler details.
+func (s *ProbabilisticSampler) String() string {
+ return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
+}
+
// -----------------------
-type rateLimitingSampler struct {
+// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
+// burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
+// number of sequential requests can be sampled each second.
+type RateLimitingSampler struct {
+ legacySamplerV1Base
maxTracesPerSecond float64
- rateLimiter utils.RateLimiter
+ rateLimiter *utils.ReconfigurableRateLimiter
tags []Tag
}
-// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
-// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those
-// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
-// sequential requests can be sampled each second.
-func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
- tags := []Tag{
+// NewRateLimitingSampler creates new RateLimitingSampler.
+func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
+ s := new(RateLimitingSampler)
+ s.delegate = s.IsSampled
+ return s.init(maxTracesPerSecond)
+}
+
+func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
+ if s.rateLimiter == nil {
+ s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+ } else {
+ s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+ }
+ s.maxTracesPerSecond = maxTracesPerSecond
+ s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
{key: SamplerParamTagKey, value: maxTracesPerSecond},
}
- return &rateLimitingSampler{
- maxTracesPerSecond: maxTracesPerSecond,
- rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
- tags: tags,
- }
+ return s
}
// IsSampled implements IsSampled() of Sampler.
-func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.rateLimiter.CheckCredit(1.0), s.tags
}
-func (s *rateLimitingSampler) Close() {
+// Update reconfigures the rate limiter, while preserving its accumulated balance.
+// Locking must be done externally.
+func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
+ if s.maxTracesPerSecond != maxTracesPerSecond {
+ s.init(maxTracesPerSecond)
+ }
+}
+
+// Close does nothing.
+func (s *RateLimitingSampler) Close() {
// nothing to do
}
-func (s *rateLimitingSampler) Equal(other Sampler) bool {
- if o, ok := other.(*rateLimitingSampler); ok {
+// Equal compares with another sampler.
+func (s *RateLimitingSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*RateLimitingSampler); ok {
return s.maxTracesPerSecond == o.maxTracesPerSecond
}
return false
}
+// String is used to log sampler details.
+func (s *RateLimitingSampler) String() string {
+ return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
+}
+
// -----------------------
-// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
-// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
+// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound
// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
//
-// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
-// samplers return true, the tags for probabilisticSampler will be used.
+// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
+// samplers return true, the tags for ProbabilisticSampler will be used.
type GuaranteedThroughputProbabilisticSampler struct {
probabilisticSampler *ProbabilisticSampler
- lowerBoundSampler Sampler
+ lowerBoundSampler *RateLimitingSampler
tags []Tag
samplingRate float64
lowerBound float64
}
// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
-// probabilisticSampler and rateLimitingSampler.
+// ProbabilisticSampler and RateLimitingSampler.
func NewGuaranteedThroughputProbabilisticSampler(
lowerBound, samplingRate float64,
) (*GuaranteedThroughputProbabilisticSampler, error) {
@@ -224,8 +271,14 @@ func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float6
}
func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
- if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
+ if s.probabilisticSampler == nil {
s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+ } else if s.samplingRate != samplingRate {
+ s.probabilisticSampler.init(samplingRate)
+ }
+ // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
+ samplingRate = s.probabilisticSampler.SamplingRate()
+ if s.samplingRate != samplingRate || s.tags == nil {
s.samplingRate = s.probabilisticSampler.SamplingRate()
s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
@@ -252,7 +305,7 @@ func (s *GuaranteedThroughputProbabilisticSampler) Close() {
// Equal implements Equal() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
+ // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
// more information.
return false
}
@@ -261,52 +314,116 @@ func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
s.setProbabilisticSampler(samplingRate)
if s.lowerBound != lowerBound {
- s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
+ s.lowerBoundSampler.Update(lowerBound)
s.lowerBound = lowerBound
}
}
// -----------------------
-type adaptiveSampler struct {
+// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
+// on a per-operation basis.
+type PerOperationSampler struct {
sync.RWMutex
samplers map[string]*GuaranteedThroughputProbabilisticSampler
defaultSampler *ProbabilisticSampler
lowerBound float64
maxOperations int
+
+ // see description in PerOperationSamplerParams
+ operationNameLateBinding bool
}
-// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
-// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
-// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
-func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
- return newAdaptiveSampler(strategies, maxOperations), nil
+// NewAdaptiveSampler returns a new PerOperationSampler.
+// Deprecated: please use NewPerOperationSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
+ return NewPerOperationSampler(PerOperationSamplerParams{
+ MaxOperations: maxOperations,
+ Strategies: strategies,
+ }), nil
}
-func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
+// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
+type PerOperationSamplerParams struct {
+ // Max number of operations that will be tracked. Other operations will be given default strategy.
+ MaxOperations int
+
+ // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
+ // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
+ // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
+ // in applications that always provide the correct span name on trace creation.
+ //
+ // For backwards compatibility this option is off by default.
+ OperationNameLateBinding bool
+
+ // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
+ Strategies *sampling.PerOperationSamplingStrategies
+}
+
+// NewPerOperationSampler returns a new PerOperationSampler.
+func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
- for _, strategy := range strategies.PerOperationStrategies {
+ for _, strategy := range params.Strategies.PerOperationStrategies {
sampler := newGuaranteedThroughputProbabilisticSampler(
- strategies.DefaultLowerBoundTracesPerSecond,
+ params.Strategies.DefaultLowerBoundTracesPerSecond,
strategy.ProbabilisticSampling.SamplingRate,
)
samplers[strategy.Operation] = sampler
}
- return &adaptiveSampler{
- samplers: samplers,
- defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
- lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
- maxOperations: maxOperations,
+ return &PerOperationSampler{
+ samplers: samplers,
+ defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
+ lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
+ maxOperations: params.MaxOperations,
+ operationNameLateBinding: params.OperationNameLateBinding,
}
}
-func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+// IsSampled is not used and only exists to match Sampler V1 API.
+// TODO (breaking change) remove when upgrading everything to SamplerV2
+func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return false, nil
+}
+
+func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
+ samplerV1 := s.getSamplerForOperation(operationName)
+ var sampled bool
+ var tags []Tag
+ if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
+ sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
+ }
+ return sampled, tags
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
+ sampled, tags := s.trySampling(span, span.OperationName())
+ return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ sampled, tags := s.trySampling(span, operationName)
+ return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
s.RLock()
sampler, ok := s.samplers[operation]
if ok {
defer s.RUnlock()
- return sampler.IsSampled(id, operation)
+ return sampler
}
s.RUnlock()
s.Lock()
@@ -315,18 +432,19 @@ func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag)
// Check if sampler has already been created
sampler, ok = s.samplers[operation]
if ok {
- return sampler.IsSampled(id, operation)
+ return sampler
}
// Store only up to maxOperations of unique ops.
if len(s.samplers) >= s.maxOperations {
- return s.defaultSampler.IsSampled(id, operation)
+ return s.defaultSampler
}
newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
s.samplers[operation] = newSampler
- return newSampler.IsSampled(id, operation)
+ return newSampler
}
-func (s *adaptiveSampler) Close() {
+// Close invokes Close on all underlying samplers.
+func (s *PerOperationSampler) Close() {
s.Lock()
defer s.Unlock()
for _, sampler := range s.samplers {
@@ -335,16 +453,18 @@ func (s *adaptiveSampler) Close() {
s.defaultSampler.Close()
}
-func (s *adaptiveSampler) Equal(other Sampler) bool {
- // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
+// Equal is not used.
+// TODO (breaking change) remove this in the future
+func (s *PerOperationSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
// samplers which all need to be initialized before this function can be called for a comparison.
- // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
+ // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
// changing. Hence this function always returns false so that the update function can be called.
// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
return false
}
-func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
s.Lock()
defer s.Unlock()
newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
@@ -369,191 +489,3 @@ func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrate
}
s.samplers = newSamplers
}
-
-// -----------------------
-
-// RemotelyControlledSampler is a delegating sampler that polls a remote server
-// for the appropriate sampling strategy, constructs a corresponding sampler and
-// delegates to it for sampling decisions.
-type RemotelyControlledSampler struct {
- // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
- // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- closed int64 // 0 - not closed, 1 - closed
-
- sync.RWMutex
- samplerOptions
-
- serviceName string
- manager sampling.SamplingManager
- doneChan chan *sync.WaitGroup
-}
-
-type httpSamplingManager struct {
- serverURL string
-}
-
-func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
- var out sampling.SamplingStrategyResponse
- v := url.Values{}
- v.Set("service", serviceName)
- if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
- return nil, err
- }
- return &out, nil
-}
-
-// NewRemotelyControlledSampler creates a sampler that periodically pulls
-// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
-func NewRemotelyControlledSampler(
- serviceName string,
- opts ...SamplerOption,
-) *RemotelyControlledSampler {
- options := applySamplerOptions(opts...)
- sampler := &RemotelyControlledSampler{
- samplerOptions: options,
- serviceName: serviceName,
- manager: &httpSamplingManager{serverURL: options.samplingServerURL},
- doneChan: make(chan *sync.WaitGroup),
- }
- go sampler.pollController()
- return sampler
-}
-
-func applySamplerOptions(opts ...SamplerOption) samplerOptions {
- options := samplerOptions{}
- for _, option := range opts {
- option(&options)
- }
- if options.sampler == nil {
- options.sampler = newProbabilisticSampler(0.001)
- }
- if options.logger == nil {
- options.logger = log.NullLogger
- }
- if options.maxOperations <= 0 {
- options.maxOperations = defaultMaxOperations
- }
- if options.samplingServerURL == "" {
- options.samplingServerURL = DefaultSamplingServerURL
- }
- if options.metrics == nil {
- options.metrics = NewNullMetrics()
- }
- if options.samplingRefreshInterval <= 0 {
- options.samplingRefreshInterval = defaultSamplingRefreshInterval
- }
- return options
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- s.RLock()
- defer s.RUnlock()
- return s.sampler.IsSampled(id, operation)
-}
-
-// Close implements Close() of Sampler.
-func (s *RemotelyControlledSampler) Close() {
- if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
- s.logger.Error("Repeated attempt to close the sampler is ignored")
- return
- }
-
- var wg sync.WaitGroup
- wg.Add(1)
- s.doneChan <- &wg
- wg.Wait()
-}
-
-// Equal implements Equal() of Sampler.
-func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
- // more information.
- if o, ok := other.(*RemotelyControlledSampler); ok {
- s.RLock()
- o.RLock()
- defer s.RUnlock()
- defer o.RUnlock()
- return s.sampler.Equal(o.sampler)
- }
- return false
-}
-
-func (s *RemotelyControlledSampler) pollController() {
- ticker := time.NewTicker(s.samplingRefreshInterval)
- defer ticker.Stop()
- s.pollControllerWithTicker(ticker)
-}
-
-func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
- for {
- select {
- case <-ticker.C:
- s.updateSampler()
- case wg := <-s.doneChan:
- wg.Done()
- return
- }
- }
-}
-
-func (s *RemotelyControlledSampler) getSampler() Sampler {
- s.Lock()
- defer s.Unlock()
- return s.sampler
-}
-
-func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
- s.Lock()
- defer s.Unlock()
- s.sampler = sampler
-}
-
-func (s *RemotelyControlledSampler) updateSampler() {
- res, err := s.manager.GetSamplingStrategy(s.serviceName)
- if err != nil {
- s.metrics.SamplerQueryFailure.Inc(1)
- s.logger.Infof("Unable to query sampling strategy: %v", err)
- return
- }
- s.Lock()
- defer s.Unlock()
-
- s.metrics.SamplerRetrieved.Inc(1)
- if strategies := res.GetOperationSampling(); strategies != nil {
- s.updateAdaptiveSampler(strategies)
- } else {
- err = s.updateRateLimitingOrProbabilisticSampler(res)
- }
- if err != nil {
- s.metrics.SamplerUpdateFailure.Inc(1)
- s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
- return
- }
- s.metrics.SamplerUpdated.Inc(1)
-}
-
-// NB: this function should only be called while holding a Write lock
-func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
- if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
- adaptiveSampler.update(strategies)
- } else {
- s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
- }
-}
-
-// NB: this function should only be called while holding a Write lock
-func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
- var newSampler Sampler
- if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
- newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
- } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
- newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
- } else {
- return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
- }
- if !s.sampler.Equal(newSampler) {
- s.sampler = newSampler
- }
- return nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
new file mode 100644
index 000000000..9bd0c9822
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -0,0 +1,334 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/uber/jaeger-client-go/thrift-gen/sampling"
+)
+
+const (
+ defaultSamplingRefreshInterval = time.Minute
+)
+
+// SamplingStrategyFetcher is used to fetch sampling strategy updates from a remote server.
+type SamplingStrategyFetcher interface {
+ Fetch(service string) ([]byte, error)
+}
+
+// SamplingStrategyParser is used to parse sampling strategy updates. The output object
+// should be of the type that is recognized by the SamplerUpdaters.
+type SamplingStrategyParser interface {
+ Parse(response []byte) (interface{}, error)
+}
+
+// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
+// retrieved from a remote config server, to the current sampler. The updater can modify
+// the sampler in-place if the sampler supports it, or create a new one.
+//
+// If the strategy does not contain configuration for the sampler in question,
+// the updater must return modifiedSampler=nil to give other updaters a chance to inspect
+// the sampling strategy response.
+//
+// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
+type SamplerUpdater interface {
+ Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
+}
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ closed int64 // 0 - not closed, 1 - closed
+
+ sync.RWMutex
+ samplerOptions
+
+ serviceName string
+ doneChan chan *sync.WaitGroup
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+ serviceName string,
+ opts ...SamplerOption,
+) *RemotelyControlledSampler {
+ options := new(samplerOptions).applyOptionsAndDefaults(opts...)
+ sampler := &RemotelyControlledSampler{
+ samplerOptions: *options,
+ serviceName: serviceName,
+ doneChan: make(chan *sync.WaitGroup),
+ }
+ go sampler.pollController()
+ return sampler
+}
+
+// IsSampled implements IsSampled() of Sampler.
+// TODO (breaking change) remove when Sampler V1 is removed
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return false, nil
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+ return s.sampler.OnCreateSpan(span)
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ return s.sampler.OnSetOperationName(span, operationName)
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return s.sampler.OnSetTag(span, key, value)
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+ return s.sampler.OnFinishSpan(span)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+ s.logger.Error("Repeated attempt to close the sampler is ignored")
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ s.doneChan <- &wg
+ wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+ // more information.
+ return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+ ticker := time.NewTicker(s.samplingRefreshInterval)
+ defer ticker.Stop()
+ s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+ for {
+ select {
+ case <-ticker.C:
+ s.UpdateSampler()
+ case wg := <-s.doneChan:
+ wg.Done()
+ return
+ }
+ }
+}
+
+// Sampler returns the currently active sampler.
+func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
+ s.Lock()
+ defer s.Unlock()
+ return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
+ s.Lock()
+ defer s.Unlock()
+ s.sampler = sampler
+}
+
+// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
+// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
+func (s *RemotelyControlledSampler) UpdateSampler() {
+ res, err := s.samplingFetcher.Fetch(s.serviceName)
+ if err != nil {
+ s.metrics.SamplerQueryFailure.Inc(1)
+ s.logger.Infof("failed to fetch sampling strategy: %v", err)
+ return
+ }
+ strategy, err := s.samplingParser.Parse(res)
+ if err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("failed to parse sampling strategy response: %v", err)
+ return
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ s.metrics.SamplerRetrieved.Inc(1)
+ if err := s.updateSamplerViaUpdaters(strategy); err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
+ return
+ }
+ s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
+ for _, updater := range s.updaters {
+ sampler, err := updater.Update(s.sampler, strategy)
+ if err != nil {
+ return err
+ }
+ if sampler != nil {
+ s.sampler = sampler
+ return nil
+ }
+ }
+ return fmt.Errorf("unsupported sampling strategy %+v", strategy)
+}
+
+// -----------------------
+
+// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type ProbabilisticSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if resp, ok := strategy.(response); ok {
+ if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
+ if ps, ok := sampler.(*ProbabilisticSampler); ok {
+ if err := ps.Update(probabilistic.SamplingRate); err != nil {
+ return nil, err
+ }
+ return sampler, nil
+ }
+ return newProbabilisticSampler(probabilistic.SamplingRate), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type RateLimitingSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if resp, ok := strategy.(response); ok {
+ if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
+ rateLimit := float64(rateLimiting.MaxTracesPerSecond)
+ if rl, ok := sampler.(*RateLimitingSampler); ok {
+ rl.Update(rateLimit)
+ return rl, nil
+ }
+ return NewRateLimitingSampler(rateLimit), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type AdaptiveSamplerUpdater struct {
+ MaxOperations int // required
+ OperationNameLateBinding bool
+}
+
+// Update implements Update of SamplerUpdater.
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetOperationSampling() *sampling.PerOperationSamplingStrategies
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if p, ok := strategy.(response); ok {
+ if operations := p.GetOperationSampling(); operations != nil {
+ if as, ok := sampler.(*PerOperationSampler); ok {
+ as.update(operations)
+ return as, nil
+ }
+ return NewPerOperationSampler(PerOperationSamplerParams{
+ MaxOperations: u.MaxOperations,
+ OperationNameLateBinding: u.OperationNameLateBinding,
+ Strategies: operations,
+ }), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+type httpSamplingStrategyFetcher struct {
+ serverURL string
+ logger Logger
+}
+
+func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
+ v := url.Values{}
+ v.Set("service", serviceName)
+ uri := f.serverURL + "?" + v.Encode()
+
+ // TODO create and reuse http.Client with proper timeout settings, etc.
+ resp, err := http.Get(uri)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err := resp.Body.Close(); err != nil {
+ f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
+ }
+ }()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+ }
+
+ return body, nil
+}
+
+// -----------------------
+
+type samplingStrategyParser struct{}
+
+func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
+ strategy := new(sampling.SamplingStrategyResponse)
+ if err := json.Unmarshal(response, strategy); err != nil {
+ return nil, err
+ }
+ return strategy, nil
+}
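
A minimal usage sketch of the sampler introduced in this file, using only the API shown in this commit (NewRemotelyControlledSampler, the SamplerOptions helpers from sampler_remote_options.go, UpdateSampler, Close); the service name and the refresh interval are illustrative assumptions, not values taken from this change.

package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// Poll the sampling endpoint every 30s instead of the default one minute (illustrative value).
	sampler := jaeger.NewRemotelyControlledSampler(
		"my-service", // hypothetical service name
		jaeger.SamplerOptions.SamplingRefreshInterval(30*time.Second),
		jaeger.SamplerOptions.MaxOperations(500),
	)
	// Force an immediate fetch instead of waiting for the first tick; on failure it only
	// logs and increments a failure metric, so this is safe to call manually (e.g. in tests).
	sampler.UpdateSampler()
	defer sampler.Close()
}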
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
index 75d28a561..7a292effc 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
@@ -16,6 +16,8 @@ package jaeger
import (
"time"
+
+ "github.com/uber/jaeger-client-go/log"
)
// SamplerOption is a function that sets some option on the sampler
@@ -27,10 +29,13 @@ var SamplerOptions samplerOptions
type samplerOptions struct {
metrics *Metrics
maxOperations int
- sampler Sampler
+ sampler SamplerV2
logger Logger
samplingServerURL string
samplingRefreshInterval time.Duration
+ samplingFetcher SamplingStrategyFetcher
+ samplingParser SamplingStrategyParser
+ updaters []SamplerUpdater
}
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
@@ -53,7 +58,7 @@ func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
// to use before a remote sampler is created and used.
func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
return func(o *samplerOptions) {
- o.sampler = sampler
+ o.sampler = samplerV1toV2(sampler)
}
}
@@ -79,3 +84,65 @@ func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Durat
o.samplingRefreshInterval = samplingRefreshInterval
}
}
+
+// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
+func (samplerOptions) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingFetcher = fetcher
+ }
+}
+
+// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
+func (samplerOptions) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingParser = parser
+ }
+}
+
+// Updaters creates a SamplerOption that initializes sampler updaters.
+func (samplerOptions) Updaters(updaters ...SamplerUpdater) SamplerOption {
+ return func(o *samplerOptions) {
+ o.updaters = updaters
+ }
+}
+
+func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
+ for _, option := range opts {
+ option(o)
+ }
+ if o.sampler == nil {
+ o.sampler = newProbabilisticSampler(0.001)
+ }
+ if o.logger == nil {
+ o.logger = log.NullLogger
+ }
+ if o.maxOperations <= 0 {
+ o.maxOperations = defaultMaxOperations
+ }
+ if o.samplingServerURL == "" {
+ o.samplingServerURL = DefaultSamplingServerURL
+ }
+ if o.metrics == nil {
+ o.metrics = NewNullMetrics()
+ }
+ if o.samplingRefreshInterval <= 0 {
+ o.samplingRefreshInterval = defaultSamplingRefreshInterval
+ }
+ if o.samplingFetcher == nil {
+ o.samplingFetcher = &httpSamplingStrategyFetcher{
+ serverURL: o.samplingServerURL,
+ logger: o.logger,
+ }
+ }
+ if o.samplingParser == nil {
+ o.samplingParser = new(samplingStrategyParser)
+ }
+ if o.updaters == nil {
+ o.updaters = []SamplerUpdater{
+ &AdaptiveSamplerUpdater{MaxOperations: o.maxOperations},
+ new(ProbabilisticSamplerUpdater),
+ new(RateLimitingSamplerUpdater),
+ }
+ }
+ return o
+}
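
A hedged sketch of how the new extension points compose: a hypothetical fetcher that reads the strategy JSON from a local file is plugged in via SamplerOptions.SamplingStrategyFetcher, while the default parser and updaters installed by applyOptionsAndDefaults stay in place. The fileFetcher type, the service name, and the path are invented for illustration.

package main

import (
	"io/ioutil"

	jaeger "github.com/uber/jaeger-client-go"
)

// fileFetcher is a hypothetical SamplingStrategyFetcher that reads the strategy
// JSON from a local file instead of the HTTP endpoint.
type fileFetcher struct {
	path string
}

func (f *fileFetcher) Fetch(service string) ([]byte, error) {
	// The file is expected to contain a sampling.SamplingStrategyResponse in JSON,
	// which the default samplingStrategyParser can decode.
	return ioutil.ReadFile(f.path)
}

func main() {
	sampler := jaeger.NewRemotelyControlledSampler(
		"my-service",
		jaeger.SamplerOptions.SamplingStrategyFetcher(&fileFetcher{path: "/etc/jaeger/strategy.json"}),
	)
	defer sampler.Close()
}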
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
new file mode 100644
index 000000000..a50671a23
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// SamplingDecision is returned by the V2 samplers.
+type SamplingDecision struct {
+ Sample bool
+ Retryable bool
+ Tags []Tag
+}
+
+// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
+// to be made at different points of the span lifecycle.
+type SamplerV2 interface {
+ OnCreateSpan(span *Span) SamplingDecision
+ OnSetOperationName(span *Span, operationName string) SamplingDecision
+ OnSetTag(span *Span, key string, value interface{}) SamplingDecision
+ OnFinishSpan(span *Span) SamplingDecision
+
+ // Close does a clean shutdown of the sampler, stopping any background
+ // go-routines it may have started.
+ Close()
+}
+
+// samplerV1toV2 wraps a legacy V1 sampler into an adapter that makes it look like V2.
+func samplerV1toV2(s Sampler) SamplerV2 {
+ if s2, ok := s.(SamplerV2); ok {
+ return s2
+ }
+ type legacySamplerV1toV2Adapter struct {
+ legacySamplerV1Base
+ }
+ return &legacySamplerV1toV2Adapter{
+ legacySamplerV1Base: legacySamplerV1Base{
+ delegate: s.IsSampled,
+ },
+ }
+}
+
+// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
+// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
+// for backwards compatibility reasons.
+// TODO (breaking change) remove this in the next major release
+type SamplerV2Base struct{}
+
+// IsSampled implements IsSampled of Sampler.
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
+ return false, nil
+}
+
+// Close implements Close of Sampler.
+func (SamplerV2Base) Close() {}
+
+// Equal implements Equal of Sampler.
+func (SamplerV2Base) Equal(other Sampler) bool { return false }
+
+// legacySamplerV1Base is used as a base for simple samplers that only implement
+// the legacy isSampled() function that is not sensitive to its arguments.
+type legacySamplerV1Base struct {
+ delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
+}
+
+func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
+ isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+ return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+ return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) Close() {}
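
To show what the SamplerV2 surface demands from an implementation, here is a minimal sketch of a custom head-based sampler built on SamplerV2Base; headSampler is a hypothetical name and the always-sample decision at span creation is purely illustrative.

package main

import jaeger "github.com/uber/jaeger-client-go"

// headSampler decides once, at span creation, and marks the decision as final
// (Retryable: false) so later write operations do not re-trigger sampling.
type headSampler struct {
	jaeger.SamplerV2Base // provides the legacy V1 methods required by Tracer configuration
}

func (headSampler) OnCreateSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: true, Retryable: false}
}

func (headSampler) OnSetOperationName(span *jaeger.Span, operationName string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (headSampler) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (headSampler) OnFinishSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func main() {
	// headSampler satisfies SamplerV2 (Close comes from the embedded SamplerV2Base).
	var s jaeger.SamplerV2 = headSampler{}
	_ = s
}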
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
index 9df8b6017..bbf6fb068 100644
--- a/vendor/github.com/uber/jaeger-client-go/span.go
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -34,6 +34,7 @@ type Span struct {
tracer *Tracer
+ // TODO: (breaking change) change to use a pointer
context SpanContext
// The name of the "operation" this span is an instance of.
@@ -65,18 +66,26 @@ type Span struct {
}
// Tag is a simple key value wrapper.
-// TODO deprecate in the next major release, use opentracing.Tag instead.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
type Tag struct {
key string
value interface{}
}
+// NewTag creates a new Tag.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+func NewTag(key string, value interface{}) Tag {
+ return Tag{key: key, value: value}
+}
+
// SetOperationName sets or changes the operation name.
func (s *Span) SetOperationName(operationName string) opentracing.Span {
s.Lock()
- defer s.Unlock()
- if s.context.IsSampled() {
- s.operationName = operationName
+ s.operationName = operationName
+ s.Unlock()
+ if !s.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnSetOperationName(s, operationName)
+ s.applySamplingDecision(decision, true)
}
s.observer.OnSetOperationName(operationName)
return s
@@ -84,14 +93,24 @@ func (s *Span) SetOperationName(operationName string) opentracing.Span {
// SetTag implements SetTag() of opentracing.Span
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+ return s.setTagInternal(key, value, true)
+}
+
+func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
s.observer.OnSetTag(key, value)
if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
return s
}
- s.Lock()
- defer s.Unlock()
- if s.context.IsSampled() {
- s.setTagNoLocking(key, value)
+ if !s.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnSetTag(s, key, value)
+ s.applySamplingDecision(decision, lock)
+ }
+ if s.isWriteable() {
+ if lock {
+ s.Lock()
+ defer s.Unlock()
+ }
+ s.appendTagNoLocking(key, value)
}
return s
}
@@ -121,14 +140,38 @@ func (s *Span) Duration() time.Duration {
func (s *Span) Tags() opentracing.Tags {
s.Lock()
defer s.Unlock()
- var result = make(opentracing.Tags)
+ var result = make(opentracing.Tags, len(s.tags))
for _, tag := range s.tags {
result[tag.key] = tag.value
}
return result
}
-func (s *Span) setTagNoLocking(key string, value interface{}) {
+// Logs returns micro logs for span
+func (s *Span) Logs() []opentracing.LogRecord {
+ s.Lock()
+ defer s.Unlock()
+
+ return append([]opentracing.LogRecord(nil), s.logs...)
+}
+
+// References returns references for this span
+func (s *Span) References() []opentracing.SpanReference {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.references == nil || len(s.references) == 0 {
+ return nil
+ }
+
+ result := make([]opentracing.SpanReference, len(s.references))
+ for i, r := range s.references {
+ result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
+ }
+ return result
+}
+
+func (s *Span) appendTagNoLocking(key string, value interface{}) {
s.tags = append(s.tags, Tag{key: key, value: value})
}
@@ -148,7 +191,7 @@ func (s *Span) logFieldsNoLocking(fields ...log.Field) {
Fields: fields,
Timestamp: time.Now(),
}
- s.appendLog(lr)
+ s.appendLogNoLocking(lr)
}
// LogKV implements opentracing.Span API
@@ -185,12 +228,12 @@ func (s *Span) Log(ld opentracing.LogData) {
if ld.Timestamp.IsZero() {
ld.Timestamp = s.tracer.timeNow()
}
- s.appendLog(ld.ToLogRecord())
+ s.appendLogNoLocking(ld.ToLogRecord())
}
}
// this function should only be called while holding a Write lock
-func (s *Span) appendLog(lr opentracing.LogRecord) {
+func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
// TODO add logic to limit number of logs per span (issue #46)
s.logs = append(s.logs, lr)
}
@@ -224,17 +267,25 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
}
s.observer.OnFinish(options)
s.Lock()
+ s.duration = options.FinishTime.Sub(s.startTime)
+ s.Unlock()
+ if !s.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnFinishSpan(s)
+ s.applySamplingDecision(decision, true)
+ }
if s.context.IsSampled() {
- s.duration = options.FinishTime.Sub(s.startTime)
- // Note: bulk logs are not subject to maxLogsPerSpan limit
- if options.LogRecords != nil {
- s.logs = append(s.logs, options.LogRecords...)
- }
- for _, ld := range options.BulkLogData {
- s.logs = append(s.logs, ld.ToLogRecord())
+ if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
+ s.Lock()
+ // Note: bulk logs are not subject to maxLogsPerSpan limit
+ if options.LogRecords != nil {
+ s.logs = append(s.logs, options.LogRecords...)
+ }
+ for _, ld := range options.BulkLogData {
+ s.logs = append(s.logs, ld.ToLogRecord())
+ }
+ s.Unlock()
}
}
- s.Unlock()
// call reportSpan even for non-sampled traces, to return span to the pool
// and update metrics counter
s.tracer.reportSpan(s)
@@ -300,23 +351,62 @@ func (s *Span) serviceName() string {
return s.tracer.serviceName
}
+func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
+ if !decision.Retryable {
+ s.context.samplingState.setFinal()
+ }
+ if decision.Sample {
+ s.context.samplingState.setSampled()
+ if len(decision.Tags) > 0 {
+ if lock {
+ s.Lock()
+ defer s.Unlock()
+ }
+ for _, tag := range decision.Tags {
+ s.appendTagNoLocking(tag.key, tag.value)
+ }
+ }
+ }
+}
+
+// Span can be written to if it is sampled or the sampling decision has not been finalized.
+func (s *Span) isWriteable() bool {
+ state := s.context.samplingState
+ return !state.isFinal() || state.isSampled()
+}
+
+func (s *Span) isSamplingFinalized() bool {
+ return s.context.samplingState.isFinal()
+}
+
// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+// The behavior of setSamplingPriority is surprising:
+// - if noDebugFlagOnForcedSampling is set,
+//   setSamplingPriority(span, 1) always sets only flagSampled;
+// - if noDebugFlagOnForcedSampling is unset and isDebugAllowed passes,
+//   setSamplingPriority(span, 1) sets both flagSampled and flagDebug;
+// - however, setSamplingPriority(span, 0) always resets only flagSampled.
+//
+// This means that a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can
+// leave flagDebug set.
func setSamplingPriority(s *Span, value interface{}) bool {
val, ok := value.(uint16)
if !ok {
return false
}
- s.Lock()
- defer s.Unlock()
if val == 0 {
- s.context.flags = s.context.flags & (^flagSampled)
+ s.context.samplingState.unsetSampled()
+ s.context.samplingState.setFinal()
return true
}
if s.tracer.options.noDebugFlagOnForcedSampling {
- s.context.flags = s.context.flags | flagSampled
+ s.context.samplingState.setSampled()
+ s.context.samplingState.setFinal()
return true
} else if s.tracer.isDebugAllowed(s.operationName) {
- s.context.flags = s.context.flags | flagDebug | flagSampled
+ s.context.samplingState.setDebugAndSampled()
+ s.context.samplingState.setFinal()
return true
}
return false
@@ -326,5 +416,5 @@ func setSamplingPriority(s *Span, value interface{}) bool {
func EnableFirehose(s *Span) {
s.Lock()
defer s.Unlock()
- s.context.flags |= flagFirehose
+ s.context.samplingState.setFirehose()
}
diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
index 43553655a..b7230abfe 100644
--- a/vendor/github.com/uber/jaeger-client-go/context.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -19,12 +19,15 @@ import (
"fmt"
"strconv"
"strings"
+ "sync"
+
+ "go.uber.org/atomic"
)
const (
- flagSampled = byte(1)
- flagDebug = byte(2)
- flagFirehose = byte(8)
+ flagSampled = 1
+ flagDebug = 2
+ flagFirehose = 8
)
var (
@@ -56,9 +59,6 @@ type SpanContext struct {
// Should be 0 if the current span is a root span.
parentID SpanID
- // flags is a bitmap containing such bits as 'sampled' and 'debug'.
- flags byte
-
// Distributed Context baggage. The is a snapshot in time.
baggage map[string]string
@@ -67,6 +67,102 @@ type SpanContext struct {
//
// See JaegerDebugHeader in constants.go
debugID string
+
+ // samplingState is shared across all spans
+ samplingState *samplingState
+
+ // remote indicates that span context represents a remote parent
+ remote bool
+}
+
+type samplingState struct {
+ // Span context's state flags that are propagated across processes. Only lower 8 bits are used.
+ // We use an int32 instead of byte to be able to use CAS operations.
+ stateFlags atomic.Int32
+
+ // When state is not final, sampling will be retried on other span write operations,
+ // like SetOperationName / SetTag, and the spans will remain writable.
+ final atomic.Bool
+
+ // localRootSpan stores the SpanID of the first span created in this process for a given trace.
+ localRootSpan SpanID
+
+ // extendedState allows samplers to keep intermediate state.
+ // The keys and values in this map are completely opaque: interface{} -> interface{}.
+ extendedState sync.Map
+}
+
+func (s *samplingState) isLocalRootSpan(id SpanID) bool {
+ return id == s.localRootSpan
+}
+
+func (s *samplingState) setFlag(newFlag int32) {
+ swapped := false
+ for !swapped {
+ old := s.stateFlags.Load()
+ swapped = s.stateFlags.CAS(old, old|newFlag)
+ }
+}
+
+func (s *samplingState) unsetFlag(newFlag int32) {
+ swapped := false
+ for !swapped {
+ old := s.stateFlags.Load()
+ swapped = s.stateFlags.CAS(old, old&^newFlag)
+ }
+}
+
+func (s *samplingState) setSampled() {
+ s.setFlag(flagSampled)
+}
+
+func (s *samplingState) unsetSampled() {
+ s.unsetFlag(flagSampled)
+}
+
+func (s *samplingState) setDebugAndSampled() {
+ s.setFlag(flagDebug | flagSampled)
+}
+
+func (s *samplingState) setFirehose() {
+ s.setFlag(flagFirehose)
+}
+
+func (s *samplingState) setFlags(flags byte) {
+ s.stateFlags.Store(int32(flags))
+}
+
+func (s *samplingState) setFinal() {
+ s.final.Store(true)
+}
+
+func (s *samplingState) flags() byte {
+ return byte(s.stateFlags.Load())
+}
+
+func (s *samplingState) isSampled() bool {
+ return s.stateFlags.Load()&flagSampled == flagSampled
+}
+
+func (s *samplingState) isDebug() bool {
+ return s.stateFlags.Load()&flagDebug == flagDebug
+}
+
+func (s *samplingState) isFirehose() bool {
+ return s.stateFlags.Load()&flagFirehose == flagFirehose
+}
+
+func (s *samplingState) isFinal() bool {
+ return s.final.Load()
+}
+
+func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
+ if value, ok := s.extendedState.Load(key); ok {
+ return value
+ }
+ value := initValue()
+ value, _ = s.extendedState.LoadOrStore(key, value)
+ return value
}
// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
@@ -81,17 +177,28 @@ func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
// IsSampled returns whether this trace was chosen for permanent storage
// by the sampling mechanism of the tracer.
func (c SpanContext) IsSampled() bool {
- return (c.flags & flagSampled) == flagSampled
+ return c.samplingState.isSampled()
}
// IsDebug indicates whether sampling was explicitly requested by the service.
func (c SpanContext) IsDebug() bool {
- return (c.flags & flagDebug) == flagDebug
+ return c.samplingState.isDebug()
+}
+
+// IsSamplingFinalized indicates whether the sampling decision has been finalized.
+func (c SpanContext) IsSamplingFinalized() bool {
+ return c.samplingState.isFinal()
}
// IsFirehose indicates whether the firehose flag was set
func (c SpanContext) IsFirehose() bool {
- return (c.flags & flagFirehose) == flagFirehose
+ return c.samplingState.isFirehose()
+}
+
+// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
+// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
+func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
+ return c.samplingState.extendedStateForKey(key, initValue)
}
// IsValid indicates whether this context actually represents a valid trace.
@@ -99,11 +206,16 @@ func (c SpanContext) IsValid() bool {
return c.traceID.IsValid() && c.spanID != 0
}
+// SetFirehose enables firehose mode for this trace.
+func (c SpanContext) SetFirehose() {
+ c.samplingState.setFirehose()
+}
+
func (c SpanContext) String() string {
if c.traceID.High == 0 {
- return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+ return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
}
- return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+ return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
}
// ContextFromString reconstructs the Context encoded in a string
@@ -130,7 +242,8 @@ func ContextFromString(value string) (SpanContext, error) {
if err != nil {
return emptyContext, err
}
- context.flags = byte(flags)
+ context.samplingState = &samplingState{}
+ context.samplingState.setFlags(byte(flags))
return context, nil
}
@@ -149,18 +262,24 @@ func (c SpanContext) ParentID() SpanID {
return c.parentID
}
+// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
+func (c SpanContext) Flags() byte {
+ return c.samplingState.flags()
+}
+
// NewSpanContext creates a new instance of SpanContext
func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
- flags := byte(0)
+ samplingState := &samplingState{}
if sampled {
- flags = flagSampled
+ samplingState.setSampled()
}
+
return SpanContext{
- traceID: traceID,
- spanID: spanID,
- parentID: parentID,
- flags: flags,
- baggage: baggage}
+ traceID: traceID,
+ spanID: spanID,
+ parentID: parentID,
+ samplingState: samplingState,
+ baggage: baggage}
}
// CopyFrom copies data from ctx into this context, including span identity and baggage.
@@ -169,7 +288,7 @@ func (c *SpanContext) CopyFrom(ctx *SpanContext) {
c.traceID = ctx.traceID
c.spanID = ctx.spanID
c.parentID = ctx.parentID
- c.flags = ctx.flags
+ c.samplingState = ctx.samplingState
if l := len(ctx.baggage); l > 0 {
c.baggage = make(map[string]string, l)
for k, v := range ctx.baggage {
@@ -193,7 +312,7 @@ func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
newBaggage[key] = value
}
// Use positional parameters so the compiler will help catch new fields.
- return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
+ return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
}
// isDebugIDContainerOnly returns true when the instance of the context is only
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
index 745a0c38a..f03372dc7 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -38,7 +38,7 @@ type Tracer struct {
serviceName string
hostIPv4 uint32 // this is for zipkin endpoint conversion
- sampler Sampler
+ sampler SamplerV2
reporter Reporter
metrics Metrics
logger log.Logger
@@ -74,6 +74,7 @@ type Tracer struct {
// NewTracer creates Tracer implementation that reports tracing to Jaeger.
// The returned io.Closer can be used in shutdown hooks to ensure that the internal
// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+// TODO (breaking change) return *Tracer only, without closer.
func NewTracer(
serviceName string,
sampler Sampler,
@@ -82,7 +83,7 @@ func NewTracer(
) (opentracing.Tracer, io.Closer) {
t := &Tracer{
serviceName: serviceName,
- sampler: sampler,
+ sampler: samplerV1toV2(sampler),
reporter: reporter,
injectors: make(map[interface{}]Injector),
extractors: make(map[interface{}]Extractor),
@@ -261,7 +262,7 @@ func (t *Tracer) startSpanWithOptions(
rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
}
- var samplerTags []Tag
+ var internalTags []Tag
newTrace := false
if !isSelfRef {
if !hasParent || !parent.IsValid() {
@@ -272,13 +273,12 @@ func (t *Tracer) startSpanWithOptions(
}
ctx.spanID = SpanID(ctx.traceID.Low)
ctx.parentID = 0
- ctx.flags = byte(0)
+ ctx.samplingState = &samplingState{
+ localRootSpan: ctx.spanID,
+ }
if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
- ctx.flags |= (flagSampled | flagDebug)
- samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}}
- } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled {
- ctx.flags |= flagSampled
- samplerTags = tags
+ ctx.samplingState.setDebugAndSampled()
+ internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
}
} else {
ctx.traceID = parent.traceID
@@ -290,7 +290,11 @@ func (t *Tracer) startSpanWithOptions(
ctx.spanID = SpanID(t.randomID())
ctx.parentID = parent.spanID
}
- ctx.flags = parent.flags
+ ctx.samplingState = parent.samplingState
+ if parent.remote {
+ ctx.samplingState.setFinal()
+ ctx.samplingState.localRootSpan = ctx.spanID
+ }
}
if hasParent {
// copy baggage items
@@ -305,17 +309,30 @@ func (t *Tracer) startSpanWithOptions(
sp := t.newSpan()
sp.context = ctx
+ sp.tracer = t
+ sp.operationName = operationName
+ sp.startTime = options.StartTime
+ sp.duration = 0
+ sp.references = references
+ sp.firstInProcess = rpcServer || sp.context.parentID == 0
+
+ if !sp.isSamplingFinalized() {
+ decision := t.sampler.OnCreateSpan(sp)
+ sp.applySamplingDecision(decision, false)
+ }
sp.observer = t.observer.OnStartSpan(sp, operationName, options)
- return t.startSpanInternal(
- sp,
- operationName,
- options.StartTime,
- samplerTags,
- options.Tags,
- newTrace,
- rpcServer,
- references,
- )
+
+ if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
+ if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
+ sp.tags = make([]Tag, 0, tagsTotalLength)
+ }
+ sp.tags = append(sp.tags, internalTags...)
+ for k, v := range options.Tags {
+ sp.setTagInternal(k, v, false)
+ }
+ }
+ t.emitNewSpanMetrics(sp, newTrace)
+ return sp
}
// Inject implements Inject() method of opentracing.Tracer
@@ -340,6 +357,7 @@ func (t *Tracer) Extract(
if err != nil {
return nil, err // ensure returned spanCtx is nil
}
+ spanCtx.remote = true
return spanCtx, nil
}
return nil, opentracing.ErrUnsupportedFormat
@@ -350,10 +368,10 @@ func (t *Tracer) Close() error {
t.reporter.Close()
t.sampler.Close()
if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
- mgr.Close()
+ _ = mgr.Close()
}
if throttler, ok := t.debugThrottler.(io.Closer); ok {
- throttler.Close()
+ _ = throttler.Close()
}
return nil
}
@@ -368,6 +386,7 @@ func (t *Tracer) Tags() []opentracing.Tag {
}
// getTag returns the value of specific tag, if not exists, return nil.
+// TODO only used by tests, move there.
func (t *Tracer) getTag(key string) (interface{}, bool) {
for _, tag := range t.tags {
if tag.key == key {
@@ -383,41 +402,21 @@ func (t *Tracer) newSpan() *Span {
return t.spanAllocator.Get()
}
-func (t *Tracer) startSpanInternal(
- sp *Span,
- operationName string,
- startTime time.Time,
- internalTags []Tag,
- tags opentracing.Tags,
- newTrace bool,
- rpcServer bool,
- references []Reference,
-) *Span {
- sp.tracer = t
- sp.operationName = operationName
- sp.startTime = startTime
- sp.duration = 0
- sp.references = references
- sp.firstInProcess = rpcServer || sp.context.parentID == 0
- if len(tags) > 0 || len(internalTags) > 0 {
- sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags))
- copy(sp.tags, internalTags)
- for k, v := range tags {
- sp.observer.OnSetTag(k, v)
- if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) {
- continue
- }
- sp.setTagNoLocking(k, v)
+// emitNewSpanMetrics generates metrics on the number of started spans and traces.
+// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the
+// server-side RPC span has the exact same trace/span/parent IDs as the
+// calling client-side span, but obviously the server side span is
+// no longer a root span of the trace.
+func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
+ if !sp.isSamplingFinalized() {
+ t.metrics.SpansStartedDelayedSampling.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedDelayedSampling.Inc(1)
}
- }
- // emit metrics
- if sp.context.IsSampled() {
+ // joining a trace is not possible, because sampling decision inherited from upstream is final
+ } else if sp.context.IsSampled() {
t.metrics.SpansStartedSampled.Inc(1)
if newTrace {
- // We cannot simply check for parentID==0 because in Zipkin model the
- // server-side RPC span has the exact same trace/span/parent IDs as the
- // calling client-side span, but obviously the server side span is
- // no longer a root span of the trace.
t.metrics.TracesStartedSampled.Inc(1)
} else if sp.firstInProcess {
t.metrics.TracesJoinedSampled.Inc(1)
@@ -430,15 +429,20 @@ func (t *Tracer) startSpanInternal(
t.metrics.TracesJoinedNotSampled.Inc(1)
}
}
- return sp
}
func (t *Tracer) reportSpan(sp *Span) {
- t.metrics.SpansFinished.Inc(1)
+ if !sp.isSamplingFinalized() {
+ t.metrics.SpansFinishedDelayedSampling.Inc(1)
+ } else if sp.context.IsSampled() {
+ t.metrics.SpansFinishedSampled.Inc(1)
+ } else {
+ t.metrics.SpansFinishedNotSampled.Inc(1)
+ }
- // Note: if the reporter is processing Span asynchronously need to Retain() it
- // otherwise, in the racing condition will be rewritten span data before it will be sent
- // * To remove object use method span.Release()
+ // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
+ // and then Release() it when no longer needed.
+ // Otherwise, the span may be reused for another trace and its data may be overwritten.
if sp.context.IsSampled() {
t.reporter.Report(sp)
}
@@ -466,6 +470,11 @@ func (t *Tracer) isDebugAllowed(operation string) bool {
return t.debugThrottler.IsAllowed(operation)
}
+// Sampler returns the sampler given to the tracer at creation.
+func (t *Tracer) Sampler() SamplerV2 {
+ return t.sampler
+}
+
// SelfRef creates an opentracing compliant SpanReference from a jaeger
// SpanContext. This is a factory function in order to encapsulate jaeger specific
// types.
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
index 1b8db9758..bf2f13165 100644
--- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -20,22 +20,15 @@ import (
)
// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
+//
+// TODO (breaking change) remove this interface in favor of public struct below
+//
+// Deprecated, use ReconfigurableRateLimiter.
type RateLimiter interface {
CheckCredit(itemCost float64) bool
}
-type rateLimiter struct {
- sync.Mutex
-
- creditsPerSecond float64
- balance float64
- maxBalance float64
- lastTick time.Time
-
- timeNow func() time.Time
-}
-
-// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a
+// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a
// credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional
// to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost
// of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased"
@@ -47,31 +40,73 @@ type rateLimiter struct {
//
// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
// as bytes/second, and calling CheckCredit() with the actual message size.
-func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter {
- return &rateLimiter{
+//
+// TODO (breaking change) rename to RateLimiter once the interface is removed
+type ReconfigurableRateLimiter struct {
+ lock sync.Mutex
+
+ creditsPerSecond float64
+ balance float64
+ maxBalance float64
+ lastTick time.Time
+
+ timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new ReconfigurableRateLimiter.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+ return &ReconfigurableRateLimiter{
creditsPerSecond: creditsPerSecond,
balance: maxBalance,
maxBalance: maxBalance,
lastTick: time.Now(),
- timeNow: time.Now}
+ timeNow: time.Now,
+ }
}
-func (b *rateLimiter) CheckCredit(itemCost float64) bool {
- b.Lock()
- defer b.Unlock()
- // calculate how much time passed since the last tick, and update current tick
- currentTime := b.timeNow()
- elapsedTime := currentTime.Sub(b.lastTick)
- b.lastTick = currentTime
- // calculate how much credit have we accumulated since the last tick
- b.balance += elapsedTime.Seconds() * b.creditsPerSecond
- if b.balance > b.maxBalance {
- b.balance = b.maxBalance
- }
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not less than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+
// if we have enough credits to pay for current item, then reduce balance and allow
- if b.balance >= itemCost {
- b.balance -= itemCost
+ if rl.balance >= itemCost {
+ rl.balance -= itemCost
+ return true
+ }
+ // otherwise check if balance can be increased due to time elapsed, and try again
+ rl.updateBalance()
+ if rl.balance >= itemCost {
+ rl.balance -= itemCost
return true
}
return false
}
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+ // calculate how much time passed since the last tick, and update current tick
+ currentTime := rl.timeNow()
+ elapsedTime := currentTime.Sub(rl.lastTick)
+ rl.lastTick = currentTime
+ // calculate how much credit we have accumulated since the last tick
+ rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+ if rl.balance > rl.maxBalance {
+ rl.balance = rl.maxBalance
+ }
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps to avoid thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+
+ rl.updateBalance() // get up to date balance
+ rl.balance = rl.balance * maxBalance / rl.maxBalance
+ rl.creditsPerSecond = creditsPerSecond
+ rl.maxBalance = maxBalance
+}
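
A short sketch of the reworked limiter's behaviour, using only NewRateLimiter, CheckCredit, and the new Update method shown above; the credit numbers are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// Allow roughly 2 operations per second, with a burst budget of 2 credits.
	limiter := utils.NewRateLimiter(2, 2)

	// The first two checks succeed on the initial balance; later ones depend on elapsed time.
	for i := 0; i < 5; i++ {
		fmt.Println("allowed:", limiter.CheckCredit(1.0))
	}

	// Reconfigure in place; the accumulated balance is pro-rated to the new maxBalance,
	// which is what lets remote strategy updates avoid resetting every bucket at once.
	limiter.Update(10, 10)
}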
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
index 636952b7f..98cab4b6e 100644
--- a/vendor/github.com/uber/jaeger-client-go/zipkin.go
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -55,7 +55,7 @@ func (p *zipkinPropagator) Inject(
carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
carrier.SetSpanID(uint64(ctx.SpanID()))
carrier.SetParentID(uint64(ctx.ParentID()))
- carrier.SetFlags(ctx.flags)
+ carrier.SetFlags(ctx.samplingState.flags())
return nil
}
@@ -71,6 +71,7 @@ func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
ctx.traceID.Low = carrier.TraceID()
ctx.spanID = SpanID(carrier.SpanID())
ctx.parentID = SpanID(carrier.ParentID())
- ctx.flags = carrier.Flags()
+ ctx.samplingState = &samplingState{}
+ ctx.samplingState.setFlags(carrier.Flags())
return ctx, nil
}
diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go
deleted file mode 100644
index 855e60aee..000000000
--- a/vendor/github.com/ulikunitz/xz/example.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "io"
- "log"
- "os"
-
- "github.com/ulikunitz/xz"
-)
-
-func main() {
- const text = "The quick brown fox jumps over the lazy dog.\n"
- var buf bytes.Buffer
- // compress text
- w, err := xz.NewWriter(&buf)
- if err != nil {
- log.Fatalf("xz.NewWriter error %s", err)
- }
- if _, err := io.WriteString(w, text); err != nil {
- log.Fatalf("WriteString error %s", err)
- }
- if err := w.Close(); err != nil {
- log.Fatalf("w.Close error %s", err)
- }
- // decompress buffer and write output to stdout
- r, err := xz.NewReader(&buf)
- if err != nil {
- log.Fatalf("NewReader error %s", err)
- }
- if _, err = io.Copy(os.Stdout, r); err != nil {
- log.Fatalf("io.Copy error %s", err)
- }
-}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 000000000..6d4d1be7b
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 000000000..0a4504f11
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,11 @@
+.DS_Store
+/vendor
+/cover
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 000000000..0f3769e5f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,27 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+go:
+ - 1.11.x
+ - 1.12.x
+
+matrix:
+ include:
+ - go: 1.12.x
+ env: NO_TEST=yes LINT=yes
+
+cache:
+ directories:
+ - vendor
+
+install:
+ - make install_ci
+
+script:
+ - test -n "$NO_TEST" || make test_ci
+ - test -n "$NO_TEST" || scripts/test-ubergo.sh
+ - test -z "$LINT" || make install_lint lint
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 000000000..8765c9fbc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 000000000..1ef263075
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,51 @@
+# Many Go tools take file globs or directories as arguments instead of packages.
+PACKAGE_FILES ?= *.go
+
+# For pre go1.6
+export GO15VENDOREXPERIMENT=1
+
+
+.PHONY: build
+build:
+ go build -i ./...
+
+
+.PHONY: install
+install:
+ glide --version || go get github.com/Masterminds/glide
+ glide install
+
+
+.PHONY: test
+test:
+ go test -cover -race ./...
+
+
+.PHONY: install_ci
+install_ci: install
+ go get github.com/wadey/gocovmerge
+ go get github.com/mattn/goveralls
+ go get golang.org/x/tools/cmd/cover
+
+.PHONY: install_lint
+install_lint:
+ go get golang.org/x/lint/golint
+
+
+.PHONY: lint
+lint:
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @go vet ./... 2>&1 | tee -a lint.log
+ @echo "Checking lint..."
+ @golint $$(go list ./...) 2>&1 | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log
+ @[ ! -s lint.log ]
+
+
+.PHONY: test_ci
+test_ci: install_ci build
+ ./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 000000000..62eb8e576
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,36 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+`go get -u go.uber.org/atomic`
+
+## Usage
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+Stable.
+
+___
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go
new file mode 100644
index 000000000..1db6849fc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/atomic.go
@@ -0,0 +1,351 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+// Int32 is an atomic wrapper around an int32.
+type Int32 struct{ v int32 }
+
+// NewInt32 creates an Int32.
+func NewInt32(i int32) *Int32 {
+ return &Int32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+ return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+ return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+ atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+ return atomic.SwapInt32(&i.v, n)
+}
+
+// Int64 is an atomic wrapper around an int64.
+type Int64 struct{ v int64 }
+
+// NewInt64 creates an Int64.
+func NewInt64(i int64) *Int64 {
+ return &Int64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+ return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+ return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+ atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+ return atomic.SwapInt64(&i.v, n)
+}
+
+// Uint32 is an atomic wrapper around a uint32.
+type Uint32 struct{ v uint32 }
+
+// NewUint32 creates a Uint32.
+func NewUint32(i uint32) *Uint32 {
+ return &Uint32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
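+	// sync/atomic has no subtracting counterpart to AddUint32; ^(n - 1) is the
+	// two's-complement negation of n, so adding it subtracts n (see the
+	// sync/atomic documentation for this idiom).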
+ return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+ atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+ return atomic.SwapUint32(&i.v, n)
+}
+
+// Uint64 is an atomic wrapper around a uint64.
+type Uint64 struct{ v uint64 }
+
+// NewUint64 creates a Uint64.
+func NewUint64(i uint64) *Uint64 {
+ return &Uint64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
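+	// As with Uint32.Sub, adding ^(n - 1) subtracts n via two's-complement negation.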
+ return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+ atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+ return atomic.SwapUint64(&i.v, n)
+}
+
+// Bool is an atomic Boolean.
+type Bool struct{ v uint32 }
+
+// NewBool creates a Bool.
+func NewBool(initial bool) *Bool {
+ return &Bool{boolToInt(initial)}
+}
+
+// Load atomically loads the Boolean.
+func (b *Bool) Load() bool {
+ return truthy(atomic.LoadUint32(&b.v))
+}
+
+// CAS is an atomic compare-and-swap.
+func (b *Bool) CAS(old, new bool) bool {
+ return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
+}
+
+// Store atomically stores the passed value.
+func (b *Bool) Store(new bool) {
+ atomic.StoreUint32(&b.v, boolToInt(new))
+}
+
+// Swap sets the given value and returns the previous value.
+func (b *Bool) Swap(new bool) bool {
+ return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
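+	// Add flips the parity of v; the previous value is new - 1, and truthy
+	// reports its lowest bit, so the Boolean prior to the toggle is returned.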
+ return truthy(atomic.AddUint32(&b.v, 1) - 1)
+}
+
+func truthy(n uint32) bool {
+ return n&1 == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Float64 is an atomic wrapper around float64.
+type Float64 struct {
+ v uint64
+}
+
+// NewFloat64 creates a Float64.
+func NewFloat64(f float64) *Float64 {
+ return &Float64{math.Float64bits(f)}
+}
+
+// Load atomically loads the wrapped value.
+func (f *Float64) Load() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&f.v))
+}
+
+// Store atomically stores the passed value.
+func (f *Float64) Store(s float64) {
+ atomic.StoreUint64(&f.v, math.Float64bits(s))
+}
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
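+	// sync/atomic has no atomic float operations, so retry a compare-and-swap
+	// on the underlying bit pattern until no concurrent writer intervenes.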
+ for {
+ old := f.Load()
+ new := old + s
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+ return f.Add(-s)
+}
+
+// CAS is an atomic compare-and-swap.
+func (f *Float64) CAS(old, new float64) bool {
+ return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new))
+}
+
+// Duration is an atomic wrapper around time.Duration
+// https://godoc.org/time#Duration
+type Duration struct {
+ v Int64
+}
+
+// NewDuration creates a Duration.
+func NewDuration(d time.Duration) *Duration {
+ return &Duration{v: *NewInt64(int64(d))}
+}
+
+// Load atomically loads the wrapped value.
+func (d *Duration) Load() time.Duration {
+ return time.Duration(d.v.Load())
+}
+
+// Store atomically stores the passed value.
+func (d *Duration) Store(n time.Duration) {
+ d.v.Store(int64(n))
+}
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(n time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(n)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(n time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(n)))
+}
+
+// Swap atomically swaps the wrapped time.Duration and returns the old value.
+func (d *Duration) Swap(n time.Duration) time.Duration {
+ return time.Duration(d.v.Swap(int64(n)))
+}
+
+// CAS is an atomic compare-and-swap.
+func (d *Duration) CAS(old, new time.Duration) bool {
+ return d.v.CAS(int64(old), int64(new))
+}
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct{ atomic.Value }
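A minimal usage sketch of the `Float64` and `Duration` wrappers added above, imported as `go.uber.org/atomic` (the path this file is vendored under):

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

func main() {
	// Float64.Add loops on CAS internally, so concurrent adders never lose an update.
	total := atomic.NewFloat64(0)
	total.Add(2.5)

	// Duration stores the duration's nanosecond count in an atomic Int64.
	timeout := atomic.NewDuration(time.Second)
	timeout.CAS(time.Second, 2*time.Second)

	fmt.Println(total.Load(), timeout.Load()) // 2.5 2s
}
```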
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 000000000..0489d19ba
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper around Value for errors.
+type Error struct{ v Value }
+
+// errorHolder is a non-nil holder for an error object.
+// atomic.Value panics when storing a nil object, so the error needs to be
+// wrapped in a valid holder object first.
+type errorHolder struct{ err error }
+
+// NewError creates a new atomic Error object.
+func NewError(err error) *Error {
+ e := &Error{}
+ if err != nil {
+ e.Store(err)
+ }
+ return e
+}
+
+// Load atomically loads the wrapped error.
+func (e *Error) Load() error {
+ v := e.v.Load()
+ if v == nil {
+ return nil
+ }
+
+ eh := v.(errorHolder)
+ return eh.err
+}
+
+// Store atomically stores the error.
+// NOTE: a new holder object is allocated on each Store call.
+func (e *Error) Store(err error) {
+ e.v.Store(errorHolder{err: err})
+}
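A short usage sketch of the `Error` wrapper above; `Load` returns nil until an error has been stored, and storing nil is simply skipped by the constructor:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	last := atomic.NewError(nil) // no holder is stored for a nil error

	last.Store(errors.New("connection reset"))
	if err := last.Load(); err != nil {
		fmt.Println("last error:", err)
	}
}
```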
diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock
new file mode 100644
index 000000000..3c72c5997
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.lock
@@ -0,0 +1,17 @@
+hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53
+updated: 2016-10-27T00:10:51.16960137-07:00
+imports: []
+testImports:
+- name: github.com/davecgh/go-spew
+ version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
+ subpackages:
+ - spew
+- name: github.com/pmezard/go-difflib
+ version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
+ subpackages:
+ - difflib
+- name: github.com/stretchr/testify
+ version: d77da356e56a7428ad25149ca77381849a6a5232
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml
new file mode 100644
index 000000000..4cf608ec0
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.yaml
@@ -0,0 +1,6 @@
+package: go.uber.org/atomic
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 000000000..ede8136fa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper around Value for strings.
+type String struct{ v Value }
+
+// NewString creates a String.
+func NewString(str string) *String {
+ s := &String{}
+ if str != "" {
+ s.Store(str)
+ }
+ return s
+}
+
+// Load atomically loads the wrapped string.
+func (s *String) Load() string {
+ v := s.v.Load()
+ if v == nil {
+ return ""
+ }
+ return v.(string)
+}
+
+// Store atomically stores the passed string.
+// Note: Converting the string to an interface{} to store in the Value
+// requires an allocation.
+func (s *String) Store(str string) {
+ s.v.Store(str)
+}
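Likewise for the `String` wrapper, whose zero value loads as the empty string:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	s := atomic.NewString("") // equivalent to the zero value
	s.Store("ready")
	fmt.Println(s.Load()) // ready
}
```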
diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go
deleted file mode 100644
index 5d052781b..000000000
--- a/vendor/golang.org/x/net/html/atom/gen.go
+++ /dev/null
@@ -1,712 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-//go:generate go run gen.go
-//go:generate go run gen.go -test
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io/ioutil"
- "math/rand"
- "os"
- "sort"
- "strings"
-)
-
-// identifier converts s to a Go exported identifier.
-// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
-func identifier(s string) string {
- b := make([]byte, 0, len(s))
- cap := true
- for _, c := range s {
- if c == '-' {
- cap = true
- continue
- }
- if cap && 'a' <= c && c <= 'z' {
- c -= 'a' - 'A'
- }
- cap = false
- b = append(b, byte(c))
- }
- return string(b)
-}
-
-var test = flag.Bool("test", false, "generate table_test.go")
-
-func genFile(name string, buf *bytes.Buffer) {
- b, err := format.Source(buf.Bytes())
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- if err := ioutil.WriteFile(name, b, 0644); err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
-
-func main() {
- flag.Parse()
-
- var all []string
- all = append(all, elements...)
- all = append(all, attributes...)
- all = append(all, eventHandlers...)
- all = append(all, extra...)
- sort.Strings(all)
-
- // uniq - lists have dups
- w := 0
- for _, s := range all {
- if w == 0 || all[w-1] != s {
- all[w] = s
- w++
- }
- }
- all = all[:w]
-
- if *test {
- var buf bytes.Buffer
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
- fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
- fmt.Fprintln(&buf, "package atom\n")
- fmt.Fprintln(&buf, "var testAtomList = []string{")
- for _, s := range all {
- fmt.Fprintf(&buf, "\t%q,\n", s)
- }
- fmt.Fprintln(&buf, "}")
-
- genFile("table_test.go", &buf)
- return
- }
-
- // Find hash that minimizes table size.
- var best *table
- for i := 0; i < 1000000; i++ {
- if best != nil && 1<<(best.k-1) < len(all) {
- break
- }
- h := rand.Uint32()
- for k := uint(0); k <= 16; k++ {
- if best != nil && k >= best.k {
- break
- }
- var t table
- if t.init(h, k, all) {
- best = &t
- break
- }
- }
- }
- if best == nil {
- fmt.Fprintf(os.Stderr, "failed to construct string table\n")
- os.Exit(1)
- }
-
- // Lay out strings, using overlaps when possible.
- layout := append([]string{}, all...)
-
- // Remove strings that are substrings of other strings
- for changed := true; changed; {
- changed = false
- for i, s := range layout {
- if s == "" {
- continue
- }
- for j, t := range layout {
- if i != j && t != "" && strings.Contains(s, t) {
- changed = true
- layout[j] = ""
- }
- }
- }
- }
-
- // Join strings where one suffix matches another prefix.
- for {
- // Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
- // maximizing overlap length k.
- besti := -1
- bestj := -1
- bestk := 0
- for i, s := range layout {
- if s == "" {
- continue
- }
- for j, t := range layout {
- if i == j {
- continue
- }
- for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
- if s[len(s)-k:] == t[:k] {
- besti = i
- bestj = j
- bestk = k
- }
- }
- }
- }
- if bestk > 0 {
- layout[besti] += layout[bestj][bestk:]
- layout[bestj] = ""
- continue
- }
- break
- }
-
- text := strings.Join(layout, "")
-
- atom := map[string]uint32{}
- for _, s := range all {
- off := strings.Index(text, s)
- if off < 0 {
- panic("lost string " + s)
- }
- atom[s] = uint32(off<<8 | len(s))
- }
-
- var buf bytes.Buffer
- // Generate the Go code.
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
- fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
- fmt.Fprintln(&buf, "package atom\n\nconst (")
-
- // compute max len
- maxLen := 0
- for _, s := range all {
- if maxLen < len(s) {
- maxLen = len(s)
- }
- fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
- }
- fmt.Fprintln(&buf, ")\n")
-
- fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
- fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
-
- fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
- for i, s := range best.tab {
- if s == "" {
- continue
- }
- fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
- }
- fmt.Fprintf(&buf, "}\n")
- datasize := (1 << best.k) * 4
-
- fmt.Fprintln(&buf, "const atomText =")
- textsize := len(text)
- for len(text) > 60 {
- fmt.Fprintf(&buf, "\t%q +\n", text[:60])
- text = text[60:]
- }
- fmt.Fprintf(&buf, "\t%q\n\n", text)
-
- genFile("table.go", &buf)
-
- fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
-}
-
-type byLen []string
-
-func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
-func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x byLen) Len() int { return len(x) }
-
-// fnv computes the FNV hash with an arbitrary starting value h.
-func fnv(h uint32, s string) uint32 {
- for i := 0; i < len(s); i++ {
- h ^= uint32(s[i])
- h *= 16777619
- }
- return h
-}
-
-// A table represents an attempt at constructing the lookup table.
-// The lookup table uses cuckoo hashing, meaning that each string
-// can be found in one of two positions.
-type table struct {
- h0 uint32
- k uint
- mask uint32
- tab []string
-}
-
-// hash returns the two hashes for s.
-func (t *table) hash(s string) (h1, h2 uint32) {
- h := fnv(t.h0, s)
- h1 = h & t.mask
- h2 = (h >> 16) & t.mask
- return
-}
-
-// init initializes the table with the given parameters.
-// h0 is the initial hash value,
-// k is the number of bits of hash value to use, and
-// x is the list of strings to store in the table.
-// init returns false if the table cannot be constructed.
-func (t *table) init(h0 uint32, k uint, x []string) bool {
- t.h0 = h0
- t.k = k
- t.tab = make([]string, 1<<k)
- t.mask = 1<<k - 1
- for _, s := range x {
- if !t.insert(s) {
- return false
- }
- }
- return true
-}
-
-// insert inserts s in the table.
-func (t *table) insert(s string) bool {
- h1, h2 := t.hash(s)
- if t.tab[h1] == "" {
- t.tab[h1] = s
- return true
- }
- if t.tab[h2] == "" {
- t.tab[h2] = s
- return true
- }
- if t.push(h1, 0) {
- t.tab[h1] = s
- return true
- }
- if t.push(h2, 0) {
- t.tab[h2] = s
- return true
- }
- return false
-}
-
-// push attempts to push aside the entry in slot i.
-func (t *table) push(i uint32, depth int) bool {
- if depth > len(t.tab) {
- return false
- }
- s := t.tab[i]
- h1, h2 := t.hash(s)
- j := h1 + h2 - i
- if t.tab[j] != "" && !t.push(j, depth+1) {
- return false
- }
- t.tab[j] = s
- return true
-}
-
-// The lists of element names and attribute keys were taken from
-// https://html.spec.whatwg.org/multipage/indices.html#index
-// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
-
-// "command", "keygen" and "menuitem" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var elements = []string{
- "a",
- "abbr",
- "address",
- "area",
- "article",
- "aside",
- "audio",
- "b",
- "base",
- "bdi",
- "bdo",
- "blockquote",
- "body",
- "br",
- "button",
- "canvas",
- "caption",
- "cite",
- "code",
- "col",
- "colgroup",
- "command",
- "data",
- "datalist",
- "dd",
- "del",
- "details",
- "dfn",
- "dialog",
- "div",
- "dl",
- "dt",
- "em",
- "embed",
- "fieldset",
- "figcaption",
- "figure",
- "footer",
- "form",
- "h1",
- "h2",
- "h3",
- "h4",
- "h5",
- "h6",
- "head",
- "header",
- "hgroup",
- "hr",
- "html",
- "i",
- "iframe",
- "img",
- "input",
- "ins",
- "kbd",
- "keygen",
- "label",
- "legend",
- "li",
- "link",
- "main",
- "map",
- "mark",
- "menu",
- "menuitem",
- "meta",
- "meter",
- "nav",
- "noscript",
- "object",
- "ol",
- "optgroup",
- "option",
- "output",
- "p",
- "param",
- "picture",
- "pre",
- "progress",
- "q",
- "rp",
- "rt",
- "ruby",
- "s",
- "samp",
- "script",
- "section",
- "select",
- "slot",
- "small",
- "source",
- "span",
- "strong",
- "style",
- "sub",
- "summary",
- "sup",
- "table",
- "tbody",
- "td",
- "template",
- "textarea",
- "tfoot",
- "th",
- "thead",
- "time",
- "title",
- "tr",
- "track",
- "u",
- "ul",
- "var",
- "video",
- "wbr",
-}
-
-// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
-//
-// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
-// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var attributes = []string{
- "abbr",
- "accept",
- "accept-charset",
- "accesskey",
- "action",
- "allowfullscreen",
- "allowpaymentrequest",
- "allowusermedia",
- "alt",
- "as",
- "async",
- "autocomplete",
- "autofocus",
- "autoplay",
- "challenge",
- "charset",
- "checked",
- "cite",
- "class",
- "color",
- "cols",
- "colspan",
- "command",
- "content",
- "contenteditable",
- "contextmenu",
- "controls",
- "coords",
- "crossorigin",
- "data",
- "datetime",
- "default",
- "defer",
- "dir",
- "dirname",
- "disabled",
- "download",
- "draggable",
- "dropzone",
- "enctype",
- "for",
- "form",
- "formaction",
- "formenctype",
- "formmethod",
- "formnovalidate",
- "formtarget",
- "headers",
- "height",
- "hidden",
- "high",
- "href",
- "hreflang",
- "http-equiv",
- "icon",
- "id",
- "inputmode",
- "integrity",
- "is",
- "ismap",
- "itemid",
- "itemprop",
- "itemref",
- "itemscope",
- "itemtype",
- "keytype",
- "kind",
- "label",
- "lang",
- "list",
- "loop",
- "low",
- "manifest",
- "max",
- "maxlength",
- "media",
- "mediagroup",
- "method",
- "min",
- "minlength",
- "multiple",
- "muted",
- "name",
- "nomodule",
- "nonce",
- "novalidate",
- "open",
- "optimum",
- "pattern",
- "ping",
- "placeholder",
- "playsinline",
- "poster",
- "preload",
- "radiogroup",
- "readonly",
- "referrerpolicy",
- "rel",
- "required",
- "reversed",
- "rows",
- "rowspan",
- "sandbox",
- "spellcheck",
- "scope",
- "scoped",
- "seamless",
- "selected",
- "shape",
- "size",
- "sizes",
- "sortable",
- "sorted",
- "slot",
- "span",
- "spellcheck",
- "src",
- "srcdoc",
- "srclang",
- "srcset",
- "start",
- "step",
- "style",
- "tabindex",
- "target",
- "title",
- "translate",
- "type",
- "typemustmatch",
- "updateviacache",
- "usemap",
- "value",
- "width",
- "workertype",
- "wrap",
-}
-
-// "onautocomplete", "onautocompleteerror", "onmousewheel",
-// "onshow" and "onsort" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var eventHandlers = []string{
- "onabort",
- "onautocomplete",
- "onautocompleteerror",
- "onauxclick",
- "onafterprint",
- "onbeforeprint",
- "onbeforeunload",
- "onblur",
- "oncancel",
- "oncanplay",
- "oncanplaythrough",
- "onchange",
- "onclick",
- "onclose",
- "oncontextmenu",
- "oncopy",
- "oncuechange",
- "oncut",
- "ondblclick",
- "ondrag",
- "ondragend",
- "ondragenter",
- "ondragexit",
- "ondragleave",
- "ondragover",
- "ondragstart",
- "ondrop",
- "ondurationchange",
- "onemptied",
- "onended",
- "onerror",
- "onfocus",
- "onhashchange",
- "oninput",
- "oninvalid",
- "onkeydown",
- "onkeypress",
- "onkeyup",
- "onlanguagechange",
- "onload",
- "onloadeddata",
- "onloadedmetadata",
- "onloadend",
- "onloadstart",
- "onmessage",
- "onmessageerror",
- "onmousedown",
- "onmouseenter",
- "onmouseleave",
- "onmousemove",
- "onmouseout",
- "onmouseover",
- "onmouseup",
- "onmousewheel",
- "onwheel",
- "onoffline",
- "ononline",
- "onpagehide",
- "onpageshow",
- "onpaste",
- "onpause",
- "onplay",
- "onplaying",
- "onpopstate",
- "onprogress",
- "onratechange",
- "onreset",
- "onresize",
- "onrejectionhandled",
- "onscroll",
- "onsecuritypolicyviolation",
- "onseeked",
- "onseeking",
- "onselect",
- "onshow",
- "onsort",
- "onstalled",
- "onstorage",
- "onsubmit",
- "onsuspend",
- "ontimeupdate",
- "ontoggle",
- "onunhandledrejection",
- "onunload",
- "onvolumechange",
- "onwaiting",
-}
-
-// extra are ad-hoc values not covered by any of the lists above.
-var extra = []string{
- "acronym",
- "align",
- "annotation",
- "annotation-xml",
- "applet",
- "basefont",
- "bgsound",
- "big",
- "blink",
- "center",
- "color",
- "desc",
- "face",
- "font",
- "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
- "foreignobject",
- "frame",
- "frameset",
- "image",
- "isindex",
- "listing",
- "malignmark",
- "marquee",
- "math",
- "mglyph",
- "mi",
- "mn",
- "mo",
- "ms",
- "mtext",
- "nobr",
- "noembed",
- "noframes",
- "plaintext",
- "prompt",
- "public",
- "rb",
- "rtc",
- "spacer",
- "strike",
- "svg",
- "system",
- "tt",
- "xmp",
-}
diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go
deleted file mode 100644
index 4548b993d..000000000
--- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
-// This program must be run after mksyscall.go.
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-)
-
-func main() {
- in1, err := ioutil.ReadFile("syscall_darwin.go")
- if err != nil {
- log.Fatalf("can't open syscall_darwin.go: %s", err)
- }
- arch := os.Args[1]
- in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
- if err != nil {
- log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
- }
- in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
- if err != nil {
- log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
- }
- in := string(in1) + string(in2) + string(in3)
-
- trampolines := map[string]bool{}
-
- var out bytes.Buffer
-
- fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
- fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
- fmt.Fprintf(&out, "\n")
- fmt.Fprintf(&out, "// +build go1.12\n")
- fmt.Fprintf(&out, "\n")
- fmt.Fprintf(&out, "#include \"textflag.h\"\n")
- for _, line := range strings.Split(in, "\n") {
- if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
- continue
- }
- fn := line[5 : len(line)-13]
- if !trampolines[fn] {
- trampolines[fn] = true
- fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
- fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
- }
- }
- err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644)
- if err != nil {
- log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err)
- }
-}
diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go
deleted file mode 100644
index eb4332059..000000000
--- a/vendor/golang.org/x/sys/unix/mkpost.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkpost processes the output of cgo -godefs to
-// modify the generated types. It is used to clean up
-// the sys API in an architecture specific manner.
-//
-// mkpost is run after cgo -godefs; see README.md.
-package main
-
-import (
- "bytes"
- "fmt"
- "go/format"
- "io/ioutil"
- "log"
- "os"
- "regexp"
-)
-
-func main() {
- // Get the OS and architecture (using GOARCH_TARGET if it exists)
- goos := os.Getenv("GOOS")
- goarch := os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check that we are using the Docker-based build system if we should be.
- if goos == "linux" {
- if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
- os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
- os.Stderr.WriteString("See README.md\n")
- os.Exit(1)
- }
- }
-
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- log.Fatal(err)
- }
-
- if goos == "aix" {
- // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
- // to avoid having both StTimespec and Timespec.
- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
- b = sttimespec.ReplaceAll(b, []byte("Timespec"))
- }
-
- // Intentionally export __val fields in Fsid and Sigset_t
- valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
- b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
-
- // Intentionally export __fds_bits field in FdSet
- fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
- b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
-
- // If we have empty Ptrace structs, we should delete them. Only s390x emits
- // nonempty Ptrace structs.
- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
- b = ptraceRexexp.ReplaceAll(b, nil)
-
- // Replace the control_regs union with a blank identifier for now.
- controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
- b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
-
- // Remove fields that are added by glibc
-	// Note that this is unstable as the identifiers are private.
- removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Convert [65]int8 to [65]byte in Utsname members to simplify
- // conversion to string; see golang.org/issue/20753
- convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
- b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
-
- // Convert [1024]int8 to [1024]byte in Ptmget members
- convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
- b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
-
- // Remove spare fields (e.g. in Statx_t)
- spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
- b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove cgo padding fields
- removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
- b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove padding, hidden, or unused fields
- removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove the first line of warning from cgo
- b = b[bytes.IndexByte(b, '\n')+1:]
- // Modify the command in the header to include:
- // mkpost, our own warning, and a build tag.
- replacement := fmt.Sprintf(`$1 | go run mkpost.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s,%s`, goarch, goos)
- cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
- b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
-
- // Rename Stat_t time fields
- if goos == "freebsd" && goarch == "386" {
- // Hide Stat_t.[AMCB]tim_ext fields
- renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
- b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
- }
- renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
- b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
-
- // gofmt
- b, err = format.Source(b)
- if err != nil {
- log.Fatal(err)
- }
-
- os.Stdout.Write(b)
-}
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go
deleted file mode 100644
index e4af9424e..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_darwin.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named errno.
-
-A line beginning with //sysnb is like //sys, except that the
-goroutine will not be suspended during the execution of the system
-call. This must only be used for system calls which can never
-block, as otherwise the system call could cause all goroutines to
-hang.
-*/
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- plan9 = flag.Bool("plan9", false, "plan9")
- openbsd = flag.Bool("openbsd", false, "openbsd")
- netbsd = flag.Bool("netbsd", false, "netbsd")
- dragonfly = flag.Bool("dragonfly", false, "dragonfly")
- arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
- tags = flag.String("tags", "", "build tags")
- filename = flag.String("output", "", "output file name (standard output if omitted)")
-)
-
-// cmdLine returns this program's commandline arguments
-func cmdLine() string {
- return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- // Get the OS and architecture (using GOARCH_TARGET if it exists)
- goos := os.Getenv("GOOS")
- if goos == "" {
- fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
- os.Exit(1)
- }
- goarch := os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
-
- // Check that we are using the Docker-based build system if we should
- if goos == "linux" {
- if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
- fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
- fmt.Fprintf(os.Stderr, "See README.md\n")
- os.Exit(1)
- }
- }
-
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- libc := false
- if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") {
- libc = true
- }
- trampolines := map[string]bool{}
-
- text := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, errno error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
-
- // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
- if goos == "darwin" && !libc && funct == "ClockGettime" {
- continue
- }
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // Go function header.
- outDecl := ""
- if len(out) > 0 {
- outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
- }
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
-
- // Check if err return available
- errvar := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- break
- }
- }
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\tvar _p%d *byte\n", n)
- text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\tvar _p%d *byte\n", n)
- text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass dummy pointer in that case.
- // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
- text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && (*openbsd || *netbsd) {
- args = append(args, "0")
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else if endianness == "little-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- } else if p.Type == "int64" && *dragonfly {
- if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
- args = append(args, "0")
- }
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else if endianness == "little-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
- if len(args)%2 == 1 && *arm {
- // arm abi specifies 64-bit argument uses
- // (even, odd) pair
- args = append(args, "0")
- }
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
-
- // Determine which form to use; pad args with zeros.
- asm := "Syscall"
- if nonblock != nil {
- if errvar == "" && goos == "linux" {
- asm = "RawSyscallNoError"
- } else {
- asm = "RawSyscall"
- }
- } else {
- if errvar == "" && goos == "linux" {
- asm = "SyscallNoError"
- }
- }
- if len(args) <= 3 {
- for len(args) < 3 {
- args = append(args, "0")
- }
- } else if len(args) <= 6 {
- asm += "6"
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else if len(args) <= 9 {
- asm += "9"
- for len(args) < 9 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
- }
-
- // System call number.
- if sysname == "" {
- sysname = "SYS_" + funct
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToUpper(sysname)
- }
-
- var libcFn string
- if libc {
- asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
- sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
- sysname = strings.ToLower(sysname) // lowercase
- if sysname == "getdirentries64" {
- // Special case - libSystem name and
- // raw syscall name don't match.
- sysname = "__getdirentries64"
- }
- libcFn = sysname
- sysname = "funcPC(libc_" + sysname + "_trampoline)"
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" && !*plan9 {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else if p.Name == "err" && *plan9 {
- ret[0] = "r0"
- ret[2] = "e1"
- break
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" || *plan9 {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- if errvar == "" && goos == "linux" {
- // raw syscall without error on Linux, see golang.org/issue/22924
- text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- }
- text += body
-
- if *plan9 && ret[2] == "e1" {
- text += "\tif int32(r0) == -1 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- } else if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = errnoErr(e1)\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n\n"
-
- if libc && !trampolines[libcFn] {
- // some system calls share a trampoline, like read and readlen.
- trampolines[libcFn] = true
- // Declare assembly trampoline.
- text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
- // Assembly trampoline calls the libc_* function, which this magic
- // redirects to use the function from libSystem.
- text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
- text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
- text += "\n"
- }
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-%s
-`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
deleted file mode 100644
index 3be3cdfc3..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_aix.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
- * If go func name needs to be different than its libc name,
- * or the function is not in libc, name could be specified
- * at the end, after "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-*/
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this program's commandline arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- text := ""
- cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // Check if value return, err return available
- errvar := ""
- retvar := ""
- rettype := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- } else {
- retvar = p.Name
- rettype = p.Type
- }
- }
-
- // System call name.
- if sysname == "" {
- sysname = funct
- }
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- cRettype := ""
- if rettype == "unsafe.Pointer" {
- cRettype = "uintptr_t"
- } else if rettype == "uintptr" {
- cRettype = "uintptr_t"
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
- cRettype = "uintptr_t"
- } else if rettype == "int" {
- cRettype = "int"
- } else if rettype == "int32" {
- cRettype = "int"
- } else if rettype == "int64" {
- cRettype = "long long"
- } else if rettype == "uint32" {
- cRettype = "unsigned int"
- } else if rettype == "uint64" {
- cRettype = "unsigned long long"
- } else {
- cRettype = "int"
- }
- if sysname == "exit" {
- cRettype = "void"
- }
-
- // Change p.Types to c
- var cIn []string
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "string" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t", "size_t")
- } else if p.Type == "unsafe.Pointer" {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "uintptr" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "int" {
- cIn = append(cIn, "int")
- } else if p.Type == "int32" {
- cIn = append(cIn, "int")
- } else if p.Type == "int64" {
- cIn = append(cIn, "long long")
- } else if p.Type == "uint32" {
- cIn = append(cIn, "unsigned int")
- } else if p.Type == "uint64" {
- cIn = append(cIn, "unsigned long long")
- } else {
- cIn = append(cIn, "int")
- }
- }
-
- if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- cExtern += "#define c_select select\n"
- }
- // Imports of system calls from libc
- cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
- cIn := strings.Join(cIn, ", ")
- cExtern += fmt.Sprintf("(%s);\n", cIn)
- }
-
- // So file name.
- if *aix {
- if modname == "" {
- modname = "libc.a/shr_64.o"
- } else {
- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
- os.Exit(1)
- }
- }
-
- strconvfunc := "C.CString"
-
- // Go function header.
- if outps != "" {
- outps = fmt.Sprintf(" (%s)", outps)
- }
- if text != "" {
- text += "\n"
- }
-
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- argN := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
- n++
- text += fmt.Sprintf("\tvar _p%d int\n", n)
- text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
- args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- n++
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("_p%d", n))
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
- } else if p.Type == "unsafe.Pointer" {
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
- } else if p.Type == "int" {
- if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
- args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
- } else if argN == 0 && funct == "fcntl" {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- }
- } else if p.Type == "int32" {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- } else if p.Type == "int64" {
- args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
- } else if p.Type == "uint32" {
- args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
- } else if p.Type == "uint64" {
- args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
- } else if p.Type == "uintptr" {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- }
- argN++
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := ""
- if sysname == "exit" {
- if errvar != "" {
- call += "er :="
- } else {
- call += ""
- }
- } else if errvar != "" {
- call += "r0,er :="
- } else if retvar != "" {
- call += "r0,_ :="
- } else {
- call += ""
- }
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
- } else {
- call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
- }
-
- // Assign return values.
- body := ""
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- } else {
- reg = "r0"
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
-
- // verify return
- if sysname != "exit" && errvar != "" {
- if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
- body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- } else {
- body += "\tif (r0 ==-1 && er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- }
- } else if errvar != "" {
- body += "\tif (er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- }
-
- text += fmt.Sprintf("\t%s\n", call)
- text += body
-
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-
-%s
-*/
-import "C"
-import (
- "unsafe"
-)
-
-
-%s
-
-%s
-`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
deleted file mode 100644
index c96009951..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_aix.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
-  * If the go func name needs to be different from its libc name,
-    or the function is not in libc, the name can be specified
-    at the end, after the "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-
-
-This program will generate three files and handle both gc and gccgo implementations:
- - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
- - zsyscall_aix_ppc64_gc.go: gc part with //go:cgo_import_dynamic and a call to syscall6
- - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
-
- The generated code looks like this
-
-zsyscall_aix_ppc64.go
-func asyscall(...) (n int, err error) {
- // Pointer Creation
- r1, e1 := callasyscall(...)
- // Type Conversion
- // Error Handler
- return
-}
-
-zsyscall_aix_ppc64_gc.go
-//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
-//go:linkname libc_asyscall libc_asyscall
-var asyscall syscallFunc
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
- return
-}
-
-zsyscall_aix_ppc64_gccgo.go
-
-// int asyscall(...)
-
-import "C"
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.asyscall(...))
- e1 = syscall.GetErrno()
- return
-}
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this program's command-line arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is a function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- // GCCGO
- textgccgo := ""
- cExtern := "/*\n#include <stdint.h>\n"
- // GC
- textgc := ""
- dynimports := ""
- linknames := ""
- var vars []string
- // COMMON
- textcommon := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- if sysname == "" {
- sysname = funct
- }
-
- onlyCommon := false
- if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
-				// This function calls another syscall that is already implemented.
-				// Therefore, the gc and gccgo parts must not be generated.
- onlyCommon = true
- }
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
-
- textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- if !onlyCommon {
- textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- }
-
-			// Check whether value and err returns are available
- errvar := ""
- rettype := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- } else {
- rettype = p.Type
- }
- }
-
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- // GCCGO Prototype return type
- cRettype := ""
- if rettype == "unsafe.Pointer" {
- cRettype = "uintptr_t"
- } else if rettype == "uintptr" {
- cRettype = "uintptr_t"
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
- cRettype = "uintptr_t"
- } else if rettype == "int" {
- cRettype = "int"
- } else if rettype == "int32" {
- cRettype = "int"
- } else if rettype == "int64" {
- cRettype = "long long"
- } else if rettype == "uint32" {
- cRettype = "unsigned int"
- } else if rettype == "uint64" {
- cRettype = "unsigned long long"
- } else {
- cRettype = "int"
- }
- if sysname == "exit" {
- cRettype = "void"
- }
-
- // GCCGO Prototype arguments type
- var cIn []string
- for i, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "string" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t", "size_t")
- } else if p.Type == "unsafe.Pointer" {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "uintptr" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "int" {
- if (i == 0 || i == 2) && funct == "fcntl" {
-					// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
- cIn = append(cIn, "uintptr_t")
- } else {
- cIn = append(cIn, "int")
- }
-
- } else if p.Type == "int32" {
- cIn = append(cIn, "int")
- } else if p.Type == "int64" {
- cIn = append(cIn, "long long")
- } else if p.Type == "uint32" {
- cIn = append(cIn, "unsigned int")
- } else if p.Type == "uint64" {
- cIn = append(cIn, "unsigned long long")
- } else {
- cIn = append(cIn, "int")
- }
- }
-
- if !onlyCommon {
- // GCCGO Prototype Generation
- // Imports of system calls from libc
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- cExtern += "#define c_select select\n"
- }
- cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
- cIn := strings.Join(cIn, ", ")
- cExtern += fmt.Sprintf("(%s);\n", cIn)
- }
- // GC Library name
- if modname == "" {
- modname = "libc.a/shr_64.o"
- } else {
- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
- os.Exit(1)
- }
- sysvarname := fmt.Sprintf("libc_%s", sysname)
-
- if !onlyCommon {
- // GC Runtime import of function to allow cross-platform builds.
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
- // GC Link symbol to proc address variable.
- linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
- // GC Library proc address variable.
- vars = append(vars, sysvarname)
- }
-
- strconvfunc := "BytePtrFromString"
- strconvtype := "*byte"
-
- // Go function header.
- if outps != "" {
- outps = fmt.Sprintf(" (%s)", outps)
- }
- if textcommon != "" {
- textcommon += "\n"
- }
-
- textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
-
-			// Prepare arguments to call.
- var argscommon []string // Arguments in the common part
- var argscall []string // Arguments for call prototype
- var argsgc []string // Arguments for gc call (with syscall6)
- var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
- n := 0
- argN := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if p.Type == "string" && errvar != "" {
- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
-
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
-
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
- textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
- n++
- } else if p.Type == "int64" && endianness != "" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n")
- } else if p.Type == "bool" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if p.Type == "int" {
- if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
- // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
-
- } else {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- }
- } else if p.Type == "int32" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- } else if p.Type == "int64" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
- } else if p.Type == "uint32" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
- } else if p.Type == "uint64" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
- } else if p.Type == "uintptr" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- }
- argN++
- }
- nargs := len(argsgc)
-
- // COMMON function generation
- argscommonlist := strings.Join(argscommon, ", ")
- callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
- ret := []string{"_", "_"}
- body := ""
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[1] = reg
- doErrno = true
- } else {
- reg = "r0"
- ret[0] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" {
- textcommon += fmt.Sprintf("\t%s\n", callcommon)
- } else {
- textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
- }
- textcommon += body
-
- if doErrno {
- textcommon += "\tif e1 != 0 {\n"
- textcommon += "\t\terr = errnoErr(e1)\n"
- textcommon += "\t}\n"
- }
- textcommon += "\treturn\n"
- textcommon += "}\n"
-
- if onlyCommon {
- continue
- }
-
- // CALL Prototype
- callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
-
- // GC function generation
- asm := "syscall6"
- if nonblock != nil {
- asm = "rawSyscall6"
- }
-
- if len(argsgc) <= 6 {
- for len(argsgc) < 6 {
- argsgc = append(argsgc, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
- os.Exit(1)
- }
- argsgclist := strings.Join(argsgc, ", ")
- callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
-
- textgc += callProto
- textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
- textgc += "\treturn\n}\n"
-
- // GCCGO function generation
- argsgccgolist := strings.Join(argsgccgo, ", ")
- var callgccgo string
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
- } else {
- callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
- }
- textgccgo += callProto
- textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
- textgccgo += "\te1 = syscall.GetErrno()\n"
- textgccgo += "\treturn\n}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
-
- // Print zsyscall_aix_ppc64.go
- err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
- []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- // Print zsyscall_aix_ppc64_gc.go
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
- []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- // Print zsyscall_aix_ppc64_gccgo.go
- err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
- []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-}
-
-const srcTemplate1 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "unsafe"
-)
-
-
-%s
-
-%s
-`
-const srcTemplate2 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-// +build !gccgo
-
-package %s
-
-import (
- "unsafe"
-)
-%s
-%s
-%s
-type syscallFunc uintptr
-
-var (
-%s
-)
-
-// Implemented in runtime/syscall_aix.go.
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-
-%s
-`
-const srcTemplate3 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-// +build gccgo
-
-package %s
-
-%s
-*/
-import "C"
-import (
- "syscall"
-)
-
-
-%s
-
-%s
-`
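Both AIX generators hinge on the single //sys regular expression shown above; a minimal, runnable sketch applying the same pattern to the sample declaration from the file header:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the generators: //sys or //sysnb, function name, input
// parameters, optional output parameters, optional "= module.sysname".
var sysRE = regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`)

func main() {
	line := "//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt"
	f := sysRE.FindStringSubmatch(line)
	if f == nil {
		fmt.Println("malformed //sys declaration")
		return
	}
	funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
	fmt.Println("func:   ", funct)   // getsockopt
	fmt.Println("in:     ", inps)    // s int, level int, name int, ...
	fmt.Println("out:    ", outps)   // err error
	fmt.Println("module: ", modname) // libsocket
	fmt.Println("syscall:", sysname) // getsockopt
}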
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
deleted file mode 100644
index 3d864738b..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
- This program reads a file containing function prototypes
- (like syscall_solaris.go) and generates system call bodies.
- The prototypes are marked by lines beginning with "//sys"
- and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
- * If the go func name needs to be different from its libc name,
-   or the function is not in libc, the name can be specified
-   at the end, after the "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this program's command-line arguments
-func cmdLine() string {
- return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is a function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- text := ""
- dynimports := ""
- linknames := ""
- var vars []string
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
-			// Shared object (.so) file name.
- if modname == "" {
- modname = "libc"
- }
-
- // System call name.
- if sysname == "" {
- sysname = funct
- }
-
- // System call pointer variable name.
- sysvarname := fmt.Sprintf("proc%s", sysname)
-
- strconvfunc := "BytePtrFromString"
- strconvtype := "*byte"
-
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- // Runtime import of function to allow cross-platform builds.
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
- // Link symbol to proc address variable.
- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
- // Library proc address variable.
- vars = append(vars, sysvarname)
-
- // Go function header.
- outlist := strings.Join(out, ", ")
- if outlist != "" {
- outlist = fmt.Sprintf(" (%s)", outlist)
- }
- if text != "" {
- text += "\n"
- }
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
-
-			// Check whether an err return is available
- errvar := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- continue
- }
- }
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
- n++
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
- nargs := len(args)
-
- // Determine which form to use; pad args with zeros.
- asm := "sysvicall6"
- if nonblock != nil {
- asm = "rawSysvicall6"
- }
- if len(args) <= 6 {
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
- os.Exit(1)
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
-				reg = fmt.Sprintf("%s != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
- os.Exit(1)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- text += body
-
- if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "syscall"
- "unsafe"
-)
-%s
-%s
-%s
-var (
-%s
-)
-
-%s
-`
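A rough sketch of what this generator emits for an illustrative nonblocking prototype, say //sysnb Getpid() (pid int); the directives, proc variable and wrapper are shown with normalized spacing rather than byte-for-byte generator output:

//go:cgo_import_dynamic libc_getpid getpid "libc.so"
//go:linkname procGetpid libc_getpid

var procGetpid syscallFunc

func Getpid() (pid int) {
	r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0)
	pid = int(r0)
	return
}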
diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
deleted file mode 100644
index b6b409909..000000000
--- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
-//
-// Build a MIB with each entry being an array containing the level, type and
-// a hash that will contain additional entries if the current entry is a node.
-// We then walk this MIB and create a flattened sysctl name to OID hash.
-
-package main
-
-import (
- "bufio"
- "fmt"
- "os"
- "path/filepath"
- "regexp"
- "sort"
- "strings"
-)
-
-var (
- goos, goarch string
-)
-
-// cmdLine returns this program's command-line arguments.
-func cmdLine() string {
- return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags.
-func buildTags() string {
- return fmt.Sprintf("%s,%s", goarch, goos)
-}
-
-// reMatch performs a regular expression match and stores the substring slice in the value pointed to by m.
-func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
- *m = re.FindStringSubmatch(str)
- if *m != nil {
- return true
- }
- return false
-}
-
-type nodeElement struct {
- n int
- t string
- pE *map[string]nodeElement
-}
-
-var (
- debugEnabled bool
- mib map[string]nodeElement
- node *map[string]nodeElement
- nodeMap map[string]string
- sysCtl []string
-)
-
-var (
- ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
- ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
- ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
- netInetRE = regexp.MustCompile(`^netinet/`)
- netInet6RE = regexp.MustCompile(`^netinet6/`)
- netRE = regexp.MustCompile(`^net/`)
- bracesRE = regexp.MustCompile(`{.*}`)
- ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
- fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
-)
-
-func debug(s string) {
- if debugEnabled {
- fmt.Fprintln(os.Stderr, s)
- }
-}
-
-// Walk the MIB and build a sysctl name to OID mapping.
-func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
- lNode := pNode // local copy of pointer to node
- var keys []string
- for k := range *lNode {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, key := range keys {
- nodename := name
- if name != "" {
- nodename += "."
- }
- nodename += key
-
- nodeoid := append(oid, (*pNode)[key].n)
-
- if (*pNode)[key].t == `CTLTYPE_NODE` {
- if _, ok := nodeMap[nodename]; ok {
- lNode = &mib
- ctlName := nodeMap[nodename]
- for _, part := range strings.Split(ctlName, ".") {
- lNode = ((*lNode)[part]).pE
- }
- } else {
- lNode = (*pNode)[key].pE
- }
- buildSysctl(lNode, nodename, nodeoid)
- } else if (*pNode)[key].t != "" {
- oidStr := []string{}
- for j := range nodeoid {
- oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
- }
- text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
- sysCtl = append(sysCtl, text)
- }
- }
-}
-
-func main() {
-	// Get the OS (using GOOS_TARGET if it exists)
- goos = os.Getenv("GOOS_TARGET")
- if goos == "" {
- goos = os.Getenv("GOOS")
- }
- // Get the architecture (using GOARCH_TARGET if it exists)
- goarch = os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check if GOOS and GOARCH environment variables are defined
- if goarch == "" || goos == "" {
- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
- os.Exit(1)
- }
-
- mib = make(map[string]nodeElement)
- headers := [...]string{
- `sys/sysctl.h`,
- `sys/socket.h`,
- `sys/tty.h`,
- `sys/malloc.h`,
- `sys/mount.h`,
- `sys/namei.h`,
- `sys/sem.h`,
- `sys/shm.h`,
- `sys/vmmeter.h`,
- `uvm/uvmexp.h`,
- `uvm/uvm_param.h`,
- `uvm/uvm_swap_encrypt.h`,
- `ddb/db_var.h`,
- `net/if.h`,
- `net/if_pfsync.h`,
- `net/pipex.h`,
- `netinet/in.h`,
- `netinet/icmp_var.h`,
- `netinet/igmp_var.h`,
- `netinet/ip_ah.h`,
- `netinet/ip_carp.h`,
- `netinet/ip_divert.h`,
- `netinet/ip_esp.h`,
- `netinet/ip_ether.h`,
- `netinet/ip_gre.h`,
- `netinet/ip_ipcomp.h`,
- `netinet/ip_ipip.h`,
- `netinet/pim_var.h`,
- `netinet/tcp_var.h`,
- `netinet/udp_var.h`,
- `netinet6/in6.h`,
- `netinet6/ip6_divert.h`,
- `netinet6/pim6_var.h`,
- `netinet/icmp6.h`,
- `netmpls/mpls.h`,
- }
-
- ctls := [...]string{
- `kern`,
- `vm`,
- `fs`,
- `net`,
- //debug /* Special handling required */
- `hw`,
- //machdep /* Arch specific */
- `user`,
- `ddb`,
- //vfs /* Special handling required */
- `fs.posix`,
- `kern.forkstat`,
- `kern.intrcnt`,
- `kern.malloc`,
- `kern.nchstats`,
- `kern.seminfo`,
- `kern.shminfo`,
- `kern.timecounter`,
- `kern.tty`,
- `kern.watchdog`,
- `net.bpf`,
- `net.ifq`,
- `net.inet`,
- `net.inet.ah`,
- `net.inet.carp`,
- `net.inet.divert`,
- `net.inet.esp`,
- `net.inet.etherip`,
- `net.inet.gre`,
- `net.inet.icmp`,
- `net.inet.igmp`,
- `net.inet.ip`,
- `net.inet.ip.ifq`,
- `net.inet.ipcomp`,
- `net.inet.ipip`,
- `net.inet.mobileip`,
- `net.inet.pfsync`,
- `net.inet.pim`,
- `net.inet.tcp`,
- `net.inet.udp`,
- `net.inet6`,
- `net.inet6.divert`,
- `net.inet6.ip6`,
- `net.inet6.icmp6`,
- `net.inet6.pim6`,
- `net.inet6.tcp6`,
- `net.inet6.udp6`,
- `net.mpls`,
- `net.mpls.ifq`,
- `net.key`,
- `net.pflow`,
- `net.pfsync`,
- `net.pipex`,
- `net.rt`,
- `vm.swapencrypt`,
- //vfsgenctl /* Special handling required */
- }
-
- // Node name "fixups"
- ctlMap := map[string]string{
- "ipproto": "net.inet",
- "net.inet.ipproto": "net.inet",
- "net.inet6.ipv6proto": "net.inet6",
- "net.inet6.ipv6": "net.inet6.ip6",
- "net.inet.icmpv6": "net.inet6.icmp6",
- "net.inet6.divert6": "net.inet6.divert",
- "net.inet6.tcp6": "net.inet.tcp",
- "net.inet6.udp6": "net.inet.udp",
- "mpls": "net.mpls",
- "swpenc": "vm.swapencrypt",
- }
-
- // Node mappings
- nodeMap = map[string]string{
- "net.inet.ip.ifq": "net.ifq",
- "net.inet.pfsync": "net.pfsync",
- "net.mpls.ifq": "net.ifq",
- }
-
- mCtls := make(map[string]bool)
- for _, ctl := range ctls {
- mCtls[ctl] = true
- }
-
- for _, header := range headers {
- debug("Processing " + header)
- file, err := os.Open(filepath.Join("/usr/include", header))
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- var sub []string
- if reMatch(ctlNames1RE, s.Text(), &sub) ||
- reMatch(ctlNames2RE, s.Text(), &sub) ||
- reMatch(ctlNames3RE, s.Text(), &sub) {
- if sub[1] == `CTL_NAMES` {
- // Top level.
- node = &mib
- } else {
- // Node.
- nodename := strings.ToLower(sub[2])
- ctlName := ""
- if reMatch(netInetRE, header, &sub) {
- ctlName = "net.inet." + nodename
- } else if reMatch(netInet6RE, header, &sub) {
- ctlName = "net.inet6." + nodename
- } else if reMatch(netRE, header, &sub) {
- ctlName = "net." + nodename
- } else {
- ctlName = nodename
- ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
- }
-
- if val, ok := ctlMap[ctlName]; ok {
- ctlName = val
- }
- if _, ok := mCtls[ctlName]; !ok {
- debug("Ignoring " + ctlName + "...")
- continue
- }
-
- // Walk down from the top of the MIB.
- node = &mib
- for _, part := range strings.Split(ctlName, ".") {
- if _, ok := (*node)[part]; !ok {
- debug("Missing node " + part)
- (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
- }
- node = (*node)[part].pE
- }
- }
-
- // Populate current node with entries.
- i := -1
- for !strings.HasPrefix(s.Text(), "}") {
- s.Scan()
- if reMatch(bracesRE, s.Text(), &sub) {
- i++
- }
- if !reMatch(ctlTypeRE, s.Text(), &sub) {
- continue
- }
- (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
- }
- }
- }
- err = s.Err()
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- file.Close()
- }
- buildSysctl(&mib, "", []int{})
-
- sort.Strings(sysCtl)
- text := strings.Join(sysCtl, "")
-
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-type mibentry struct {
- ctlname string
- ctloid []_C_int
-}
-
-var sysctlMib = []mibentry {
-%s
-}
-`
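buildSysctl plus the template above reduce the parsed headers to a flat name-to-OID table; an illustrative fragment of the generated sysctlMib, with OID values shown only as examples:

type mibentry struct {
	ctlname string
	ctloid  []_C_int // _C_int is the C int alias defined elsewhere in package unix
}

var sysctlMib = []mibentry{
	{"kern.hostname", []_C_int{1, 10}}, // example OID path
	{"kern.ostype", []_C_int{1, 1}},    // example OID path
}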
diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go
deleted file mode 100644
index baa6ecd85..000000000
--- a/vendor/golang.org/x/sys/unix/mksysnum.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Generate system call table for DragonFly, NetBSD,
-// FreeBSD, OpenBSD or Darwin from master list
-// (for example, /usr/src/sys/kern/syscalls.master or
-// sys/syscall.h).
-package main
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- goos, goarch string
-)
-
-// cmdLine returns this program's command-line arguments
-func cmdLine() string {
- return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return fmt.Sprintf("%s,%s", goarch, goos)
-}
-
-func checkErr(err error) {
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-}
-
-// source string and substring slice for regexp
-type re struct {
- str string // source string
- sub []string // matched sub-string
-}
-
-// Match performs regular expression match
-func (r *re) Match(exp string) bool {
- r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
- if r.sub != nil {
- return true
- }
- return false
-}
-
-// fetchFile fetches a text file from URL
-func fetchFile(URL string) io.Reader {
- resp, err := http.Get(URL)
- checkErr(err)
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- checkErr(err)
- return strings.NewReader(string(body))
-}
-
-// readFile reads a text file from path
-func readFile(path string) io.Reader {
-	file, err := os.Open(path)
- checkErr(err)
- return file
-}
-
-func format(name, num, proto string) string {
- name = strings.ToUpper(name)
- // There are multiple entries for enosys and nosys, so comment them out.
- nm := re{str: name}
- if nm.Match(`^SYS_E?NOSYS$`) {
- name = fmt.Sprintf("// %s", name)
- }
- if name == `SYS_SYS_EXIT` {
- name = `SYS_EXIT`
- }
- return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
-}
-
-func main() {
-	// Get the OS (using GOOS_TARGET if it exists)
- goos = os.Getenv("GOOS_TARGET")
- if goos == "" {
- goos = os.Getenv("GOOS")
- }
- // Get the architecture (using GOARCH_TARGET if it exists)
- goarch = os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check if GOOS and GOARCH environment variables are defined
- if goarch == "" || goos == "" {
- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
- os.Exit(1)
- }
-
- file := strings.TrimSpace(os.Args[1])
- var syscalls io.Reader
- if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
- // Download syscalls.master file
- syscalls = fetchFile(file)
- } else {
- syscalls = readFile(file)
- }
-
- var text, line string
- s := bufio.NewScanner(syscalls)
- for s.Scan() {
- t := re{str: line}
- if t.Match(`^(.*)\\$`) {
- // Handle continuation
- line = t.sub[1]
- line += strings.TrimLeft(s.Text(), " \t")
- } else {
- // New line
- line = s.Text()
- }
- t = re{str: line}
- if t.Match(`\\$`) {
- continue
- }
- t = re{str: line}
-
- switch goos {
- case "dragonfly":
- if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
- num, proto := t.sub[1], t.sub[2]
- name := fmt.Sprintf("SYS_%s", t.sub[3])
- text += format(name, num, proto)
- }
- case "freebsd":
- if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
- num, proto := t.sub[1], t.sub[2]
- name := fmt.Sprintf("SYS_%s", t.sub[3])
- text += format(name, num, proto)
- }
- case "openbsd":
- if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
- num, proto, name := t.sub[1], t.sub[3], t.sub[4]
- text += format(name, num, proto)
- }
- case "netbsd":
- if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
- num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
- name := t.sub[7] + "_" + t.sub[9]
- if t.sub[11] != "" {
- name = t.sub[7] + "_" + t.sub[11]
- }
- name = strings.ToUpper(name)
- if compat == "" || compat == "13" || compat == "30" || compat == "50" {
- text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
- }
- }
- case "darwin":
- if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
- name, num := t.sub[1], t.sub[2]
- name = strings.ToUpper(name)
- text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
- }
- default:
- fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
- os.Exit(1)
-
- }
- }
- err := s.Err()
- checkErr(err)
-
- fmt.Printf(template, cmdLine(), buildTags(), text)
-}
-
-const template = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-const(
-%s)`
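A self-contained adaptation of format() above, applied to illustrative syscalls.master-style entries (the sample entries and the exact indentation are only examples):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Mirrors format() above: uppercase the name, comment out duplicate
// enosys/nosys entries, and collapse SYS_SYS_EXIT to SYS_EXIT.
func format(name, num, proto string) string {
	name = strings.ToUpper(name)
	if regexp.MustCompile(`^SYS_E?NOSYS$`).MatchString(name) {
		name = fmt.Sprintf("// %s", name)
	}
	if name == "SYS_SYS_EXIT" {
		name = "SYS_EXIT"
	}
	return fmt.Sprintf("\t%s = %s; // %s\n", name, num, proto)
}

func main() {
	fmt.Print(format("sys_exit", "1", "{ void sys_exit(int rval); }"))
	fmt.Print(format("sys_nosys", "0", "{ int sys_nosys(void); }"))
	// Prints (tab-indented):
	//	SYS_EXIT = 1; // { void sys_exit(int rval); }
	//	// SYS_NOSYS = 0; // { int sys_nosys(void); }
}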
diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go
deleted file mode 100644
index 40d2beede..000000000
--- a/vendor/golang.org/x/sys/unix/types_aix.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-// +build aix
-
-/*
-Input to cgo -godefs. See also mkerrors.sh and mkall.sh
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/limits.h>
-#include <sys/un.h>
-#include <utime.h>
-#include <sys/utsname.h>
-#include <sys/poll.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/termio.h>
-#include <sys/ioctl.h>
-
-#include <termios.h>
-
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-
-
-#include <dirent.h>
-#include <fcntl.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
- PathMax = C.PATH_MAX
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-type off64 C.off64_t
-type off C.off_t
-type Mode_t C.mode_t
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-type Timex C.struct_timex
-
-type Time_t C.time_t
-
-type Tms C.struct_tms
-
-type Utimbuf C.struct_utimbuf
-
-type Timezone C.struct_timezone
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit64
-
-type Pid_t C.pid_t
-
-type _Gid_t C.gid_t
-
-type dev_t C.dev_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type StatxTimestamp C.struct_statx_timestamp
-
-type Statx_t C.struct_statx
-
-type Dirent C.struct_dirent
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Cmsghdr C.struct_cmsghdr
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type Linger C.struct_linger
-
-type Msghdr C.struct_msghdr
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
-)
-
-type IfMsgHdr C.struct_if_msghdr
-
-// Misc
-
-type FdSet C.fd_set
-
-type Utsname C.struct_utsname
-
-type Ustat_t C.struct_ustat
-
-type Sigset_t C.sigset_t
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Termio C.struct_termio
-
-type Winsize C.struct_winsize
-
-//poll
-
-type PollFd struct {
- Fd int32
- Events uint16
- Revents uint16
-}
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-//flock_t
-
-type Flock_t C.struct_flock64
-
-// Statfs
-
-type Fsid_t C.struct_fsid_t
-type Fsid64_t C.struct_fsid64_t
-
-type Statfs_t C.struct_statfs
-
-const RNDGETENTCNT = 0x80045200
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
deleted file mode 100644
index 155c2e692..000000000
--- a/vendor/golang.org/x/sys/unix/types_darwin.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define __DARWIN_UNIX03 0
-#define KERNEL
-#define _DARWIN_USE_64_BIT_INODE
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <unistd.h>
-#include <mach/mach.h>
-#include <mach/message.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/if_var.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat64
-
-type Statfs_t C.struct_statfs64
-
-type Flock_t C.struct_flock
-
-type Fstore_t C.struct_fstore
-
-type Radvisory_t C.struct_radvisory
-
-type Fbootstraptransfer_t C.struct_fbootstraptransfer
-
-type Log2phys_t C.struct_log2phys
-
-type Fsid C.struct_fsid
-
-type Dirent C.struct_dirent
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet4Pktinfo C.struct_in_pktinfo
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfmaMsghdr2 C.struct_ifma_msghdr2
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// uname
-
-type Utsname C.struct_utsname
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go
deleted file mode 100644
index 3365dd79d..000000000
--- a/vendor/golang.org/x/sys/unix/types_dragonfly.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.struct_fsid
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Uname
-
-type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go
deleted file mode 100644
index a121dc336..000000000
--- a/vendor/golang.org/x/sys/unix/types_freebsd.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define _WANT_FREEBSD11_STAT 1
-#define _WANT_FREEBSD11_STATFS 1
-#define _WANT_FREEBSD11_DIRENT 1
-#define _WANT_FREEBSD11_KEVENT 1
-
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/capsicum.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
-// See /usr/include/net/if.h.
-struct if_data8 {
- u_char ifi_type;
- u_char ifi_physical;
- u_char ifi_addrlen;
- u_char ifi_hdrlen;
- u_char ifi_link_state;
- u_char ifi_spare_char1;
- u_char ifi_spare_char2;
- u_char ifi_datalen;
- u_long ifi_mtu;
- u_long ifi_metric;
- u_long ifi_baudrate;
- u_long ifi_ipackets;
- u_long ifi_ierrors;
- u_long ifi_opackets;
- u_long ifi_oerrors;
- u_long ifi_collisions;
- u_long ifi_ibytes;
- u_long ifi_obytes;
- u_long ifi_imcasts;
- u_long ifi_omcasts;
- u_long ifi_iqdrops;
- u_long ifi_noproto;
- u_long ifi_hwassist;
-// FIXME: these are now unions, so maybe need to change definitions?
-#undef ifi_epoch
- time_t ifi_epoch;
-#undef ifi_lastchange
- struct timeval ifi_lastchange;
-};
-
-// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
-// See /usr/include/net/if.h.
-struct if_msghdr8 {
- u_short ifm_msglen;
- u_char ifm_version;
- u_char ifm_type;
- int ifm_addrs;
- int ifm_flags;
- u_short ifm_index;
- struct if_data8 ifm_data;
-};
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-const (
- _statfsVersion = C.STATFS_VERSION
- _dirblksiz = C.DIRBLKSIZ
-)
-
-type Stat_t C.struct_stat
-
-type stat_freebsd11_t C.struct_freebsd11_stat
-
-type Statfs_t C.struct_statfs
-
-type statfs_freebsd11_t C.struct_freebsd11_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type dirent_freebsd11 C.struct_freebsd11_dirent
-
-type Fsid C.struct_fsid
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Advice to Fadvise
-
-const (
- FADV_NORMAL = C.POSIX_FADV_NORMAL
- FADV_RANDOM = C.POSIX_FADV_RANDOM
- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
- FADV_WILLNEED = C.POSIX_FADV_WILLNEED
- FADV_DONTNEED = C.POSIX_FADV_DONTNEED
- FADV_NOREUSE = C.POSIX_FADV_NOREUSE
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPMreqn C.struct_ip_mreqn
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPMreqn = C.sizeof_struct_ip_mreqn
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_ATTACH = C.PT_ATTACH
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_DETACH = C.PT_DETACH
- PTRACE_GETFPREGS = C.PT_GETFPREGS
- PTRACE_GETFSBASE = C.PT_GETFSBASE
- PTRACE_GETLWPLIST = C.PT_GETLWPLIST
- PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
- PTRACE_GETREGS = C.PT_GETREGS
- PTRACE_GETXSTATE = C.PT_GETXSTATE
- PTRACE_IO = C.PT_IO
- PTRACE_KILL = C.PT_KILL
- PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
- PTRACE_LWPINFO = C.PT_LWPINFO
- PTRACE_SETFPREGS = C.PT_SETFPREGS
- PTRACE_SETREGS = C.PT_SETREGS
- PTRACE_SINGLESTEP = C.PT_STEP
- PTRACE_TRACEME = C.PT_TRACE_ME
-)
-
-const (
- PIOD_READ_D = C.PIOD_READ_D
- PIOD_WRITE_D = C.PIOD_WRITE_D
- PIOD_READ_I = C.PIOD_READ_I
- PIOD_WRITE_I = C.PIOD_WRITE_I
-)
-
-const (
- PL_FLAG_BORN = C.PL_FLAG_BORN
- PL_FLAG_EXITED = C.PL_FLAG_EXITED
- PL_FLAG_SI = C.PL_FLAG_SI
-)
-
-const (
- TRAP_BRKPT = C.TRAP_BRKPT
- TRAP_TRACE = C.TRAP_TRACE
-)
-
-type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
-
-type __Siginfo C.struct___siginfo
-
-type Sigset_t C.sigset_t
-
-type Reg C.struct_reg
-
-type FpReg C.struct_fpreg
-
-type PtraceIoDesc C.struct_ptrace_io_desc
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent_freebsd11
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- sizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
- sizeofIfData = C.sizeof_struct_if_data
- SizeofIfData = C.sizeof_struct_if_data8
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type ifMsghdr C.struct_if_msghdr
-
-type IfMsghdr C.struct_if_msghdr8
-
-type ifData C.struct_if_data
-
-type IfData C.struct_if_data8
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
- SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfZbuf C.struct_bpf_zbuf
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfZbufHeader C.struct_bpf_zbuf_header
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLINIGNEOF = C.POLLINIGNEOF
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Capabilities
-
-type CapRights C.struct_cap_rights
-
-// Uname
-
-type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go
deleted file mode 100644
index 4a96d72c3..000000000
--- a/vendor/golang.org/x/sys/unix/types_netbsd.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/sysctl.h>
-#include <sys/time.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.fsid_t
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Advice to Fadvise
-
-const (
- FADV_NORMAL = C.POSIX_FADV_NORMAL
- FADV_RANDOM = C.POSIX_FADV_RANDOM
- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
- FADV_WILLNEED = C.POSIX_FADV_WILLNEED
- FADV_DONTNEED = C.POSIX_FADV_DONTNEED
- FADV_NOREUSE = C.POSIX_FADV_NOREUSE
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-type Mclpool C.struct_mclpool
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfTimeval C.struct_bpf_timeval
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-type Ptmget C.struct_ptmget
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Sysctl
-
-type Sysctlnode C.struct_sysctlnode
-
-// Uname
-
-type Utsname C.struct_utsname
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go
deleted file mode 100644
index 775cb57dc..000000000
--- a/vendor/golang.org/x/sys/unix/types_openbsd.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <uvm/uvmexp.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.fsid_t
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-type Mclpool C.struct_mclpool
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfTimeval C.struct_bpf_timeval
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Signal Sets
-
-type Sigset_t C.sigset_t
-
-// Uname
-
-type Utsname C.struct_utsname
-
-// Uvmexp
-
-const SizeofUvmexp = C.sizeof_struct_uvmexp
-
-type Uvmexp C.struct_uvmexp
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go
deleted file mode 100644
index 2b716f934..000000000
--- a/vendor/golang.org/x/sys/unix/types_solaris.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-// These defines ensure that builds done on newer versions of Solaris are
-// backwards-compatible with older versions of Solaris and
-// OpenSolaris-based derivatives.
-#define __USE_SUNOS_SOCKETS__ // msghdr
-#define __USE_LEGACY_PROTOTYPES__ // iovec
-#include <dirent.h>
-#include <fcntl.h>
-#include <netdb.h>
-#include <limits.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <termio.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/statvfs.h>
-#include <sys/time.h>
-#include <sys/times.h>
-#include <sys/types.h>
-#include <sys/utsname.h>
-#include <sys/un.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-#include <ustat.h>
-#include <utime.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
- PathMax = C.PATH_MAX
- MaxHostNameLen = C.MAXHOSTNAMELEN
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-type Tms C.struct_tms
-
-type Utimbuf C.struct_utimbuf
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-// Filesystems
-
-type _Fsblkcnt_t C.fsblkcnt_t
-
-type Statvfs_t C.struct_statvfs
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Select
-
-type FdSet C.fd_set
-
-// Misc
-
-type Utsname C.struct_utsname
-
-type Ustat_t C.struct_ustat
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_EACCESS = C.AT_EACCESS
-)
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfTimeval C.struct_bpf_timeval
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Termio C.struct_termio
-
-type Winsize C.struct_winsize
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go
deleted file mode 100644
index f7941701e..000000000
--- a/vendor/golang.org/x/text/encoding/charmap/maketables.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
- "unicode/utf8"
-
- "golang.org/x/text/encoding"
- "golang.org/x/text/internal/gen"
-)
-
-const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmnopqrstuvwxyz{|}~\u007f"
-
-var encodings = []struct {
- name string
- mib string
- comment string
- varName string
- replacement byte
- mapping string
-}{
- {
- "IBM Code Page 037",
- "IBM037",
- "",
- "CodePage037",
- 0x3f,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm",
- },
- {
- "IBM Code Page 437",
- "PC8CodePage437",
- "",
- "CodePage437",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm",
- },
- {
- "IBM Code Page 850",
- "PC850Multilingual",
- "",
- "CodePage850",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm",
- },
- {
- "IBM Code Page 852",
- "PCp852",
- "",
- "CodePage852",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm",
- },
- {
- "IBM Code Page 855",
- "IBM855",
- "",
- "CodePage855",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm",
- },
- {
- "Windows Code Page 858", // PC latin1 with Euro
- "IBM00858",
- "",
- "CodePage858",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm",
- },
- {
- "IBM Code Page 860",
- "IBM860",
- "",
- "CodePage860",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm",
- },
- {
- "IBM Code Page 862",
- "PC862LatinHebrew",
- "",
- "CodePage862",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm",
- },
- {
- "IBM Code Page 863",
- "IBM863",
- "",
- "CodePage863",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm",
- },
- {
- "IBM Code Page 865",
- "IBM865",
- "",
- "CodePage865",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm",
- },
- {
- "IBM Code Page 866",
- "IBM866",
- "",
- "CodePage866",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-ibm866.txt",
- },
- {
- "IBM Code Page 1047",
- "IBM1047",
- "",
- "CodePage1047",
- 0x3f,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm",
- },
- {
- "IBM Code Page 1140",
- "IBM01140",
- "",
- "CodePage1140",
- 0x3f,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm",
- },
- {
- "ISO 8859-1",
- "ISOLatin1",
- "",
- "ISO8859_1",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm",
- },
- {
- "ISO 8859-2",
- "ISOLatin2",
- "",
- "ISO8859_2",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-2.txt",
- },
- {
- "ISO 8859-3",
- "ISOLatin3",
- "",
- "ISO8859_3",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-3.txt",
- },
- {
- "ISO 8859-4",
- "ISOLatin4",
- "",
- "ISO8859_4",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-4.txt",
- },
- {
- "ISO 8859-5",
- "ISOLatinCyrillic",
- "",
- "ISO8859_5",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-5.txt",
- },
- {
- "ISO 8859-6",
- "ISOLatinArabic",
- "",
- "ISO8859_6,ISO8859_6E,ISO8859_6I",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-6.txt",
- },
- {
- "ISO 8859-7",
- "ISOLatinGreek",
- "",
- "ISO8859_7",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-7.txt",
- },
- {
- "ISO 8859-8",
- "ISOLatinHebrew",
- "",
- "ISO8859_8,ISO8859_8E,ISO8859_8I",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-8.txt",
- },
- {
- "ISO 8859-9",
- "ISOLatin5",
- "",
- "ISO8859_9",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm",
- },
- {
- "ISO 8859-10",
- "ISOLatin6",
- "",
- "ISO8859_10",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-10.txt",
- },
- {
- "ISO 8859-13",
- "ISO885913",
- "",
- "ISO8859_13",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-13.txt",
- },
- {
- "ISO 8859-14",
- "ISO885914",
- "",
- "ISO8859_14",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-14.txt",
- },
- {
- "ISO 8859-15",
- "ISO885915",
- "",
- "ISO8859_15",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-15.txt",
- },
- {
- "ISO 8859-16",
- "ISO885916",
- "",
- "ISO8859_16",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-16.txt",
- },
- {
- "KOI8-R",
- "KOI8R",
- "",
- "KOI8R",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-koi8-r.txt",
- },
- {
- "KOI8-U",
- "KOI8U",
- "",
- "KOI8U",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-koi8-u.txt",
- },
- {
- "Macintosh",
- "Macintosh",
- "",
- "Macintosh",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-macintosh.txt",
- },
- {
- "Macintosh Cyrillic",
- "MacintoshCyrillic",
- "",
- "MacintoshCyrillic",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt",
- },
- {
- "Windows 874",
- "Windows874",
- "",
- "Windows874",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-874.txt",
- },
- {
- "Windows 1250",
- "Windows1250",
- "",
- "Windows1250",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1250.txt",
- },
- {
- "Windows 1251",
- "Windows1251",
- "",
- "Windows1251",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1251.txt",
- },
- {
- "Windows 1252",
- "Windows1252",
- "",
- "Windows1252",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1252.txt",
- },
- {
- "Windows 1253",
- "Windows1253",
- "",
- "Windows1253",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1253.txt",
- },
- {
- "Windows 1254",
- "Windows1254",
- "",
- "Windows1254",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1254.txt",
- },
- {
- "Windows 1255",
- "Windows1255",
- "",
- "Windows1255",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1255.txt",
- },
- {
- "Windows 1256",
- "Windows1256",
- "",
- "Windows1256",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1256.txt",
- },
- {
- "Windows 1257",
- "Windows1257",
- "",
- "Windows1257",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1257.txt",
- },
- {
- "Windows 1258",
- "Windows1258",
- "",
- "Windows1258",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1258.txt",
- },
- {
- "X-User-Defined",
- "XUserDefined",
- "It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
- "XUserDefined",
- encoding.ASCIISub,
- ascii +
- "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
- "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
- "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
- "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
- "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
- "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
- "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
- "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
- "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
- "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
- "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
- "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
- "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
- "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
- "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
- "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
- },
-}
-
-func getWHATWG(url string) string {
- res, err := http.Get(url)
- if err != nil {
- log.Fatalf("%q: Get: %v", url, err)
- }
- defer res.Body.Close()
-
- mapping := make([]rune, 128)
- for i := range mapping {
- mapping[i] = '\ufffd'
- }
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := 0, 0
- if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 128 <= x {
- log.Fatalf("code %d is out of range", x)
- }
- if 0x80 <= y && y < 0xa0 {
- // We diverge from the WHATWG spec by mapping control characters
- // in the range [0x80, 0xa0) to U+FFFD.
- continue
- }
- mapping[x] = rune(y)
- }
- return ascii + string(mapping)
-}
-
-func getUCM(url string) string {
- res, err := http.Get(url)
- if err != nil {
- log.Fatalf("%q: Get: %v", url, err)
- }
- defer res.Body.Close()
-
- mapping := make([]rune, 256)
- for i := range mapping {
- mapping[i] = '\ufffd'
- }
-
- charsFound := 0
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- var c byte
- var r rune
- if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
- continue
- }
- mapping[c] = r
- charsFound++
- }
-
- if charsFound < 200 {
- log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
- }
-
- return string(mapping)
-}
-
-func main() {
- mibs := map[string]bool{}
- all := []string{}
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "charmap")
-
- printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
-
- printf("import (\n")
- printf("\t\"golang.org/x/text/encoding\"\n")
- printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
- printf(")\n\n")
- for _, e := range encodings {
- varNames := strings.Split(e.varName, ",")
- all = append(all, varNames...)
- varName := varNames[0]
- switch {
- case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
- e.mapping = getWHATWG(e.mapping)
- case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
- e.mapping = getUCM(e.mapping)
- }
-
- asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
- if asciiSuperset {
- low = 0x80
- }
- lvn := 1
- if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
- lvn = 3
- }
- lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
- printf("// %s is the %s encoding.\n", varName, e.name)
- if e.comment != "" {
- printf("//\n// %s\n", e.comment)
- }
- printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
- varName, lowerVarName, lowerVarName, e.name)
- if mibs[e.mib] {
- log.Fatalf("MIB type %q declared multiple times.", e.mib)
- }
- printf("mib: identifier.%s,\n", e.mib)
- printf("asciiSuperset: %t,\n", asciiSuperset)
- printf("low: 0x%02x,\n", low)
- printf("replacement: 0x%02x,\n", e.replacement)
-
- printf("decode: [256]utf8Enc{\n")
- i, backMapping := 0, map[rune]byte{}
- for _, c := range e.mapping {
- if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
- backMapping[c] = byte(i)
- }
- var buf [8]byte
- n := utf8.EncodeRune(buf[:], c)
- if n > 3 {
- panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
- }
- printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
- if i%2 == 1 {
- printf("\n")
- }
- i++
- }
- printf("},\n")
-
- printf("encode: [256]uint32{\n")
- encode := make([]uint32, 0, 256)
- for c, i := range backMapping {
- encode = append(encode, uint32(i)<<24|uint32(c))
- }
- sort.Sort(byRune(encode))
- for len(encode) < cap(encode) {
- encode = append(encode, encode[len(encode)-1])
- }
- for i, enc := range encode {
- printf("0x%08x,", enc)
- if i%8 == 7 {
- printf("\n")
- }
- }
- printf("},\n}\n")
-
- // Add an estimate of the size of a single Charmap{} struct value, which
- // includes two 256 elem arrays of 4 bytes and some extra fields, which
- // align to 3 uint64s on 64-bit architectures.
- w.Size += 2*4*256 + 3*8
- }
- // TODO: add proper line breaking.
- printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
-}
-
-type byRune []uint32
-
-func (b byRune) Len() int { return len(b) }
-func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
-func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go
deleted file mode 100644
index ac6b4a77f..000000000
--- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "log"
- "strings"
-
- "golang.org/x/text/internal/gen"
-)
-
-type group struct {
- Encodings []struct {
- Labels []string
- Name string
- }
-}
-
-func main() {
- gen.Init()
-
- r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json")
- var groups []group
- if err := json.NewDecoder(r).Decode(&groups); err != nil {
- log.Fatalf("Error reading encodings.json: %v", err)
- }
-
- w := &bytes.Buffer{}
- fmt.Fprintln(w, "type htmlEncoding byte")
- fmt.Fprintln(w, "const (")
- for i, g := range groups {
- for _, e := range g.Encodings {
- key := strings.ToLower(e.Name)
- name := consts[key]
- if name == "" {
- log.Fatalf("No const defined for %s.", key)
- }
- if i == 0 {
- fmt.Fprintf(w, "%s htmlEncoding = iota\n", name)
- } else {
- fmt.Fprintf(w, "%s\n", name)
- }
- }
- }
- fmt.Fprintln(w, "numEncodings")
- fmt.Fprint(w, ")\n\n")
-
- fmt.Fprintln(w, "var canonical = [numEncodings]string{")
- for _, g := range groups {
- for _, e := range g.Encodings {
- fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name))
- }
- }
- fmt.Fprint(w, "}\n\n")
-
- fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{")
- for _, g := range groups {
- for _, e := range g.Encodings {
- for _, l := range e.Labels {
- key := strings.ToLower(e.Name)
- name := consts[key]
- fmt.Fprintf(w, "%q: %s,\n", l, name)
- }
- }
- }
- fmt.Fprint(w, "}\n\n")
-
- var tags []string
- fmt.Fprintln(w, "var localeMap = []htmlEncoding{")
- for _, loc := range locales {
- tags = append(tags, loc.tag)
- fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag)
- }
- fmt.Fprint(w, "}\n\n")
-
- fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " "))
-
- gen.WriteGoFile("tables.go", "htmlindex", w.Bytes())
-}
-
-// consts maps canonical encoding name to internal constant.
-var consts = map[string]string{
- "utf-8": "utf8",
- "ibm866": "ibm866",
- "iso-8859-2": "iso8859_2",
- "iso-8859-3": "iso8859_3",
- "iso-8859-4": "iso8859_4",
- "iso-8859-5": "iso8859_5",
- "iso-8859-6": "iso8859_6",
- "iso-8859-7": "iso8859_7",
- "iso-8859-8": "iso8859_8",
- "iso-8859-8-i": "iso8859_8I",
- "iso-8859-10": "iso8859_10",
- "iso-8859-13": "iso8859_13",
- "iso-8859-14": "iso8859_14",
- "iso-8859-15": "iso8859_15",
- "iso-8859-16": "iso8859_16",
- "koi8-r": "koi8r",
- "koi8-u": "koi8u",
- "macintosh": "macintosh",
- "windows-874": "windows874",
- "windows-1250": "windows1250",
- "windows-1251": "windows1251",
- "windows-1252": "windows1252",
- "windows-1253": "windows1253",
- "windows-1254": "windows1254",
- "windows-1255": "windows1255",
- "windows-1256": "windows1256",
- "windows-1257": "windows1257",
- "windows-1258": "windows1258",
- "x-mac-cyrillic": "macintoshCyrillic",
- "gbk": "gbk",
- "gb18030": "gb18030",
- // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG
- "big5": "big5",
- "euc-jp": "eucjp",
- "iso-2022-jp": "iso2022jp",
- "shift_jis": "shiftJIS",
- "euc-kr": "euckr",
- "replacement": "replacement",
- "utf-16be": "utf16be",
- "utf-16le": "utf16le",
- "x-user-defined": "xUserDefined",
-}
-
-// locales is taken from
-// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
-var locales = []struct{ tag, name string }{
- // The default value. Explicitly state latin to benefit from the exact
- // script option, while still making 1252 the default encoding for languages
- // written in Latin script.
- {"und_Latn", "windows-1252"},
- {"ar", "windows-1256"},
- {"ba", "windows-1251"},
- {"be", "windows-1251"},
- {"bg", "windows-1251"},
- {"cs", "windows-1250"},
- {"el", "iso-8859-7"},
- {"et", "windows-1257"},
- {"fa", "windows-1256"},
- {"he", "windows-1255"},
- {"hr", "windows-1250"},
- {"hu", "iso-8859-2"},
- {"ja", "shift_jis"},
- {"kk", "windows-1251"},
- {"ko", "euc-kr"},
- {"ku", "windows-1254"},
- {"ky", "windows-1251"},
- {"lt", "windows-1257"},
- {"lv", "windows-1257"},
- {"mk", "windows-1251"},
- {"pl", "iso-8859-2"},
- {"ru", "windows-1251"},
- {"sah", "windows-1251"},
- {"sk", "windows-1250"},
- {"sl", "iso-8859-2"},
- {"sr", "windows-1251"},
- {"tg", "windows-1251"},
- {"th", "windows-874"},
- {"tr", "windows-1254"},
- {"tt", "windows-1251"},
- {"uk", "windows-1251"},
- {"vi", "windows-1258"},
- {"zh-hans", "gb18030"},
- {"zh-hant", "big5"},
-}
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
deleted file mode 100644
index 26cfef9c6..000000000
--- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "encoding/xml"
- "fmt"
- "io"
- "log"
- "strings"
-
- "golang.org/x/text/internal/gen"
-)
-
-type registry struct {
- XMLName xml.Name `xml:"registry"`
- Updated string `xml:"updated"`
- Registry []struct {
- ID string `xml:"id,attr"`
- Record []struct {
- Name string `xml:"name"`
- Xref []struct {
- Type string `xml:"type,attr"`
- Data string `xml:"data,attr"`
- } `xml:"xref"`
- Desc struct {
- Data string `xml:",innerxml"`
- // Any []struct {
- // Data string `xml:",chardata"`
- // } `xml:",any"`
- // Data string `xml:",chardata"`
- } `xml:"description,"`
- MIB string `xml:"value"`
- Alias []string `xml:"alias"`
- MIME string `xml:"preferred_alias"`
- } `xml:"record"`
- } `xml:"registry"`
-}
-
-func main() {
- r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
- reg := &registry{}
- if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
- log.Fatalf("Error decoding charset registry: %v", err)
- }
- if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
- log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
- }
-
- w := &bytes.Buffer{}
- fmt.Fprintf(w, "const (\n")
- for _, rec := range reg.Registry[0].Record {
- constName := ""
- for _, a := range rec.Alias {
- if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
- // Some of the constant definitions have comments in them. Strip those.
- constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
- }
- }
- if constName == "" {
- switch rec.MIB {
- case "2085":
- constName = "HZGB2312" // Not listed as alias for some reason.
- default:
- log.Fatalf("No cs alias defined for %s.", rec.MIB)
- }
- }
- if rec.MIME != "" {
- rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
- }
- fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
- if len(rec.Desc.Data) > 0 {
- fmt.Fprint(w, "// ")
- d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
- inElem := true
- attr := ""
- for {
- t, err := d.Token()
- if err != nil {
- if err != io.EOF {
- log.Fatal(err)
- }
- break
- }
- switch x := t.(type) {
- case xml.CharData:
- attr = "" // Don't need attribute info.
- a := bytes.Split([]byte(x), []byte("\n"))
- for i, b := range a {
- if b = bytes.TrimSpace(b); len(b) != 0 {
- if !inElem && i > 0 {
- fmt.Fprint(w, "\n// ")
- }
- inElem = false
- fmt.Fprintf(w, "%s ", string(b))
- }
- }
- case xml.StartElement:
- if x.Name.Local == "xref" {
- inElem = true
- use := false
- for _, a := range x.Attr {
- if a.Name.Local == "type" {
- use = use || a.Value != "person"
- }
- if a.Name.Local == "data" && use {
- // Patch up URLs to use https. From some links, the
- // https version is different from the http one.
- s := a.Value
- s = strings.Replace(s, "http://", "https://", -1)
- s = strings.Replace(s, "/unicode/", "/", -1)
- attr = s + " "
- }
- }
- }
- case xml.EndElement:
- inElem = false
- fmt.Fprint(w, attr)
- }
- }
- fmt.Fprint(w, "\n")
- }
- for _, x := range rec.Xref {
- switch x.Type {
- case "rfc":
- fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
- case "uri":
- fmt.Fprintf(w, "// Reference: %s\n", x.Data)
- }
- }
- fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
- fmt.Fprintln(w)
- }
- fmt.Fprintln(w, ")")
-
- gen.WriteGoFile("mib.go", "identifier", w.Bytes())
-}
diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go
deleted file mode 100644
index 023957a67..000000000
--- a/vendor/golang.org/x/text/encoding/japanese/maketables.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-// TODO: Emoji extensions?
-// https://www.unicode.org/faq/emoji_dingbats.html
-// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-type entry struct {
- jisCode, table int
-}
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n")
- fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n")
-
- reverse := [65536]entry{}
- for i := range reverse {
- reverse[i].table = -1
- }
-
- tables := []struct {
- url string
- name string
- }{
- {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"},
- {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"},
- }
- for i, table := range tables {
- res, err := http.Get(table.url)
- if err != nil {
- log.Fatalf("%q: Get: %v", table.url, err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := 0, uint16(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("%q: could not parse %q", table.url, s)
- }
- if x < 0 || 120*94 <= x {
- log.Fatalf("%q: JIS code %d is out of range", table.url, x)
- }
- mapping[x] = y
- if reverse[y].table == -1 {
- reverse[y] = entry{jisCode: x, table: i}
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("%q: scanner error: %v", table.url, err)
- }
-
- fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n",
- table.name, table.name, table.url)
- fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name)
- for i, m := range mapping {
- if m != 0 {
- fmt.Printf("\t%d: 0x%04X,\n", i, m)
- }
- }
- fmt.Printf("}\n\n")
- }
-
- // Any run of at least separation continuous zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v.table == -1 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const (\n")
- fmt.Printf("\tjis0208 = 1\n")
- fmt.Printf("\tjis0212 = 2\n")
- fmt.Printf("\tcodeMask = 0x7f\n")
- fmt.Printf("\tcodeShift = 7\n")
- fmt.Printf("\ttableShift = 14\n")
- fmt.Printf(")\n\n")
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("//\n")
- fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n")
- fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n")
- fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n")
- fmt.Printf("// JIS code (94*j1 + j2) within that table.\n")
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x.table == -1 {
- continue
- }
- fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n",
- j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go
deleted file mode 100644
index c84034fb6..000000000
--- a/vendor/golang.org/x/text/encoding/korean/maketables.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n")
- fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n")
-
- res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint16{}
- reverse := [65536]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint16(0), uint16(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x {
- log.Fatalf("EUC-KR code %d is out of range", x)
- }
- mapping[x] = y
- if reverse[y] == 0 {
- c0, c1 := uint16(0), uint16(0)
- if x < 178*(0xc7-0x81) {
- c0 = uint16(x/178) + 0x81
- c1 = uint16(x % 178)
- switch {
- case c1 < 1*26:
- c1 += 0x41
- case c1 < 2*26:
- c1 += 0x47
- default:
- c1 += 0x4d
- }
- } else {
- x -= 178 * (0xc7 - 0x81)
- c0 = uint16(x/94) + 0xc7
- c1 = uint16(x%94) + 0xa1
- }
- reverse[y] = c0<<8 | c1
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("scanner error: %v", err)
- }
-
- fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n")
- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n")
- fmt.Printf("var decode = [...]uint16{\n")
- for i, v := range mapping {
- if v != 0 {
- fmt.Printf("\t%d: 0x%04X,\n", i, v)
- }
- }
- fmt.Printf("}\n\n")
-
- // Any run of at least separation continuous zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v == 0 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x == 0 {
- continue
- }
- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
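
The EUC-KR generator above rebuilds the two encoded bytes from a WHATWG index pointer with the c0/c1 arithmetic in its scanner loop. A standalone sketch of just that step, assuming the same layout as the deleted code; eucKRBytes is a name invented here, and the pointer used in main is only an arbitrary in-range value, not a claim about a particular rune:

package main

import "fmt"

// eucKRBytes mirrors the reverse-map arithmetic of the deleted generator:
// pointers below 178*(0xc7-0x81) use lead bytes 0x81..0xC6 with 178 trail
// values spread over three alphabetic ranges; the remaining pointers use
// lead bytes 0xC7..0xFE with the usual 94 trail values starting at 0xA1.
func eucKRBytes(x uint16) (c0, c1 byte) {
	if x < 178*(0xc7-0x81) {
		c0 = byte(x/178) + 0x81
		c1 = byte(x % 178)
		switch {
		case c1 < 1*26:
			c1 += 0x41
		case c1 < 2*26:
			c1 += 0x47
		default:
			c1 += 0x4d
		}
		return c0, c1
	}
	x -= 178 * (0xc7 - 0x81)
	return byte(x/94) + 0xc7, byte(x%94) + 0xa1
}

func main() {
	c0, c1 := eucKRBytes(9026) // arbitrary in-range pointer
	fmt.Printf("0x%02X 0x%02X\n", c0, c1)
}
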
diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
deleted file mode 100644
index 55016c786..000000000
--- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
- fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")
-
- printGB18030()
- printGBK()
-}
-
-func printGB18030() {
- res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
- fmt.Printf("var gb18030 = [...][2]uint16{\n")
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint32(0), uint32(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0x10000 && y < 0x10000 {
- fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
- }
- }
- fmt.Printf("}\n\n")
-}
-
-func printGBK() {
- res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint16{}
- reverse := [65536]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint16(0), uint16(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 126*190 <= x {
- log.Fatalf("GBK code %d is out of range", x)
- }
- mapping[x] = y
- if reverse[y] == 0 {
- c0, c1 := x/190, x%190
- if c1 >= 0x3f {
- c1++
- }
- reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("scanner error: %v", err)
- }
-
- fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
- fmt.Printf("var decode = [...]uint16{\n")
- for i, v := range mapping {
- if v != 0 {
- fmt.Printf("\t%d: 0x%04X,\n", i, v)
- }
- }
- fmt.Printf("}\n\n")
-
-	// Any run of at least separation consecutive zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v == 0 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x == 0 {
- continue
- }
- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
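
The GBK generator above uses simpler pointer arithmetic: lead byte 0x81 + x/190 and trail byte 0x40 + x%190, with trail offsets of 0x3f and above bumped by one so that 0x7f is never produced. A small sketch of that mapping (gbkBytes is a name invented for this example):

package main

import "fmt"

// gbkBytes mirrors the reverse-map arithmetic of the deleted GBK generator.
func gbkBytes(x uint16) (c0, c1 byte) {
	q, r := x/190, x%190
	if r >= 0x3f {
		r++ // skip over the 0x7f trail byte
	}
	return 0x81 + byte(q), 0x40 + byte(r)
}

func main() {
	c0, c1 := gbkBytes(0)
	fmt.Printf("0x%02X 0x%02X\n", c0, c1) // 0x81 0x40
	c0, c1 = gbkBytes(126*190 - 1)        // 126*190 is the generator's exclusive upper bound
	fmt.Printf("0x%02X 0x%02X\n", c0, c1) // 0xFE 0xFE
}
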
diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
deleted file mode 100644
index cf7fdb31a..000000000
--- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n")
- fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n")
-
- res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint32{}
- reverse := [65536 * 4]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint16(0), uint32(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 126*157 <= x {
- log.Fatalf("Big5 code %d is out of range", x)
- }
- mapping[x] = y
-
- // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that
- // "The index pointer for code point in index is the first pointer
- // corresponding to code point in index", which would normally mean
- // that the code below should be guarded by "if reverse[y] == 0", but
- // last instead of first seems to match the behavior of
- // "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in
- // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148
- // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc")
- // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc".
- c0, c1 := x/157, x%157
- if c1 < 0x3f {
- c1 += 0x40
- } else {
- c1 += 0x62
- }
- reverse[y] = (0x81+c0)<<8 | c1
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("scanner error: %v", err)
- }
-
- fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n")
- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n")
- fmt.Printf("var decode = [...]uint32{\n")
- for i, v := range mapping {
- if v != 0 {
- fmt.Printf("\t%d: 0x%08X,\n", i, v)
- }
- }
- fmt.Printf("}\n\n")
-
-	// Any run of at least separation consecutive zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v == 0 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x == 0 {
- continue
- }
- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
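
Unlike the other generators, the Big5 builder above deliberately lets a later index pointer overwrite an earlier one in the reverse map (there is no "if reverse[y] == 0" guard), matching iconv rather than the literal WHATWG wording; the U+8005 者 case quoted in its comment is the motivating example. A tiny sketch contrasting the two policies, reusing the byte pairs 0x8ECD and 0xAACC from that comment:

package main

import "fmt"

func main() {
	// Two Big5 byte pairs that decode to the same rune, in the order they
	// appear in the index file (values taken from the comment above).
	pairs := []struct {
		code uint16
		r    rune
	}{
		{0x8ECD, '者'},
		{0xAACC, '者'},
	}

	firstWins := map[rune]uint16{} // what the WHATWG wording suggests
	lastWins := map[rune]uint16{}  // what the deleted generator (and iconv) does
	for _, p := range pairs {
		if _, ok := firstWins[p.r]; !ok {
			firstWins[p.r] = p.code
		}
		lastWins[p.r] = p.code
	}
	fmt.Printf("first wins: 0x%04X, last wins: 0x%04X\n", firstWins['者'], lastWins['者'])
}
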
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go
deleted file mode 100644
index 0c36a052f..000000000
--- a/vendor/golang.org/x/text/internal/language/compact/gen.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Language tag table generator.
-// Data read from the web.
-
-package main
-
-import (
- "flag"
- "fmt"
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/unicode/cldr"
-)
-
-var (
- test = flag.Bool("test",
- false,
- "test existing tables; can be used to compare web data with package data.")
- outputFile = flag.String("output",
- "tables.go",
- "output file for generated tables")
-)
-
-func main() {
- gen.Init()
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "compact")
-
- fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`)
-
- b := newBuilder(w)
- gen.WriteCLDRVersion(w)
-
- b.writeCompactIndex()
-}
-
-type builder struct {
- w *gen.CodeWriter
- data *cldr.CLDR
- supp *cldr.SupplementalData
-}
-
-func newBuilder(w *gen.CodeWriter) *builder {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- if err != nil {
- log.Fatal(err)
- }
- b := builder{
- w: w,
- data: data,
- supp: data.Supplemental(),
- }
- return &b
-}
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go
deleted file mode 100644
index 136cefaf0..000000000
--- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This file generates derivative tables based on the language package itself.
-
-import (
- "fmt"
- "log"
- "sort"
- "strings"
-
- "golang.org/x/text/internal/language"
-)
-
-// Compact indices:
-// Note -va-X variants only apply to localization variants.
-// BCP variants only ever apply to language.
-// The only ambiguity between tags is with regions.
-
-func (b *builder) writeCompactIndex() {
- // Collect all language tags for which we have any data in CLDR.
- m := map[language.Tag]bool{}
- for _, lang := range b.data.Locales() {
- // We include all locales unconditionally to be consistent with en_US.
- // We want en_US, even though it has no data associated with it.
-
- // TODO: put any of the languages for which no data exists at the end
- // of the index. This allows all components based on ICU to use that
- // as the cutoff point.
- // if x := data.RawLDML(lang); false ||
- // x.LocaleDisplayNames != nil ||
- // x.Characters != nil ||
- // x.Delimiters != nil ||
- // x.Measurement != nil ||
- // x.Dates != nil ||
- // x.Numbers != nil ||
- // x.Units != nil ||
- // x.ListPatterns != nil ||
- // x.Collations != nil ||
- // x.Segmentations != nil ||
- // x.Rbnf != nil ||
- // x.Annotations != nil ||
- // x.Metadata != nil {
-
- // TODO: support POSIX natively, albeit non-standard.
- tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
- m[tag] = true
- // }
- }
-
- // TODO: plural rules are also defined for the deprecated tags:
- // iw mo sh tl
- // Consider removing these as compact tags.
-
- // Include locales for plural rules, which uses a different structure.
- for _, plurals := range b.supp.Plurals {
- for _, rules := range plurals.PluralRules {
- for _, lang := range strings.Split(rules.Locales, " ") {
- m[language.Make(lang)] = true
- }
- }
- }
-
- var coreTags []language.CompactCoreInfo
- var special []string
-
- for t := range m {
- if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
- log.Fatalf("Unexpected extension %v in %v", x, t)
- }
- if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
- cci, ok := language.GetCompactCore(t)
- if !ok {
- log.Fatalf("Locale for non-basic language %q", t)
- }
- coreTags = append(coreTags, cci)
- } else {
- special = append(special, t.String())
- }
- }
-
- w := b.w
-
- sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] })
- sort.Strings(special)
-
- w.WriteComment(`
- NumCompactTags is the number of common tags. The maximum tag is
- NumCompactTags-1.`)
- w.WriteConst("NumCompactTags", len(m))
-
- fmt.Fprintln(w, "const (")
- for i, t := range coreTags {
- fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i)
- }
- for i, t := range special {
- fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags))
- }
- fmt.Fprintln(w, ")")
-
- w.WriteVar("coreTags", coreTags)
-
- w.WriteConst("specialTagsStr", strings.Join(special, " "))
-}
-
-func ident(s string) string {
- return strings.Replace(s, "-", "", -1) + "Index"
-}
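
gen_index.go above assigns compact IDs by emitting the basic (core) tags first and the few tags carrying variants or extensions after them, so special tags always take the highest IDs and NumCompactTags is simply the total count. A dependency-free sketch of that ordering (the tag lists are illustrative, not CLDR data, and the real core tags are sorted by their numeric CompactCoreInfo value rather than as strings):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Tags without variants or extensions become core tags; the rest are special.
	coreTags := []string{"und", "en", "es-419", "de"}
	special := []string{"en-US-u-va-posix", "ca-ES-valencia"}

	sort.Strings(coreTags)
	sort.Strings(special)

	id := 0
	for _, t := range coreTags {
		fmt.Printf("%-20s ID = %d\n", t, id)
		id++
	}
	for _, t := range special {
		fmt.Printf("%-20s ID = %d\n", t, id)
		id++
	}
	fmt.Println("NumCompactTags =", id) // 6
}
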
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go
deleted file mode 100644
index 9543d5832..000000000
--- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/language"
- "golang.org/x/text/internal/language/compact"
- "golang.org/x/text/unicode/cldr"
-)
-
-func main() {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
-
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- if err != nil {
- log.Fatalf("DecodeZip: %v", err)
- }
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("parents.go", "compact")
-
- // Create parents table.
- type ID uint16
- parents := make([]ID, compact.NumCompactTags)
- for _, loc := range data.Locales() {
- tag := language.MustParse(loc)
- index, ok := compact.FromTag(tag)
- if !ok {
- continue
-	// These entries are not included in territoryCodes. Mostly 3-letter variants
- parentIndex := compact.ID(0) // und
- for p := tag.Parent(); p != language.Und; p = p.Parent() {
- if x, ok := compact.FromTag(p); ok {
- parentIndex = x
- break
- }
- }
- parents[index] = ID(parentIndex)
- }
-
- w.WriteComment(`
- parents maps a compact index of a tag to the compact index of the parent of
- this tag.`)
- w.WriteVar("parents", parents)
-}
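
The parents table above comes from a simple walk: for each locale, climb the CLDR parent chain until a tag that has a compact index is found, defaulting to index 0 (und). A schematic sketch of that walk using plain strings instead of the x/text types; compactIndex and parentOf are stand-ins invented for this example, and real CLDR parent relations are richer than "drop the last subtag":

package main

import "fmt"

// compactIndex stands in for compact.FromTag: only these tags have an index.
var compactIndex = map[string]int{
	"und":    0,
	"en":     1,
	"en-001": 2,
	"es":     3,
	"es-419": 4,
}

// parentOf stands in for language.Tag.Parent: here it just drops the last subtag.
func parentOf(tag string) string {
	for i := len(tag) - 1; i >= 0; i-- {
		if tag[i] == '-' {
			return tag[:i]
		}
	}
	return "und"
}

// compactParent walks up the chain until a tag with a compact index is found.
func compactParent(tag string) int {
	for p := parentOf(tag); p != "und"; p = parentOf(p) {
		if idx, ok := compactIndex[p]; ok {
			return idx
		}
	}
	return 0 // und
}

func main() {
	fmt.Println(compactParent("en-001-x-priv")) // 2: first ancestor with an index is en-001
	fmt.Println(compactParent("es-AR"))         // 3: es-AR itself has no index, es does
	fmt.Println(compactParent("fr"))            // 0: nothing known above it, so und
}
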
diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go
deleted file mode 100644
index cdcc7febc..000000000
--- a/vendor/golang.org/x/text/internal/language/gen.go
+++ /dev/null
@@ -1,1520 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Language tag table generator.
-// Data read from the web.
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/tag"
- "golang.org/x/text/unicode/cldr"
-)
-
-var (
- test = flag.Bool("test",
- false,
- "test existing tables; can be used to compare web data with package data.")
- outputFile = flag.String("output",
- "tables.go",
- "output file for generated tables")
-)
-
-var comment = []string{
- `
-lang holds an alphabetically sorted list of ISO-639 language identifiers.
-All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag.
-For 2-byte language identifiers, the two successive bytes have the following meaning:
- - if the first letter of the 2- and 3-letter ISO codes are the same:
- the second and third letter of the 3-letter ISO code.
- - otherwise: a 0 and an index into altLangISO3, shifted right by 2 bits.
-For 3-byte language identifiers the 4th byte is 0.`,
- `
-langNoIndex is a bit vector of all 3-letter language codes that are not used as an index
-in lookup tables. The language ids for these language codes are derived directly
-from the letters and are not consecutive.`,
- `
-altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives
-to 2-letter language codes that cannot be derived using the method described above.
-Each 3-letter code is followed by its 1-byte langID.`,
- `
-altLangIndex is used to convert indexes in altLangISO3 to langIDs.`,
- `
-AliasMap maps langIDs to their suggested replacements.`,
- `
-script is an alphabetically sorted list of ISO 15924 codes. The index
-of the script in the string, divided by 4, is the internal scriptID.`,
- `
-isoRegionOffset needs to be added to the index of regionISO to obtain the regionID
-for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for
-the UN.M49 codes used for groups.)`,
- `
-regionISO holds a list of alphabetically sorted 2-letter ISO region codes.
-Each 2-letter code is followed by two bytes with the following meaning:
- - [A-Z]{2}: the first letter of the 2-letter code plus these two
- letters form the 3-letter ISO code.
- - 0, n: index into altRegionISO3.`,
- `
-regionTypes defines the status of a region for various standards.`,
- `
-m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are
-codes indicating collections of regions.`,
- `
-m49Index gives indexes into fromM49 based on the three most significant bits
-of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in
- fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]]
-for an entry where the first 7 bits match the 7 lsb of the UN.M49 code.
-The region code is stored in the 9 lsb of the indexed value.`,
- `
-fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`,
- `
-altRegionISO3 holds a list of 3-letter region codes that cannot be
-mapped to 2-letter codes using the default algorithm. This is a short list.`,
- `
-altRegionIDs holds a list of regionIDs the positions of which match those
-of the 3-letter ISO codes in altRegionISO3.`,
- `
-variantNumSpecialized is the number of specialized variants in variants.`,
- `
-suppressScript is an index from langID to the dominant script for that language,
-if it exists. If a script is given, it should be suppressed from the language tag.`,
- `
-likelyLang is a lookup table, indexed by langID, for the most likely
-scripts and regions given incomplete information. If more entries exist for a
-given language, region and script are the index and size respectively
-of the list in likelyLangList.`,
- `
-likelyLangList holds the lists of info associated with likelyLang.`,
- `
-likelyRegion is a lookup table, indexed by regionID, for the most likely
-languages and scripts given incomplete information. If more entries exist
-for a given regionID, lang and script are the index and size respectively
-of the list in likelyRegionList.
-TODO: exclude containers and user-definable regions from the list.`,
- `
-likelyRegionList holds the lists of info associated with likelyRegion.`,
- `
-likelyScript is a lookup table, indexed by scriptID, for the most likely
-languages and regions given a script.`,
- `
-nRegionGroups is the number of region groups.`,
- `
-regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
-where each set holds all groupings that are directly connected in a region
-containment graph.`,
- `
-regionInclusionBits is an array of bit vectors where every vector represents
-a set of region groupings. These sets are used to compute the distance
-between two regions for the purpose of language matching.`,
- `
-regionInclusionNext marks, for each entry in regionInclusionBits, the set of
-all groups that are reachable from the groups set in the respective entry.`,
-}
-
-// TODO: consider changing some of these structures to tries. This can reduce
-// memory, but may increase the need for memory allocations. This could be
-// mitigated if we can piggyback on language tags for common cases.
-
-func failOnError(e error) {
- if e != nil {
- log.Panic(e)
- }
-}
-
-type setType int
-
-const (
- Indexed setType = 1 + iota // all elements must be of same size
- Linear
-)
-
-type stringSet struct {
- s []string
- sorted, frozen bool
-
- // We often need to update values after the creation of an index is completed.
- // We include a convenience map for keeping track of this.
- update map[string]string
- typ setType // used for checking.
-}
-
-func (ss *stringSet) clone() stringSet {
- c := *ss
- c.s = append([]string(nil), c.s...)
- return c
-}
-
-func (ss *stringSet) setType(t setType) {
- if ss.typ != t && ss.typ != 0 {
- log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ)
- }
-}
-
-// parse parses a whitespace-separated string and initializes ss with its
-// components.
-func (ss *stringSet) parse(s string) {
- scan := bufio.NewScanner(strings.NewReader(s))
- scan.Split(bufio.ScanWords)
- for scan.Scan() {
- ss.add(scan.Text())
- }
-}
-
-func (ss *stringSet) assertChangeable() {
- if ss.frozen {
- log.Panic("attempt to modify a frozen stringSet")
- }
-}
-
-func (ss *stringSet) add(s string) {
- ss.assertChangeable()
- ss.s = append(ss.s, s)
- ss.sorted = ss.frozen
-}
-
-func (ss *stringSet) freeze() {
- ss.compact()
- ss.frozen = true
-}
-
-func (ss *stringSet) compact() {
- if ss.sorted {
- return
- }
- a := ss.s
- sort.Strings(a)
- k := 0
- for i := 1; i < len(a); i++ {
- if a[k] != a[i] {
- a[k+1] = a[i]
- k++
- }
- }
- ss.s = a[:k+1]
- ss.sorted = ss.frozen
-}
-
-type funcSorter struct {
- fn func(a, b string) bool
- sort.StringSlice
-}
-
-func (s funcSorter) Less(i, j int) bool {
- return s.fn(s.StringSlice[i], s.StringSlice[j])
-}
-
-func (ss *stringSet) sortFunc(f func(a, b string) bool) {
- ss.compact()
- sort.Sort(funcSorter{f, sort.StringSlice(ss.s)})
-}
-
-func (ss *stringSet) remove(s string) {
- ss.assertChangeable()
- if i, ok := ss.find(s); ok {
- copy(ss.s[i:], ss.s[i+1:])
- ss.s = ss.s[:len(ss.s)-1]
- }
-}
-
-func (ss *stringSet) replace(ol, nu string) {
- ss.s[ss.index(ol)] = nu
- ss.sorted = ss.frozen
-}
-
-func (ss *stringSet) index(s string) int {
- ss.setType(Indexed)
- i, ok := ss.find(s)
- if !ok {
- if i < len(ss.s) {
- log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i])
- }
- log.Panicf("find: item %q is not in list", s)
-
- }
- return i
-}
-
-func (ss *stringSet) find(s string) (int, bool) {
- ss.compact()
- i := sort.SearchStrings(ss.s, s)
- return i, i != len(ss.s) && ss.s[i] == s
-}
-
-func (ss *stringSet) slice() []string {
- ss.compact()
- return ss.s
-}
-
-func (ss *stringSet) updateLater(v, key string) {
- if ss.update == nil {
- ss.update = map[string]string{}
- }
- ss.update[v] = key
-}
-
-// join joins the string and ensures that all entries are of the same length.
-func (ss *stringSet) join() string {
- ss.setType(Indexed)
- n := len(ss.s[0])
- for _, s := range ss.s {
- if len(s) != n {
- log.Panicf("join: not all entries are of the same length: %q", s)
- }
- }
- ss.s = append(ss.s, strings.Repeat("\xff", n))
- return strings.Join(ss.s, "")
-}
-
-// ianaEntry holds information for an entry in the IANA Language Subtag Repository.
-// All types use the same entry.
-// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various
-// fields.
-type ianaEntry struct {
- typ string
- description []string
- scope string
- added string
- preferred string
- deprecated string
- suppressScript string
- macro string
- prefix []string
-}
-
-type builder struct {
- w *gen.CodeWriter
- hw io.Writer // MultiWriter for w and w.Hash
- data *cldr.CLDR
- supp *cldr.SupplementalData
-
- // indices
- locale stringSet // common locales
- lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data
- langNoIndex stringSet // 3-letter ISO codes with no associated data
- script stringSet // 4-letter ISO codes
- region stringSet // 2-letter ISO or 3-digit UN M49 codes
- variant stringSet // 4-8-alphanumeric variant code.
-
- // Region codes that are groups with their corresponding group IDs.
- groups map[int]index
-
- // langInfo
- registry map[string]*ianaEntry
-}
-
-type index uint
-
-func newBuilder(w *gen.CodeWriter) *builder {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- failOnError(err)
- b := builder{
- w: w,
- hw: io.MultiWriter(w, w.Hash),
- data: data,
- supp: data.Supplemental(),
- }
- b.parseRegistry()
- return &b
-}
-
-func (b *builder) parseRegistry() {
- r := gen.OpenIANAFile("assignments/language-subtag-registry")
- defer r.Close()
- b.registry = make(map[string]*ianaEntry)
-
- scan := bufio.NewScanner(r)
- scan.Split(bufio.ScanWords)
- var record *ianaEntry
- for more := scan.Scan(); more; {
- key := scan.Text()
- more = scan.Scan()
- value := scan.Text()
- switch key {
- case "Type:":
- record = &ianaEntry{typ: value}
- case "Subtag:", "Tag:":
- if s := strings.SplitN(value, "..", 2); len(s) > 1 {
- for a := s[0]; a <= s[1]; a = inc(a) {
- b.addToRegistry(a, record)
- }
- } else {
- b.addToRegistry(value, record)
- }
- case "Suppress-Script:":
- record.suppressScript = value
- case "Added:":
- record.added = value
- case "Deprecated:":
- record.deprecated = value
- case "Macrolanguage:":
- record.macro = value
- case "Preferred-Value:":
- record.preferred = value
- case "Prefix:":
- record.prefix = append(record.prefix, value)
- case "Scope:":
- record.scope = value
- case "Description:":
- buf := []byte(value)
- for more = scan.Scan(); more; more = scan.Scan() {
- b := scan.Bytes()
- if b[0] == '%' || b[len(b)-1] == ':' {
- break
- }
- buf = append(buf, ' ')
- buf = append(buf, b...)
- }
- record.description = append(record.description, string(buf))
- continue
- default:
- continue
- }
- more = scan.Scan()
- }
- if scan.Err() != nil {
- log.Panic(scan.Err())
- }
-}
-
-func (b *builder) addToRegistry(key string, entry *ianaEntry) {
- if info, ok := b.registry[key]; ok {
- if info.typ != "language" || entry.typ != "extlang" {
- log.Fatalf("parseRegistry: tag %q already exists", key)
- }
- } else {
- b.registry[key] = entry
- }
-}
-
-var commentIndex = make(map[string]string)
-
-func init() {
- for _, s := range comment {
- key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0])
- commentIndex[key] = s
- }
-}
-
-func (b *builder) comment(name string) {
- if s := commentIndex[name]; len(s) > 0 {
- b.w.WriteComment(s)
- } else {
- fmt.Fprintln(b.w)
- }
-}
-
-func (b *builder) pf(f string, x ...interface{}) {
- fmt.Fprintf(b.hw, f, x...)
- fmt.Fprint(b.hw, "\n")
-}
-
-func (b *builder) p(x ...interface{}) {
- fmt.Fprintln(b.hw, x...)
-}
-
-func (b *builder) addSize(s int) {
- b.w.Size += s
- b.pf("// Size: %d bytes", s)
-}
-
-func (b *builder) writeConst(name string, x interface{}) {
- b.comment(name)
- b.w.WriteConst(name, x)
-}
-
-// writeConsts computes f(v) for all v in values and writes the results
-// as constants named _v to a single constant block.
-func (b *builder) writeConsts(f func(string) int, values ...string) {
- b.pf("const (")
- for _, v := range values {
- b.pf("\t_%s = %v", v, f(v))
- }
- b.pf(")")
-}
-
-// writeType writes the type of the given value, which must be a struct.
-func (b *builder) writeType(value interface{}) {
- b.comment(reflect.TypeOf(value).Name())
- b.w.WriteType(value)
-}
-
-func (b *builder) writeSlice(name string, ss interface{}) {
- b.writeSliceAddSize(name, 0, ss)
-}
-
-func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) {
- b.comment(name)
- b.w.Size += extraSize
- v := reflect.ValueOf(ss)
- t := v.Type().Elem()
- b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len())
-
- fmt.Fprintf(b.w, "var %s = ", name)
- b.w.WriteArray(ss)
- b.p()
-}
-
-type FromTo struct {
- From, To uint16
-}
-
-func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) {
- ss.sortFunc(func(a, b string) bool {
- return index(a) < index(b)
- })
- m := []FromTo{}
- for _, s := range ss.s {
- m = append(m, FromTo{index(s), index(ss.update[s])})
- }
- b.writeSlice(name, m)
-}
-
-const base = 'z' - 'a' + 1
-
-func strToInt(s string) uint {
- v := uint(0)
- for i := 0; i < len(s); i++ {
- v *= base
- v += uint(s[i] - 'a')
- }
- return v
-}
-
-// converts the given integer to the original ASCII string passed to strToInt.
-// len(s) must match the number of characters obtained.
-func intToStr(v uint, s []byte) {
- for i := len(s) - 1; i >= 0; i-- {
- s[i] = byte(v%base) + 'a'
- v /= base
- }
-}
-
-func (b *builder) writeBitVector(name string, ss []string) {
- vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8)))
- for _, s := range ss {
- v := strToInt(s)
- vec[v/8] |= 1 << (v % 8)
- }
- b.writeSlice(name, vec)
-}
-
-// TODO: convert this type into a list or two-stage trie.
-func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) {
- b.comment(name)
- v := reflect.ValueOf(m)
- sz := v.Len() * (2 + int(v.Type().Key().Size()))
- for _, k := range m {
- sz += len(k)
- }
- b.addSize(sz)
- keys := []string{}
- b.pf(`var %s = map[string]uint16{`, name)
- for k := range m {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- b.pf("\t%q: %v,", k, f(m[k]))
- }
- b.p("}")
-}
-
-func (b *builder) writeMap(name string, m interface{}) {
- b.comment(name)
- v := reflect.ValueOf(m)
- sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size()))
- b.addSize(sz)
- f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool {
- return strings.IndexRune("{}, ", r) != -1
- })
- sort.Strings(f[1:])
- b.pf(`var %s = %s{`, name, f[0])
- for _, kv := range f[1:] {
- b.pf("\t%s,", kv)
- }
- b.p("}")
-}
-
-func (b *builder) langIndex(s string) uint16 {
- if s == "und" {
- return 0
- }
- if i, ok := b.lang.find(s); ok {
- return uint16(i)
- }
- return uint16(strToInt(s)) + uint16(len(b.lang.s))
-}
-
-// inc advances the string to its lexicographical successor.
-func inc(s string) string {
- const maxTagLength = 4
- var buf [maxTagLength]byte
- intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)])
- for i := 0; i < len(s); i++ {
- if s[i] <= 'Z' {
- buf[i] -= 'a' - 'A'
- }
- }
- return string(buf[:len(s)])
-}
-
-func (b *builder) parseIndices() {
- meta := b.supp.Metadata
-
- for k, v := range b.registry {
- var ss *stringSet
- switch v.typ {
- case "language":
- if len(k) == 2 || v.suppressScript != "" || v.scope == "special" {
- b.lang.add(k)
- continue
- } else {
- ss = &b.langNoIndex
- }
- case "region":
- ss = &b.region
- case "script":
- ss = &b.script
- case "variant":
- ss = &b.variant
- default:
- continue
- }
- ss.add(k)
- }
- // Include any language for which there is data.
- for _, lang := range b.data.Locales() {
- if x := b.data.RawLDML(lang); false ||
- x.LocaleDisplayNames != nil ||
- x.Characters != nil ||
- x.Delimiters != nil ||
- x.Measurement != nil ||
- x.Dates != nil ||
- x.Numbers != nil ||
- x.Units != nil ||
- x.ListPatterns != nil ||
- x.Collations != nil ||
- x.Segmentations != nil ||
- x.Rbnf != nil ||
- x.Annotations != nil ||
- x.Metadata != nil {
-
- from := strings.Split(lang, "_")
- if lang := from[0]; lang != "root" {
- b.lang.add(lang)
- }
- }
- }
- // Include locales for plural rules, which uses a different structure.
- for _, plurals := range b.data.Supplemental().Plurals {
- for _, rules := range plurals.PluralRules {
- for _, lang := range strings.Split(rules.Locales, " ") {
- if lang = strings.Split(lang, "_")[0]; lang != "root" {
- b.lang.add(lang)
- }
- }
- }
- }
- // Include languages in likely subtags.
- for _, m := range b.supp.LikelySubtags.LikelySubtag {
- from := strings.Split(m.From, "_")
- b.lang.add(from[0])
- }
- // Include ISO-639 alpha-3 bibliographic entries.
- for _, a := range meta.Alias.LanguageAlias {
- if a.Reason == "bibliographic" {
- b.langNoIndex.add(a.Type)
- }
- }
- // Include regions in territoryAlias (not all are in the IANA registry!)
- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(reg.Type) == 2 {
- b.region.add(reg.Type)
- }
- }
-
- for _, s := range b.lang.s {
- if len(s) == 3 {
- b.langNoIndex.remove(s)
- }
- }
- b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice()))
- b.writeConst("NumScripts", len(b.script.slice()))
- b.writeConst("NumRegions", len(b.region.slice()))
-
- // Add dummy codes at the start of each list to represent "unspecified".
- b.lang.add("---")
- b.script.add("----")
- b.region.add("---")
-
- // common locales
- b.locale.parse(meta.DefaultContent.Locales)
-}
-
-// TODO: region inclusion data will probably not be used in future matchers.
-
-func (b *builder) computeRegionGroups() {
- b.groups = make(map[int]index)
-
- // Create group indices.
- for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID.
- b.groups[i] = index(len(b.groups))
- }
- for _, g := range b.supp.TerritoryContainment.Group {
- // Skip UN and EURO zone as they are flattening the containment
- // relationship.
- if g.Type == "EZ" || g.Type == "UN" {
- continue
- }
- group := b.region.index(g.Type)
- if _, ok := b.groups[group]; !ok {
- b.groups[group] = index(len(b.groups))
- }
- }
- if len(b.groups) > 64 {
- log.Fatalf("only 64 groups supported, found %d", len(b.groups))
- }
- b.writeConst("nRegionGroups", len(b.groups))
-}
-
-var langConsts = []string{
- "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es",
- "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is",
- "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml",
- "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt",
- "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th",
- "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu",
-
- // constants for grandfathered tags (if not already defined)
- "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu",
- "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn",
-}
-
-// writeLanguage generates all tables needed for language canonicalization.
-func (b *builder) writeLanguage() {
- meta := b.supp.Metadata
-
- b.writeConst("nonCanonicalUnd", b.lang.index("und"))
- b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
- b.writeConst("langPrivateStart", b.langIndex("qaa"))
- b.writeConst("langPrivateEnd", b.langIndex("qtz"))
-
- // Get language codes that need to be mapped (overlong 3-letter codes,
- // deprecated 2-letter codes, legacy and grandfathered tags.)
- langAliasMap := stringSet{}
- aliasTypeMap := map[string]AliasType{}
-
-	// altLangISO3 gets the alternative ISO3 names that need to be mapped.
- altLangISO3 := stringSet{}
- // Add dummy start to avoid the use of index 0.
- altLangISO3.add("---")
- altLangISO3.updateLater("---", "aa")
-
- lang := b.lang.clone()
- for _, a := range meta.Alias.LanguageAlias {
- if a.Replacement == "" {
- a.Replacement = "und"
- }
- // TODO: support mapping to tags
- repl := strings.SplitN(a.Replacement, "_", 2)[0]
- if a.Reason == "overlong" {
- if len(a.Replacement) == 2 && len(a.Type) == 3 {
- lang.updateLater(a.Replacement, a.Type)
- }
- } else if len(a.Type) <= 3 {
- switch a.Reason {
- case "macrolanguage":
- aliasTypeMap[a.Type] = Macro
- case "deprecated":
- // handled elsewhere
- continue
- case "bibliographic", "legacy":
- if a.Type == "no" {
- continue
- }
- aliasTypeMap[a.Type] = Legacy
- default:
- log.Fatalf("new %s alias: %s", a.Reason, a.Type)
- }
- langAliasMap.add(a.Type)
- langAliasMap.updateLater(a.Type, repl)
- }
- }
- // Manually add the mapping of "nb" (Norwegian) to its macro language.
- // This can be removed if CLDR adopts this change.
- langAliasMap.add("nb")
- langAliasMap.updateLater("nb", "no")
- aliasTypeMap["nb"] = Macro
-
- for k, v := range b.registry {
- // Also add deprecated values for 3-letter ISO codes, which CLDR omits.
- if v.typ == "language" && v.deprecated != "" && v.preferred != "" {
- langAliasMap.add(k)
- langAliasMap.updateLater(k, v.preferred)
- aliasTypeMap[k] = Deprecated
- }
- }
- // Fix CLDR mappings.
- lang.updateLater("tl", "tgl")
- lang.updateLater("sh", "hbs")
- lang.updateLater("mo", "mol")
- lang.updateLater("no", "nor")
- lang.updateLater("tw", "twi")
- lang.updateLater("nb", "nob")
- lang.updateLater("ak", "aka")
- lang.updateLater("bh", "bih")
-
- // Ensure that each 2-letter code is matched with a 3-letter code.
- for _, v := range lang.s[1:] {
- s, ok := lang.update[v]
- if !ok {
- if s, ok = lang.update[langAliasMap.update[v]]; !ok {
- continue
- }
- lang.update[v] = s
- }
- if v[0] != s[0] {
- altLangISO3.add(s)
- altLangISO3.updateLater(s, v)
- }
- }
-
- // Complete canonicalized language tags.
- lang.freeze()
- for i, v := range lang.s {
- // We can avoid these manual entries by using the IANA registry directly.
- // Seems easier to update the list manually, as changes are rare.
- // The panic in this loop will trigger if we miss an entry.
- add := ""
- if s, ok := lang.update[v]; ok {
- if s[0] == v[0] {
- add = s[1:]
- } else {
- add = string([]byte{0, byte(altLangISO3.index(s))})
- }
- } else if len(v) == 3 {
- add = "\x00"
- } else {
- log.Panicf("no data for long form of %q", v)
- }
- lang.s[i] += add
- }
- b.writeConst("lang", tag.Index(lang.join()))
-
- b.writeConst("langNoIndexOffset", len(b.lang.s))
-
- // space of all valid 3-letter language identifiers.
- b.writeBitVector("langNoIndex", b.langNoIndex.slice())
-
- altLangIndex := []uint16{}
- for i, s := range altLangISO3.slice() {
- altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))})
- if i > 0 {
- idx := b.lang.index(altLangISO3.update[s])
- altLangIndex = append(altLangIndex, uint16(idx))
- }
- }
- b.writeConst("altLangISO3", tag.Index(altLangISO3.join()))
- b.writeSlice("altLangIndex", altLangIndex)
-
- b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex)
- types := make([]AliasType, len(langAliasMap.s))
- for i, s := range langAliasMap.s {
- types[i] = aliasTypeMap[s]
- }
- b.writeSlice("AliasTypes", types)
-}
-
-var scriptConsts = []string{
- "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
- "Zzzz",
-}
-
-func (b *builder) writeScript() {
- b.writeConsts(b.script.index, scriptConsts...)
- b.writeConst("script", tag.Index(b.script.join()))
-
- supp := make([]uint8, len(b.lang.slice()))
- for i, v := range b.lang.slice()[1:] {
- if sc := b.registry[v].suppressScript; sc != "" {
- supp[i+1] = uint8(b.script.index(sc))
- }
- }
- b.writeSlice("suppressScript", supp)
-
- // There is only one deprecated script in CLDR. This value is hard-coded.
- // We check here if the code must be updated.
- for _, a := range b.supp.Metadata.Alias.ScriptAlias {
- if a.Type != "Qaai" {
-			log.Panicf("unexpected deprecated script %q", a.Type)
- }
- }
-}
-
-func parseM49(s string) int16 {
- if len(s) == 0 {
- return 0
- }
- v, err := strconv.ParseUint(s, 10, 10)
- failOnError(err)
- return int16(v)
-}
-
-var regionConsts = []string{
- "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
- "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
-}
-
-func (b *builder) writeRegion() {
- b.writeConsts(b.region.index, regionConsts...)
-
- isoOffset := b.region.index("AA")
- m49map := make([]int16, len(b.region.slice()))
- fromM49map := make(map[int16]int)
- altRegionISO3 := ""
- altRegionIDs := []uint16{}
-
- b.writeConst("isoRegionOffset", isoOffset)
-
- // 2-letter region lookup and mapping to numeric codes.
- regionISO := b.region.clone()
- regionISO.s = regionISO.s[isoOffset:]
- regionISO.sorted = false
-
- regionTypes := make([]byte, len(b.region.s))
-
- // Is the region valid BCP 47?
- for s, e := range b.registry {
- if len(s) == 2 && s == strings.ToUpper(s) {
- i := b.region.index(s)
- for _, d := range e.description {
- if strings.Contains(d, "Private use") {
- regionTypes[i] = iso3166UserAssigned
- }
- }
- regionTypes[i] |= bcp47Region
- }
- }
-
- // Is the region a valid ccTLD?
- r := gen.OpenIANAFile("domains/root/db")
- defer r.Close()
-
- buf, err := ioutil.ReadAll(r)
- failOnError(err)
- re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`)
- for _, m := range re.FindAllSubmatch(buf, -1) {
- i := b.region.index(strings.ToUpper(string(m[1])))
- regionTypes[i] |= ccTLD
- }
-
- b.writeSlice("regionTypes", regionTypes)
-
- iso3Set := make(map[string]int)
- update := func(iso2, iso3 string) {
- i := regionISO.index(iso2)
- if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] {
- regionISO.s[i] += iso3[1:]
- iso3Set[iso3] = -1
- } else {
- if ok && j >= 0 {
- regionISO.s[i] += string([]byte{0, byte(j)})
- } else {
- iso3Set[iso3] = len(altRegionISO3)
- regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))})
- altRegionISO3 += iso3
- altRegionIDs = append(altRegionIDs, uint16(isoOffset+i))
- }
- }
- }
- for _, tc := range b.supp.CodeMappings.TerritoryCodes {
- i := regionISO.index(tc.Type) + isoOffset
- if d := m49map[i]; d != 0 {
- log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d)
- }
- m49 := parseM49(tc.Numeric)
- m49map[i] = m49
- if r := fromM49map[m49]; r == 0 {
- fromM49map[m49] = i
- } else if r != i {
- dep := b.registry[regionISO.s[r-isoOffset]].deprecated
- if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) {
- fromM49map[m49] = i
- }
- }
- }
- for _, ta := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 {
- from := parseM49(ta.Type)
- if r := fromM49map[from]; r == 0 {
- fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset
- }
- }
- }
- for _, tc := range b.supp.CodeMappings.TerritoryCodes {
- if len(tc.Alpha3) == 3 {
- update(tc.Type, tc.Alpha3)
- }
- }
- // This entries are not included in territoryCodes. Mostly 3-letter variants
- // of deleted codes and an entry for QU.
- for _, m := range []struct{ iso2, iso3 string }{
- {"CT", "CTE"},
- {"DY", "DHY"},
- {"HV", "HVO"},
- {"JT", "JTN"},
- {"MI", "MID"},
- {"NH", "NHB"},
- {"NQ", "ATN"},
- {"PC", "PCI"},
- {"PU", "PUS"},
- {"PZ", "PCZ"},
- {"RH", "RHO"},
- {"VD", "VDR"},
- {"WK", "WAK"},
- // These three-letter codes are used for others as well.
- {"FQ", "ATF"},
- } {
- update(m.iso2, m.iso3)
- }
- for i, s := range regionISO.s {
- if len(s) != 4 {
- regionISO.s[i] = s + " "
- }
- }
- b.writeConst("regionISO", tag.Index(regionISO.join()))
- b.writeConst("altRegionISO3", altRegionISO3)
- b.writeSlice("altRegionIDs", altRegionIDs)
-
- // Create list of deprecated regions.
- // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only
- // Transitionally-reserved mapping not included.
- regionOldMap := stringSet{}
- // Include regions in territoryAlias (not all are in the IANA registry!)
- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 {
- regionOldMap.add(reg.Type)
- regionOldMap.updateLater(reg.Type, reg.Replacement)
- i, _ := regionISO.find(reg.Type)
- j, _ := regionISO.find(reg.Replacement)
- if k := m49map[i+isoOffset]; k == 0 {
- m49map[i+isoOffset] = m49map[j+isoOffset]
- }
- }
- }
- b.writeSortedMap("regionOldMap", &regionOldMap, func(s string) uint16 {
- return uint16(b.region.index(s))
- })
- // 3-digit region lookup, groupings.
- for i := 1; i < isoOffset; i++ {
- m := parseM49(b.region.s[i])
- m49map[i] = m
- fromM49map[m] = i
- }
- b.writeSlice("m49", m49map)
-
- const (
- searchBits = 7
- regionBits = 9
- )
- if len(m49map) >= 1<<regionBits {
- log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits)
- }
- m49Index := [9]int16{}
- fromM49 := []uint16{}
- m49 := []int{}
-	for k := range fromM49map {
- m49 = append(m49, int(k))
- }
- sort.Ints(m49)
- for _, k := range m49[1:] {
- val := (k & (1<<searchBits - 1)) << regionBits
- fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)]))
- m49Index[1:][k>>searchBits] = int16(len(fromM49))
- }
- b.writeSlice("m49Index", m49Index)
- b.writeSlice("fromM49", fromM49)
-}
-
-const (
- // TODO: put these lists in regionTypes as user data? Could be used for
- // various optimizations and refinements and could be exposed in the API.
- iso3166Except = "AC CP DG EA EU FX IC SU TA UK"
- iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions.
- // DY and RH are actually not deleted, but indeterminately reserved.
- iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD"
-)
-
-const (
- iso3166UserAssigned = 1 << iota
- ccTLD
- bcp47Region
-)
-
-func find(list []string, s string) int {
- for i, t := range list {
- if t == s {
- return i
- }
- }
- return -1
-}
-
-// writeVariants generates per-variant information and creates a map from variant
-// name to index value. We assign index values such that sorting multiple
-// variants by index value will result in the correct order.
-// There are two types of variants: specialized and general. Specialized variants
-// are only applicable to certain language or language-script pairs. Generalized
-// variants apply to any language. Generalized variants always sort after
-// specialized variants. We will therefore always assign a higher index value
-// to a generalized variant than any other variant. Generalized variants are
-// sorted alphabetically among themselves.
-// Specialized variants may also sort after other specialized variants. Such
-// variants will be ordered after any of the variants they may follow.
-// We assume that if a variant x is followed by a variant y, then for any prefix
-// p of x, p-x is a prefix of y. This allows us to order tags based on the
-// maximum of the length of any of its prefixes.
-// TODO: it is possible to define a set of Prefix values on variants such that
-// a total order cannot be defined to the point that this algorithm breaks.
-// In other words, we cannot guarantee the same order of variants for the
-// future using the same algorithm or for non-compliant combinations of
-// variants. For this reason, consider using simple alphabetic sorting
-// of variants and ignore Prefix restrictions altogether.
-func (b *builder) writeVariant() {
- generalized := stringSet{}
- specialized := stringSet{}
- specializedExtend := stringSet{}
- // Collate the variants by type and check assumptions.
- for _, v := range b.variant.slice() {
- e := b.registry[v]
- if len(e.prefix) == 0 {
- generalized.add(v)
- continue
- }
- c := strings.Split(e.prefix[0], "-")
- hasScriptOrRegion := false
- if len(c) > 1 {
- _, hasScriptOrRegion = b.script.find(c[1])
- if !hasScriptOrRegion {
- _, hasScriptOrRegion = b.region.find(c[1])
-
- }
- }
- if len(c) == 1 || len(c) == 2 && hasScriptOrRegion {
- // Variant is preceded by a language.
- specialized.add(v)
- continue
- }
- // Variant is preceded by another variant.
- specializedExtend.add(v)
- prefix := c[0] + "-"
- if hasScriptOrRegion {
- prefix += c[1]
- }
- for _, p := range e.prefix {
- // Verify that the prefix minus the last element is a prefix of the
- // predecessor element.
- i := strings.LastIndex(p, "-")
- pred := b.registry[p[i+1:]]
- if find(pred.prefix, p[:i]) < 0 {
- log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v)
- }
- // The sorting used below does not work in the general case. It works
- // if we assume that variants that may be followed by others only have
- // prefixes of the same length. Verify this.
- count := strings.Count(p[:i], "-")
- for _, q := range pred.prefix {
- if c := strings.Count(q, "-"); c != count {
- log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count)
- }
- }
- if !strings.HasPrefix(p, prefix) {
- log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix)
- }
- }
- }
-
- // Sort extended variants.
- a := specializedExtend.s
- less := func(v, w string) bool {
- // Sort by the maximum number of elements.
- maxCount := func(s string) (max int) {
- for _, p := range b.registry[s].prefix {
- if c := strings.Count(p, "-"); c > max {
- max = c
- }
- }
- return
- }
- if cv, cw := maxCount(v), maxCount(w); cv != cw {
- return cv < cw
- }
- // Sort by name as tie breaker.
- return v < w
- }
- sort.Sort(funcSorter{less, sort.StringSlice(a)})
- specializedExtend.frozen = true
-
- // Create index from variant name to index.
- variantIndex := make(map[string]uint8)
- add := func(s []string) {
- for _, v := range s {
- variantIndex[v] = uint8(len(variantIndex))
- }
- }
- add(specialized.slice())
- add(specializedExtend.s)
- numSpecialized := len(variantIndex)
- add(generalized.slice())
- if n := len(variantIndex); n > 255 {
- log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n)
- }
- b.writeMap("variantIndex", variantIndex)
- b.writeConst("variantNumSpecialized", numSpecialized)
-}
-
-func (b *builder) writeLanguageInfo() {
-}
-
-// writeLikelyData writes tables that are used both for finding parent relations and for
-// language matching. Each entry contains additional bits to indicate the status of the
-// data to know when it cannot be used for parent relations.
-func (b *builder) writeLikelyData() {
- const (
- isList = 1 << iota
- scriptInFrom
- regionInFrom
- )
- type ( // generated types
- likelyScriptRegion struct {
- region uint16
- script uint8
- flags uint8
- }
- likelyLangScript struct {
- lang uint16
- script uint8
- flags uint8
- }
- likelyLangRegion struct {
- lang uint16
- region uint16
- }
- // likelyTag is used for getting likely tags for group regions, where
- // the likely region might be a region contained in the group.
- likelyTag struct {
- lang uint16
- region uint16
- script uint8
- }
- )
- var ( // generated variables
- likelyRegionGroup = make([]likelyTag, len(b.groups))
- likelyLang = make([]likelyScriptRegion, len(b.lang.s))
- likelyRegion = make([]likelyLangScript, len(b.region.s))
- likelyScript = make([]likelyLangRegion, len(b.script.s))
- likelyLangList = []likelyScriptRegion{}
- likelyRegionList = []likelyLangScript{}
- )
- type fromTo struct {
- from, to []string
- }
- langToOther := map[int][]fromTo{}
- regionToOther := map[int][]fromTo{}
- for _, m := range b.supp.LikelySubtags.LikelySubtag {
- from := strings.Split(m.From, "_")
- to := strings.Split(m.To, "_")
- if len(to) != 3 {
- log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to))
- }
- if len(from) > 3 {
- log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from))
- }
- if from[0] != to[0] && from[0] != "und" {
- log.Fatalf("unexpected language change in expansion: %s -> %s", from, to)
- }
- if len(from) == 3 {
- if from[2] != to[2] {
- log.Fatalf("unexpected region change in expansion: %s -> %s", from, to)
- }
- if from[0] != "und" {
- log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to)
- }
- }
- if len(from) == 1 || from[0] != "und" {
- id := 0
- if from[0] != "und" {
- id = b.lang.index(from[0])
- }
- langToOther[id] = append(langToOther[id], fromTo{from, to})
- } else if len(from) == 2 && len(from[1]) == 4 {
- sid := b.script.index(from[1])
- likelyScript[sid].lang = uint16(b.langIndex(to[0]))
- likelyScript[sid].region = uint16(b.region.index(to[2]))
- } else {
- r := b.region.index(from[len(from)-1])
- if id, ok := b.groups[r]; ok {
- if from[0] != "und" {
- log.Fatalf("region changed unexpectedly: %s -> %s", from, to)
- }
- likelyRegionGroup[id].lang = uint16(b.langIndex(to[0]))
- likelyRegionGroup[id].script = uint8(b.script.index(to[1]))
- likelyRegionGroup[id].region = uint16(b.region.index(to[2]))
- } else {
- regionToOther[r] = append(regionToOther[r], fromTo{from, to})
- }
- }
- }
- b.writeType(likelyLangRegion{})
- b.writeSlice("likelyScript", likelyScript)
-
- for id := range b.lang.s {
- list := langToOther[id]
- if len(list) == 1 {
- likelyLang[id].region = uint16(b.region.index(list[0].to[2]))
- likelyLang[id].script = uint8(b.script.index(list[0].to[1]))
- } else if len(list) > 1 {
- likelyLang[id].flags = isList
- likelyLang[id].region = uint16(len(likelyLangList))
- likelyLang[id].script = uint8(len(list))
- for _, x := range list {
- flags := uint8(0)
- if len(x.from) > 1 {
- if x.from[1] == x.to[2] {
- flags = regionInFrom
- } else {
- flags = scriptInFrom
- }
- }
- likelyLangList = append(likelyLangList, likelyScriptRegion{
- region: uint16(b.region.index(x.to[2])),
- script: uint8(b.script.index(x.to[1])),
- flags: flags,
- })
- }
- }
- }
- // TODO: merge suppressScript data with this table.
- b.writeType(likelyScriptRegion{})
- b.writeSlice("likelyLang", likelyLang)
- b.writeSlice("likelyLangList", likelyLangList)
-
- for id := range b.region.s {
- list := regionToOther[id]
- if len(list) == 1 {
- likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0]))
- likelyRegion[id].script = uint8(b.script.index(list[0].to[1]))
- if len(list[0].from) > 2 {
- likelyRegion[id].flags = scriptInFrom
- }
- } else if len(list) > 1 {
- likelyRegion[id].flags = isList
- likelyRegion[id].lang = uint16(len(likelyRegionList))
- likelyRegion[id].script = uint8(len(list))
- for i, x := range list {
- if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 {
- log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i)
- }
- x := likelyLangScript{
- lang: uint16(b.langIndex(x.to[0])),
- script: uint8(b.script.index(x.to[1])),
- }
- if len(list[0].from) > 2 {
- x.flags = scriptInFrom
- }
- likelyRegionList = append(likelyRegionList, x)
- }
- }
- }
- b.writeType(likelyLangScript{})
- b.writeSlice("likelyRegion", likelyRegion)
- b.writeSlice("likelyRegionList", likelyRegionList)
-
- b.writeType(likelyTag{})
- b.writeSlice("likelyRegionGroup", likelyRegionGroup)
-}
-
-func (b *builder) writeRegionInclusionData() {
- var (
- // mm holds for each group the set of groups with a distance of 1.
- mm = make(map[int][]index)
-
- // containment holds for each group the transitive closure of
- // containment of other groups.
- containment = make(map[index][]index)
- )
- for _, g := range b.supp.TerritoryContainment.Group {
- // Skip UN and EURO zone as they are flattening the containment
- // relationship.
- if g.Type == "EZ" || g.Type == "UN" {
- continue
- }
- group := b.region.index(g.Type)
- groupIdx := b.groups[group]
- for _, mem := range strings.Split(g.Contains, " ") {
- r := b.region.index(mem)
- mm[r] = append(mm[r], groupIdx)
- if g, ok := b.groups[r]; ok {
- mm[group] = append(mm[group], g)
- containment[groupIdx] = append(containment[groupIdx], g)
- }
- }
- }
-
- regionContainment := make([]uint64, len(b.groups))
- for _, g := range b.groups {
- l := containment[g]
-
- // Compute the transitive closure of containment.
- for i := 0; i < len(l); i++ {
- l = append(l, containment[l[i]]...)
- }
-
- // Compute the bitmask.
- regionContainment[g] = 1 << g
- for _, v := range l {
- regionContainment[g] |= 1 << v
- }
- }
- b.writeSlice("regionContainment", regionContainment)
-
- regionInclusion := make([]uint8, len(b.region.s))
- bvs := make(map[uint64]index)
- // Make the first bitvector positions correspond with the groups.
- for r, i := range b.groups {
- bv := uint64(1 << i)
- for _, g := range mm[r] {
- bv |= 1 << g
- }
- bvs[bv] = i
- regionInclusion[r] = uint8(bvs[bv])
- }
- for r := 1; r < len(b.region.s); r++ {
- if _, ok := b.groups[r]; !ok {
- bv := uint64(0)
- for _, g := range mm[r] {
- bv |= 1 << g
- }
- if bv == 0 {
- // Pick the world for unspecified regions.
- bv = 1 << b.groups[b.region.index("001")]
- }
- if _, ok := bvs[bv]; !ok {
- bvs[bv] = index(len(bvs))
- }
- regionInclusion[r] = uint8(bvs[bv])
- }
- }
- b.writeSlice("regionInclusion", regionInclusion)
- regionInclusionBits := make([]uint64, len(bvs))
- for k, v := range bvs {
- regionInclusionBits[v] = uint64(k)
- }
- // Add bit vectors for increasingly large distances until a fixed point is reached.
- regionInclusionNext := []uint8{}
- for i := 0; i < len(regionInclusionBits); i++ {
- bits := regionInclusionBits[i]
- next := bits
- for i := uint(0); i < uint(len(b.groups)); i++ {
- if bits&(1<<i) != 0 {
- next |= regionInclusionBits[i]
- }
- }
- if _, ok := bvs[next]; !ok {
- bvs[next] = index(len(bvs))
- regionInclusionBits = append(regionInclusionBits, next)
- }
- regionInclusionNext = append(regionInclusionNext, uint8(bvs[next]))
- }
- b.writeSlice("regionInclusionBits", regionInclusionBits)
- b.writeSlice("regionInclusionNext", regionInclusionNext)
-}
-
-type parentRel struct {
- lang uint16
- script uint8
- maxScript uint8
- toRegion uint16
- fromRegion []uint16
-}
-
-func (b *builder) writeParents() {
- b.writeType(parentRel{})
-
- parents := []parentRel{}
-
- // Construct parent overrides.
- n := 0
- for _, p := range b.data.Supplemental().ParentLocales.ParentLocale {
- // Skipping non-standard scripts to root is implemented using addTags.
- if p.Parent == "root" {
- continue
- }
-
- sub := strings.Split(p.Parent, "_")
- parent := parentRel{lang: b.langIndex(sub[0])}
- if len(sub) == 2 {
- // TODO: check that all undefined scripts are indeed Latn in these
- // cases.
- parent.maxScript = uint8(b.script.index("Latn"))
- parent.toRegion = uint16(b.region.index(sub[1]))
- } else {
- parent.script = uint8(b.script.index(sub[1]))
- parent.maxScript = parent.script
- parent.toRegion = uint16(b.region.index(sub[2]))
- }
- for _, c := range strings.Split(p.Locales, " ") {
- region := b.region.index(c[strings.LastIndex(c, "_")+1:])
- parent.fromRegion = append(parent.fromRegion, uint16(region))
- }
- parents = append(parents, parent)
- n += len(parent.fromRegion)
- }
- b.writeSliceAddSize("parents", n*2, parents)
-}
-
-func main() {
- gen.Init()
-
- gen.Repackage("gen_common.go", "common.go", "language")
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "language")
-
- fmt.Fprintln(w, `import "golang.org/x/text/internal/tag"`)
-
- b := newBuilder(w)
- gen.WriteCLDRVersion(w)
-
- b.parseIndices()
- b.writeType(FromTo{})
- b.writeLanguage()
- b.writeScript()
- b.writeRegion()
- b.writeVariant()
- // TODO: b.writeLocale()
- b.computeRegionGroups()
- b.writeLikelyData()
- b.writeRegionInclusionData()
- b.writeParents()
-}
diff --git a/vendor/golang.org/x/text/internal/language/gen_common.go b/vendor/golang.org/x/text/internal/language/gen_common.go
deleted file mode 100644
index c419ceeb1..000000000
--- a/vendor/golang.org/x/text/internal/language/gen_common.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This file contains code common to the maketables.go and the package code.
-
-// AliasType is the type of an alias in AliasMap.
-type AliasType int8
-
-const (
- Deprecated AliasType = iota
- Macro
- Legacy
-
- AliasTypeUnknown AliasType = -1
-)
diff --git a/vendor/golang.org/x/text/language/gen.go b/vendor/golang.org/x/text/language/gen.go
deleted file mode 100644
index 3004eb42c..000000000
--- a/vendor/golang.org/x/text/language/gen.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Language tag table generator.
-// Data read from the web.
-
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "log"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/language"
- "golang.org/x/text/unicode/cldr"
-)
-
-var (
- test = flag.Bool("test",
- false,
- "test existing tables; can be used to compare web data with package data.")
- outputFile = flag.String("output",
- "tables.go",
- "output file for generated tables")
-)
-
-func main() {
- gen.Init()
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "language")
-
- b := newBuilder(w)
- gen.WriteCLDRVersion(w)
-
- b.writeConstants()
- b.writeMatchData()
-}
-
-type builder struct {
- w *gen.CodeWriter
- hw io.Writer // MultiWriter for w and w.Hash
- data *cldr.CLDR
- supp *cldr.SupplementalData
-}
-
-func (b *builder) langIndex(s string) uint16 {
- return uint16(language.MustParseBase(s))
-}
-
-func (b *builder) regionIndex(s string) int {
- return int(language.MustParseRegion(s))
-}
-
-func (b *builder) scriptIndex(s string) int {
- return int(language.MustParseScript(s))
-}
-
-func newBuilder(w *gen.CodeWriter) *builder {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- if err != nil {
- log.Fatal(err)
- }
- b := builder{
- w: w,
- hw: io.MultiWriter(w, w.Hash),
- data: data,
- supp: data.Supplemental(),
- }
- return &b
-}
-
-// writeConsts computes f(v) for all v in values and writes the results
-// as constants named _v to a single constant block.
-func (b *builder) writeConsts(f func(string) int, values ...string) {
- fmt.Fprintln(b.w, "const (")
- for _, v := range values {
- fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v))
- }
- fmt.Fprintln(b.w, ")")
-}
-
-// TODO: region inclusion data will probably not be used in future matchers.
-
-var langConsts = []string{
- "de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und",
-}
-
-var scriptConsts = []string{
- "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
- "Zzzz",
-}
-
-var regionConsts = []string{
- "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
- "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
-}
-
-func (b *builder) writeConstants() {
- b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
- b.writeConsts(b.regionIndex, regionConsts...)
- b.writeConsts(b.scriptIndex, scriptConsts...)
-}
-
-type mutualIntelligibility struct {
- want, have uint16
- distance uint8
- oneway bool
-}
-
-type scriptIntelligibility struct {
- wantLang, haveLang uint16
- wantScript, haveScript uint8
- distance uint8
- // Always oneway
-}
-
-type regionIntelligibility struct {
- lang uint16 // compact language id
- script uint8 // 0 means any
- group uint8 // 0 means any; if bit 7 is set it means inverse
- distance uint8
- // Always twoway.
-}
-
-// writeMatchData writes tables with languages and scripts for which there is
-// mutual intelligibility. The data is based on CLDR's languageMatching data.
-// Note that we use a different algorithm than the one defined by CLDR and that
-// we slightly modify the data. For example, we convert scores to confidence levels.
-// We also drop all region-related data as we use a different algorithm to
-// determine region equivalence.
-func (b *builder) writeMatchData() {
- lm := b.supp.LanguageMatching.LanguageMatches
- cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")
-
- regionHierarchy := map[string][]string{}
- for _, g := range b.supp.TerritoryContainment.Group {
- regions := strings.Split(g.Contains, " ")
- regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
- }
- regionToGroups := make([]uint8, language.NumRegions)
-
- idToIndex := map[string]uint8{}
- for i, mv := range lm[0].MatchVariable {
- if i > 6 {
- log.Fatalf("Too many groups: %d", i)
- }
- idToIndex[mv.Id] = uint8(i + 1)
- // TODO: also handle '-'
- for _, r := range strings.Split(mv.Value, "+") {
- todo := []string{r}
- for k := 0; k < len(todo); k++ {
- r := todo[k]
- regionToGroups[b.regionIndex(r)] |= 1 << uint8(i)
- todo = append(todo, regionHierarchy[r]...)
- }
- }
- }
- b.w.WriteVar("regionToGroups", regionToGroups)
-
- // maps language id to in- and out-of-group region.
- paradigmLocales := [][3]uint16{}
- locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ")
- for i := 0; i < len(locales); i += 2 {
- x := [3]uint16{}
- for j := 0; j < 2; j++ {
- pc := strings.SplitN(locales[i+j], "-", 2)
- x[0] = b.langIndex(pc[0])
- if len(pc) == 2 {
- x[1+j] = uint16(b.regionIndex(pc[1]))
- }
- }
- paradigmLocales = append(paradigmLocales, x)
- }
- b.w.WriteVar("paradigmLocales", paradigmLocales)
-
- b.w.WriteType(mutualIntelligibility{})
- b.w.WriteType(scriptIntelligibility{})
- b.w.WriteType(regionIntelligibility{})
-
- matchLang := []mutualIntelligibility{}
- matchScript := []scriptIntelligibility{}
- matchRegion := []regionIntelligibility{}
- // Convert the languageMatch entries in lists keyed by desired language.
- for _, m := range lm[0].LanguageMatch {
- // Different versions of CLDR use different separators.
- desired := strings.Replace(m.Desired, "-", "_", -1)
- supported := strings.Replace(m.Supported, "-", "_", -1)
- d := strings.Split(desired, "_")
- s := strings.Split(supported, "_")
- if len(d) != len(s) {
- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
- continue
- }
- distance, _ := strconv.ParseInt(m.Distance, 10, 8)
- switch len(d) {
- case 2:
- if desired == supported && desired == "*_*" {
- continue
- }
- // language-script pair.
- matchScript = append(matchScript, scriptIntelligibility{
- wantLang: uint16(b.langIndex(d[0])),
- haveLang: uint16(b.langIndex(s[0])),
- wantScript: uint8(b.scriptIndex(d[1])),
- haveScript: uint8(b.scriptIndex(s[1])),
- distance: uint8(distance),
- })
- if m.Oneway != "true" {
- matchScript = append(matchScript, scriptIntelligibility{
- wantLang: uint16(b.langIndex(s[0])),
- haveLang: uint16(b.langIndex(d[0])),
- wantScript: uint8(b.scriptIndex(s[1])),
- haveScript: uint8(b.scriptIndex(d[1])),
- distance: uint8(distance),
- })
- }
- case 1:
- if desired == supported && desired == "*" {
- continue
- }
- if distance == 1 {
- // nb == no is already handled by macro mapping. Check there
- // really is only this case.
- if d[0] != "no" || s[0] != "nb" {
- log.Fatalf("unhandled equivalence %s == %s", s[0], d[0])
- }
- continue
- }
- // TODO: consider dropping oneway field and just doubling the entry.
- matchLang = append(matchLang, mutualIntelligibility{
- want: uint16(b.langIndex(d[0])),
- have: uint16(b.langIndex(s[0])),
- distance: uint8(distance),
- oneway: m.Oneway == "true",
- })
- case 3:
- if desired == supported && desired == "*_*_*" {
- continue
- }
- if desired != supported {
- // This is now supported by CLDR, but only one case, which
- // should already be covered by paradigm locales. For instance,
- // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
- // testdata/CLDRLocaleMatcherTest.txt tests this.
- if supported != "en_*_GB" {
- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
- }
- continue
- }
- ri := regionIntelligibility{
- lang: b.langIndex(d[0]),
- distance: uint8(distance),
- }
- if d[1] != "*" {
- ri.script = uint8(b.scriptIndex(d[1]))
- }
- switch {
- case d[2] == "*":
- ri.group = 0x80 // not contained in anything
- case strings.HasPrefix(d[2], "$!"):
- ri.group = 0x80
- d[2] = "$" + d[2][len("$!"):]
- fallthrough
- case strings.HasPrefix(d[2], "$"):
- ri.group |= idToIndex[d[2]]
- }
- matchRegion = append(matchRegion, ri)
- default:
- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
- }
- }
- sort.SliceStable(matchLang, func(i, j int) bool {
- return matchLang[i].distance < matchLang[j].distance
- })
- b.w.WriteComment(`
- matchLang holds pairs of langIDs of base languages that are typically
- mutually intelligible. Each pair is associated with a confidence and
- whether the intelligibility goes one or both ways.`)
- b.w.WriteVar("matchLang", matchLang)
-
- b.w.WriteComment(`
- matchScript holds pairs of scriptIDs where readers of one script
- can typically also read the other. Each is associated with a confidence.`)
- sort.SliceStable(matchScript, func(i, j int) bool {
- return matchScript[i].distance < matchScript[j].distance
- })
- b.w.WriteVar("matchScript", matchScript)
-
- sort.SliceStable(matchRegion, func(i, j int) bool {
- return matchRegion[i].distance < matchRegion[j].distance
- })
- b.w.WriteVar("matchRegion", matchRegion)
-}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go
deleted file mode 100644
index 987fc169c..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "flag"
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/triegen"
- "golang.org/x/text/internal/ucd"
-)
-
-var outputFile = flag.String("out", "tables.go", "output file")
-
-func main() {
- gen.Init()
- gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
- gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
-
- genTables()
-}
-
-// bidiClass names and codes taken from class "bc" in
-// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
-var bidiClass = map[string]Class{
- "AL": AL, // ArabicLetter
- "AN": AN, // ArabicNumber
- "B": B, // ParagraphSeparator
- "BN": BN, // BoundaryNeutral
- "CS": CS, // CommonSeparator
- "EN": EN, // EuropeanNumber
- "ES": ES, // EuropeanSeparator
- "ET": ET, // EuropeanTerminator
- "L": L, // LeftToRight
- "NSM": NSM, // NonspacingMark
- "ON": ON, // OtherNeutral
- "R": R, // RightToLeft
- "S": S, // SegmentSeparator
- "WS": WS, // WhiteSpace
-
- "FSI": Control,
- "PDF": Control,
- "PDI": Control,
- "LRE": Control,
- "LRI": Control,
- "LRO": Control,
- "RLE": Control,
- "RLI": Control,
- "RLO": Control,
-}
-
-func genTables() {
- if numClass > 0x0F {
- log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
- }
- w := gen.NewCodeWriter()
- defer w.WriteVersionedGoFile(*outputFile, "bidi")
-
- gen.WriteUnicodeVersion(w)
-
- t := triegen.NewTrie("bidi")
-
- // Build data about bracket mapping. These bits need to be or-ed with
- // any other bits.
- orMask := map[rune]uint64{}
-
- xorMap := map[rune]int{}
- xorMasks := []rune{0} // First value is no-op.
-
- ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
- r1 := p.Rune(0)
- r2 := p.Rune(1)
- xor := r1 ^ r2
- if _, ok := xorMap[xor]; !ok {
- xorMap[xor] = len(xorMasks)
- xorMasks = append(xorMasks, xor)
- }
- entry := uint64(xorMap[xor]) << xorMaskShift
- switch p.String(2) {
- case "o":
- entry |= openMask
- case "c", "n":
- default:
- log.Fatalf("Unknown bracket class %q.", p.String(2))
- }
- orMask[r1] = entry
- })
-
- w.WriteComment(`
- xorMasks contains masks to be xor-ed with brackets to get the reverse
- version.`)
- w.WriteVar("xorMasks", xorMasks)
-
- done := map[rune]bool{}
-
- insert := func(r rune, c Class) {
- if !done[r] {
- t.Insert(r, orMask[r]|uint64(c))
- done[r] = true
- }
- }
-
- // Insert the derived BiDi properties.
- ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
- r := p.Rune(0)
- class, ok := bidiClass[p.String(1)]
- if !ok {
- log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
- }
- insert(r, class)
- })
- visitDefaults(insert)
-
- // TODO: use sparse blocks. This would reduce table size considerably
- // from the looks of it.
-
- sz, err := t.Gen(w)
- if err != nil {
- log.Fatal(err)
- }
- w.Size += sz
-}
-
-// dummy values to make methods in gen_common compile. The real versions
-// will be generated by this file into tables.go.
-var (
- xorMasks []rune
-)
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
deleted file mode 100644
index 02c3b505d..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "unicode"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/ucd"
- "golang.org/x/text/unicode/rangetable"
-)
-
-// These tables are hand-extracted from:
-// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
-func visitDefaults(fn func(r rune, c Class)) {
- // first write default values for ranges listed above.
- visitRunes(fn, AL, []rune{
- 0x0600, 0x07BF, // Arabic
- 0x08A0, 0x08FF, // Arabic Extended-A
- 0xFB50, 0xFDCF, // Arabic Presentation Forms
- 0xFDF0, 0xFDFF,
- 0xFE70, 0xFEFF,
- 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
- })
- visitRunes(fn, R, []rune{
- 0x0590, 0x05FF, // Hebrew
- 0x07C0, 0x089F, // Nko et al.
- 0xFB1D, 0xFB4F,
- 0x00010800, 0x00010FFF, // Cypriot Syllabary et al.
- 0x0001E800, 0x0001EDFF,
- 0x0001EF00, 0x0001EFFF,
- })
- visitRunes(fn, ET, []rune{ // European Terminator
- 0x20A0, 0x20Cf, // Currency symbols
- })
- rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
- fn(r, BN) // Boundary Neutral
- })
- ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
- if p.String(1) == "Default_Ignorable_Code_Point" {
- fn(p.Rune(0), BN) // Boundary Neutral
- }
- })
-}
-
-func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
- for i := 0; i < len(runes); i += 2 {
- lo, hi := runes[i], runes[i+1]
- for j := lo; j <= hi; j++ {
- fn(j, c)
- }
- }
-}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
deleted file mode 100644
index 9cb994289..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// Class is the Unicode BiDi class. Each rune has a single class.
-type Class uint
-
-const (
- L Class = iota // LeftToRight
- R // RightToLeft
- EN // EuropeanNumber
- ES // EuropeanSeparator
- ET // EuropeanTerminator
- AN // ArabicNumber
- CS // CommonSeparator
- B // ParagraphSeparator
- S // SegmentSeparator
- WS // WhiteSpace
- ON // OtherNeutral
- BN // BoundaryNeutral
- NSM // NonspacingMark
- AL // ArabicLetter
- Control // Control LRO - PDI
-
- numClass
-
- LRO // LeftToRightOverride
- RLO // RightToLeftOverride
- LRE // LeftToRightEmbedding
- RLE // RightToLeftEmbedding
- PDF // PopDirectionalFormat
- LRI // LeftToRightIsolate
- RLI // RightToLeftIsolate
- FSI // FirstStrongIsolate
- PDI // PopDirectionalIsolate
-
- unknownClass = ^Class(0)
-)
-
-var controlToClass = map[rune]Class{
- 0x202D: LRO, // LeftToRightOverride,
- 0x202E: RLO, // RightToLeftOverride,
- 0x202A: LRE, // LeftToRightEmbedding,
- 0x202B: RLE, // RightToLeftEmbedding,
- 0x202C: PDF, // PopDirectionalFormat,
- 0x2066: LRI, // LeftToRightIsolate,
- 0x2067: RLI, // RightToLeftIsolate,
- 0x2068: FSI, // FirstStrongIsolate,
- 0x2069: PDI, // PopDirectionalIsolate,
-}
-
-// A trie entry has the following bits:
-// 7..5 XOR mask for brackets
-// 4 1: Bracket open, 0: Bracket close
-// 3..0 Class type
-
-const (
- openMask = 0x10
- xorMaskShift = 5
-)
diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go
deleted file mode 100644
index 30a3aa933..000000000
--- a/vendor/golang.org/x/text/unicode/norm/maketables.go
+++ /dev/null
@@ -1,986 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Normalization table generator.
-// Data read from the web.
-// See forminfo.go for a description of the trie values associated with each rune.
-
-package main
-
-import (
- "bytes"
- "encoding/binary"
- "flag"
- "fmt"
- "io"
- "log"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/triegen"
- "golang.org/x/text/internal/ucd"
-)
-
-func main() {
- gen.Init()
- loadUnicodeData()
- compactCCC()
- loadCompositionExclusions()
- completeCharFields(FCanonical)
- completeCharFields(FCompatibility)
- computeNonStarterCounts()
- verifyComputed()
- printChars()
- testDerived()
- printTestdata()
- makeTables()
-}
-
-var (
- tablelist = flag.String("tables",
- "all",
- "comma-separated list of which tables to generate; "+
- "can be 'decomp', 'recomp', 'info' and 'all'")
- test = flag.Bool("test",
- false,
- "test existing tables against DerivedNormalizationProps and generate test data for regression testing")
- verbose = flag.Bool("verbose",
- false,
- "write data to stdout as it is parsed")
-)
-
-const MaxChar = 0x10FFFF // anything above this shouldn't exist
-
-// Quick Check properties of runes allow us to quickly
-// determine whether a rune may occur in a normal form.
-// For a given normal form, a rune may be guaranteed to occur
-// verbatim (QC=Yes), may or may not combine with another
-// rune (QC=Maybe), or may not occur (QC=No).
-type QCResult int
-
-const (
- QCUnknown QCResult = iota
- QCYes
- QCNo
- QCMaybe
-)
-
-func (r QCResult) String() string {
- switch r {
- case QCYes:
- return "Yes"
- case QCNo:
- return "No"
- case QCMaybe:
- return "Maybe"
- }
- return "***UNKNOWN***"
-}
-
-const (
- FCanonical = iota // NFC or NFD
- FCompatibility // NFKC or NFKD
- FNumberOfFormTypes
-)
-
-const (
- MComposed = iota // NFC or NFKC
- MDecomposed // NFD or NFKD
- MNumberOfModes
-)
-
-// This contains only the properties we're interested in.
-type Char struct {
- name string
- codePoint rune // if zero, this index is not a valid code point.
- ccc uint8 // canonical combining class
- origCCC uint8
- excludeInComp bool // from CompositionExclusions.txt
- compatDecomp bool // it has a compatibility expansion
-
- nTrailingNonStarters uint8
- nLeadingNonStarters uint8 // must be equal to trailing if non-zero
-
- forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
-
- state State
-}
-
-var chars = make([]Char, MaxChar+1)
-var cccMap = make(map[uint8]uint8)
-
-func (c Char) String() string {
- buf := new(bytes.Buffer)
-
- fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
- fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
- fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
- fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
- fmt.Fprintf(buf, " state: %v\n", c.state)
- fmt.Fprintf(buf, " NFC:\n")
- fmt.Fprint(buf, c.forms[FCanonical])
- fmt.Fprintf(buf, " NFKC:\n")
- fmt.Fprint(buf, c.forms[FCompatibility])
-
- return buf.String()
-}
-
-// In UnicodeData.txt, some ranges are marked like this:
-// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
-// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
-// parseCharacter keeps a state variable indicating the weirdness.
-type State int
-
-const (
- SNormal State = iota // known to be zero for the type
- SFirst
- SLast
- SMissing
-)
-
-var lastChar = rune('\u0000')
-
-func (c Char) isValid() bool {
- return c.codePoint != 0 && c.state != SMissing
-}
-
-type FormInfo struct {
- quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
- verified [MNumberOfModes]bool // index: MComposed or MDecomposed
-
- combinesForward bool // May combine with rune on the right
- combinesBackward bool // May combine with rune on the left
- isOneWay bool // Never appears in result
- inDecomp bool // Some decompositions result in this char.
- decomp Decomposition
- expandedDecomp Decomposition
-}
-
-func (f FormInfo) String() string {
- buf := bytes.NewBuffer(make([]byte, 0))
-
- fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
- fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
- fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
- fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
- fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
- fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
- fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
- fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
-
- return buf.String()
-}
-
-type Decomposition []rune
-
-func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
- decomp := strings.Split(s, " ")
- if len(decomp) > 0 && skipfirst {
- decomp = decomp[1:]
- }
- for _, d := range decomp {
- point, err := strconv.ParseUint(d, 16, 64)
- if err != nil {
- return a, err
- }
- a = append(a, rune(point))
- }
- return a, nil
-}
-
-func loadUnicodeData() {
- f := gen.OpenUCDFile("UnicodeData.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- r := p.Rune(ucd.CodePoint)
- char := &chars[r]
-
- char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
- decmap := p.String(ucd.DecompMapping)
-
- exp, err := parseDecomposition(decmap, false)
- isCompat := false
- if err != nil {
- if len(decmap) > 0 {
- exp, err = parseDecomposition(decmap, true)
- if err != nil {
- log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
- }
- isCompat = true
- }
- }
-
- char.name = p.String(ucd.Name)
- char.codePoint = r
- char.forms[FCompatibility].decomp = exp
- if !isCompat {
- char.forms[FCanonical].decomp = exp
- } else {
- char.compatDecomp = true
- }
- if len(decmap) > 0 {
- char.forms[FCompatibility].decomp = exp
- }
- }
- if err := p.Err(); err != nil {
- log.Fatal(err)
- }
-}
-
-// compactCCC converts the sparse set of CCC values to a contiguous one,
-// reducing the number of bits needed from 8 to 6.
-func compactCCC() {
- m := make(map[uint8]uint8)
- for i := range chars {
- c := &chars[i]
- m[c.ccc] = 0
- }
- cccs := []int{}
- for v, _ := range m {
- cccs = append(cccs, int(v))
- }
- sort.Ints(cccs)
- for i, c := range cccs {
- cccMap[uint8(i)] = uint8(c)
- m[uint8(c)] = uint8(i)
- }
- for i := range chars {
- c := &chars[i]
- c.origCCC = c.ccc
- c.ccc = m[c.ccc]
- }
- if len(m) >= 1<<6 {
- log.Fatalf("too many difference CCC values: %d >= 64", len(m))
- }
-}
-
-// CompositionExclusions.txt has form:
-// 0958 # ...
-// See https://unicode.org/reports/tr44/ for full explanation
-func loadCompositionExclusions() {
- f := gen.OpenUCDFile("CompositionExclusions.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- c := &chars[p.Rune(0)]
- if c.excludeInComp {
- log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
- }
- c.excludeInComp = true
- }
- if e := p.Err(); e != nil {
- log.Fatal(e)
- }
-}
-
-// hasCompatDecomp returns true if any of the recursive
-// decompositions contains a compatibility expansion.
-// In this case, the character may not occur in NFK*.
-func hasCompatDecomp(r rune) bool {
- c := &chars[r]
- if c.compatDecomp {
- return true
- }
- for _, d := range c.forms[FCompatibility].decomp {
- if hasCompatDecomp(d) {
- return true
- }
- }
- return false
-}
-
-// Hangul related constants.
-const (
- HangulBase = 0xAC00
- HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
-
- JamoLBase = 0x1100
- JamoLEnd = 0x1113
- JamoVBase = 0x1161
- JamoVEnd = 0x1176
- JamoTBase = 0x11A8
- JamoTEnd = 0x11C3
-
- JamoLVTCount = 19 * 21 * 28
- JamoTCount = 28
-)
-
-func isHangul(r rune) bool {
- return HangulBase <= r && r < HangulEnd
-}
-
-func isHangulWithoutJamoT(r rune) bool {
- if !isHangul(r) {
- return false
- }
- r -= HangulBase
- return r < JamoLVTCount && r%JamoTCount == 0
-}
-
-func ccc(r rune) uint8 {
- return chars[r].ccc
-}
-
-// Insert a rune in a buffer, ordered by Canonical Combining Class.
-func insertOrdered(b Decomposition, r rune) Decomposition {
- n := len(b)
- b = append(b, 0)
- cc := ccc(r)
- if cc > 0 {
- // Use bubble sort.
- for ; n > 0; n-- {
- if ccc(b[n-1]) <= cc {
- break
- }
- b[n] = b[n-1]
- }
- }
- b[n] = r
- return b
-}
-
-// Recursively decompose.
-func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
- dcomp := chars[r].forms[form].decomp
- if len(dcomp) == 0 {
- return insertOrdered(d, r)
- }
- for _, c := range dcomp {
- d = decomposeRecursive(form, c, d)
- }
- return d
-}
-
-func completeCharFields(form int) {
- // Phase 0: pre-expand decomposition.
- for i := range chars {
- f := &chars[i].forms[form]
- if len(f.decomp) == 0 {
- continue
- }
- exp := make(Decomposition, 0)
- for _, c := range f.decomp {
- exp = decomposeRecursive(form, c, exp)
- }
- f.expandedDecomp = exp
- }
-
- // Phase 1: composition exclusion, mark decomposition.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- // Marks script-specific exclusions and version-restricted characters.
- f.isOneWay = c.excludeInComp
-
- // Singletons
- f.isOneWay = f.isOneWay || len(f.decomp) == 1
-
- // Non-starter decompositions
- if len(f.decomp) > 1 {
- chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
- f.isOneWay = f.isOneWay || chk
- }
-
- // Runes that decompose into more than two runes.
- f.isOneWay = f.isOneWay || len(f.decomp) > 2
-
- if form == FCompatibility {
- f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
- }
-
- for _, r := range f.decomp {
- chars[r].forms[form].inDecomp = true
- }
- }
-
- // Phase 2: forward and backward combining.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- if !f.isOneWay && len(f.decomp) == 2 {
- f0 := &chars[f.decomp[0]].forms[form]
- f1 := &chars[f.decomp[1]].forms[form]
- if !f0.isOneWay {
- f0.combinesForward = true
- }
- if !f1.isOneWay {
- f1.combinesBackward = true
- }
- }
- if isHangulWithoutJamoT(rune(i)) {
- f.combinesForward = true
- }
- }
-
- // Phase 3: quick check values.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- switch {
- case len(f.decomp) > 0:
- f.quickCheck[MDecomposed] = QCNo
- case isHangul(rune(i)):
- f.quickCheck[MDecomposed] = QCNo
- default:
- f.quickCheck[MDecomposed] = QCYes
- }
- switch {
- case f.isOneWay:
- f.quickCheck[MComposed] = QCNo
- case (i & 0xffff00) == JamoLBase:
- f.quickCheck[MComposed] = QCYes
- if JamoLBase <= i && i < JamoLEnd {
- f.combinesForward = true
- }
- if JamoVBase <= i && i < JamoVEnd {
- f.quickCheck[MComposed] = QCMaybe
- f.combinesBackward = true
- f.combinesForward = true
- }
- if JamoTBase <= i && i < JamoTEnd {
- f.quickCheck[MComposed] = QCMaybe
- f.combinesBackward = true
- }
- case !f.combinesBackward:
- f.quickCheck[MComposed] = QCYes
- default:
- f.quickCheck[MComposed] = QCMaybe
- }
- }
-}
-
-func computeNonStarterCounts() {
- // Phase 4: leading and trailing non-starter count
- for i := range chars {
- c := &chars[i]
-
- runes := []rune{rune(i)}
- // We always use FCompatibility so that the CGJ insertion points do not
- // change for repeated normalizations with different forms.
- if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
- runes = exp
- }
- // We consider runes that combine backwards to be non-starters for the
- // purpose of Stream-Safe Text Processing.
- for _, r := range runes {
- if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
- break
- }
- c.nLeadingNonStarters++
- }
- for i := len(runes) - 1; i >= 0; i-- {
- if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
- break
- }
- c.nTrailingNonStarters++
- }
- if c.nTrailingNonStarters > 3 {
- log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
- }
-
- if isHangul(rune(i)) {
- c.nTrailingNonStarters = 2
- if isHangulWithoutJamoT(rune(i)) {
- c.nTrailingNonStarters = 1
- }
- }
-
- if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
- log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
- }
- if t := c.nTrailingNonStarters; t > 3 {
- log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
- }
- }
-}
-
-func printBytes(w io.Writer, b []byte, name string) {
- fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
- fmt.Fprintf(w, "var %s = [...]byte {", name)
- for i, c := range b {
- switch {
- case i%64 == 0:
- fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
- case i%8 == 0:
- fmt.Fprintf(w, "\n")
- }
- fmt.Fprintf(w, "0x%.2X, ", c)
- }
- fmt.Fprint(w, "\n}\n\n")
-}
-
-// See forminfo.go for format.
-func makeEntry(f *FormInfo, c *Char) uint16 {
- e := uint16(0)
- if r := c.codePoint; HangulBase <= r && r < HangulEnd {
- e |= 0x40
- }
- if f.combinesForward {
- e |= 0x20
- }
- if f.quickCheck[MDecomposed] == QCNo {
- e |= 0x4
- }
- switch f.quickCheck[MComposed] {
- case QCYes:
- case QCNo:
- e |= 0x10
- case QCMaybe:
- e |= 0x18
- default:
- log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
- }
- e |= uint16(c.nTrailingNonStarters)
- return e
-}
-
-// decompSet keeps track of unique decompositions, grouped by whether
-// the decomposition is followed by a trailing and/or leading CCC.
-type decompSet [7]map[string]bool
-
-const (
- normalDecomp = iota
- firstMulti
- firstCCC
- endMulti
- firstLeadingCCC
- firstCCCZeroExcept
- firstStarterWithNLead
- lastDecomp
-)
-
-var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
-
-func makeDecompSet() decompSet {
- m := decompSet{}
- for i := range m {
- m[i] = make(map[string]bool)
- }
- return m
-}
-func (m *decompSet) insert(key int, s string) {
- m[key][s] = true
-}
-
-func printCharInfoTables(w io.Writer) int {
- mkstr := func(r rune, f *FormInfo) (int, string) {
- d := f.expandedDecomp
- s := string([]rune(d))
- if max := 1 << 6; len(s) >= max {
- const msg = "%U: too many bytes in decomposition: %d >= %d"
- log.Fatalf(msg, r, len(s), max)
- }
- head := uint8(len(s))
- if f.quickCheck[MComposed] != QCYes {
- head |= 0x40
- }
- if f.combinesForward {
- head |= 0x80
- }
- s = string([]byte{head}) + s
-
- lccc := ccc(d[0])
- tccc := ccc(d[len(d)-1])
- cc := ccc(r)
- if cc != 0 && lccc == 0 && tccc == 0 {
- log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
- }
- if tccc < lccc && lccc != 0 {
- const msg = "%U: lccc (%d) must be <= tcc (%d)"
- log.Fatalf(msg, r, lccc, tccc)
- }
- index := normalDecomp
- nTrail := chars[r].nTrailingNonStarters
- nLead := chars[r].nLeadingNonStarters
- if tccc > 0 || lccc > 0 || nTrail > 0 {
- tccc <<= 2
- tccc |= nTrail
- s += string([]byte{tccc})
- index = endMulti
- for _, r := range d[1:] {
- if ccc(r) == 0 {
- index = firstCCC
- }
- }
- if lccc > 0 || nLead > 0 {
- s += string([]byte{lccc})
- if index == firstCCC {
- log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
- }
- index = firstLeadingCCC
- }
- if cc != lccc {
- if cc != 0 {
- log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
- }
- index = firstCCCZeroExcept
- }
- } else if len(d) > 1 {
- index = firstMulti
- }
- return index, s
- }
-
- decompSet := makeDecompSet()
- const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
- decompSet.insert(firstStarterWithNLead, nLeadStr)
-
- // Store the uniqued decompositions in a byte buffer,
- // preceded by their byte length.
- for _, c := range chars {
- for _, f := range c.forms {
- if len(f.expandedDecomp) == 0 {
- continue
- }
- if f.combinesBackward {
- log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
- }
- index, s := mkstr(c.codePoint, &f)
- decompSet.insert(index, s)
- }
- }
-
- decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
- size := 0
- positionMap := make(map[string]uint16)
- decompositions.WriteString("\000")
- fmt.Fprintln(w, "const (")
- for i, m := range decompSet {
- sa := []string{}
- for s := range m {
- sa = append(sa, s)
- }
- sort.Strings(sa)
- for _, s := range sa {
- p := decompositions.Len()
- decompositions.WriteString(s)
- positionMap[s] = uint16(p)
- }
- if cname[i] != "" {
- fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
- }
- }
- fmt.Fprintln(w, "maxDecomp = 0x8000")
- fmt.Fprintln(w, ")")
- b := decompositions.Bytes()
- printBytes(w, b, "decomps")
- size += len(b)
-
- varnames := []string{"nfc", "nfkc"}
- for i := 0; i < FNumberOfFormTypes; i++ {
- trie := triegen.NewTrie(varnames[i])
-
- for r, c := range chars {
- f := c.forms[i]
- d := f.expandedDecomp
- if len(d) != 0 {
- _, key := mkstr(c.codePoint, &f)
- trie.Insert(rune(r), uint64(positionMap[key]))
- if c.ccc != ccc(d[0]) {
- // We assume the lead ccc of a decomposition !=0 in this case.
- if ccc(d[0]) == 0 {
- log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
- }
- }
- } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
- // Handle cases where it can't be detected that the nLead should be equal
- // to nTrail.
- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
- } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
- trie.Insert(c.codePoint, uint64(0x8000|v))
- }
- }
- sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
- if err != nil {
- log.Fatal(err)
- }
- size += sz
- }
- return size
-}
-
-func contains(sa []string, s string) bool {
- for _, a := range sa {
- if a == s {
- return true
- }
- }
- return false
-}
-
-func makeTables() {
- w := &bytes.Buffer{}
-
- size := 0
- if *tablelist == "" {
- return
- }
- list := strings.Split(*tablelist, ",")
- if *tablelist == "all" {
- list = []string{"recomp", "info"}
- }
-
- // Compute maximum decomposition size.
- max := 0
- for _, c := range chars {
- if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
- max = n
- }
- }
- fmt.Fprintln(w, `import "sync"`)
- fmt.Fprintln(w)
-
- fmt.Fprintln(w, "const (")
- fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
- fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
- fmt.Fprintln(w)
- fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
- fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
- fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
- fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
- fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
- fmt.Fprintln(w, ")\n")
-
- // Print the CCC remap table.
- size += len(cccMap)
- fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
- for i := 0; i < len(cccMap); i++ {
- if i%8 == 0 {
- fmt.Fprintln(w)
- }
- fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
- }
- fmt.Fprintln(w, "\n}\n")
-
- if contains(list, "info") {
- size += printCharInfoTables(w)
- }
-
- if contains(list, "recomp") {
- // Note that we use 32 bit keys, instead of 64 bit.
- // This clips the bits of three entries, but we know
- // this won't cause a collision. The compiler will catch
- // any changes made to UnicodeData.txt that introduce
- // a collision.
- // Note that the recomposition maps for NFC and NFKC
- // are identical.
-
- // Recomposition map
- nrentries := 0
- for _, c := range chars {
- f := c.forms[FCanonical]
- if !f.isOneWay && len(f.decomp) > 0 {
- nrentries++
- }
- }
- sz := nrentries * 8
- size += sz
- fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
- fmt.Fprintln(w, "var recompMap map[uint32]rune")
- fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
- fmt.Fprintln(w, `const recompMapPacked = "" +`)
- var buf [8]byte
- for i, c := range chars {
- f := c.forms[FCanonical]
- d := f.decomp
- if !f.isOneWay && len(d) > 0 {
- key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
- binary.BigEndian.PutUint32(buf[:4], key)
- binary.BigEndian.PutUint32(buf[4:], uint32(i))
- fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
- }
- }
- // hack so we don't have to special case the trailing plus sign
- fmt.Fprintf(w, ` ""`)
- fmt.Fprintln(w)
- }
-
- fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
- gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
-}
-
-func printChars() {
- if *verbose {
- for _, c := range chars {
- if !c.isValid() || c.state == SMissing {
- continue
- }
- fmt.Println(c)
- }
- }
-}
-
-// verifyComputed does various consistency tests.
-func verifyComputed() {
- for i, c := range chars {
- for _, f := range c.forms {
- isNo := (f.quickCheck[MDecomposed] == QCNo)
- if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
- log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
- }
-
- isMaybe := f.quickCheck[MComposed] == QCMaybe
- if f.combinesBackward != isMaybe {
- log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
- }
- if len(f.decomp) > 0 && f.combinesForward && isMaybe {
- log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
- }
-
- if len(f.expandedDecomp) != 0 {
- continue
- }
- if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
- // We accept these runes to be treated differently (it only affects
- // segment breaking in iteration, most likely on improper use), but
- // reconsider if more characters are added.
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
- // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
- // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
- // U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
- // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
- // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
- if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
- log.Fatalf("%U: nLead was %v; want %v", i, a, b)
- }
- }
- }
- nfc := c.forms[FCanonical]
- nfkc := c.forms[FCompatibility]
- if nfc.combinesBackward != nfkc.combinesBackward {
- log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
- }
- }
-}
-
-// Use values in DerivedNormalizationProps.txt to compare against the
-// values we computed.
-// DerivedNormalizationProps.txt has form:
-// 00C0..00C5 ; NFD_QC; N # ...
-// 0374 ; NFD_QC; N # ...
-// See https://unicode.org/reports/tr44/ for full explanation
-func testDerived() {
- f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- r := p.Rune(0)
- c := &chars[r]
-
- var ftype, mode int
- qt := p.String(1)
- switch qt {
- case "NFC_QC":
- ftype, mode = FCanonical, MComposed
- case "NFD_QC":
- ftype, mode = FCanonical, MDecomposed
- case "NFKC_QC":
- ftype, mode = FCompatibility, MComposed
- case "NFKD_QC":
- ftype, mode = FCompatibility, MDecomposed
- default:
- continue
- }
- var qr QCResult
- switch p.String(2) {
- case "Y":
- qr = QCYes
- case "N":
- qr = QCNo
- case "M":
- qr = QCMaybe
- default:
- log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
- }
- if got := c.forms[ftype].quickCheck[mode]; got != qr {
- log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
- }
- c.forms[ftype].verified[mode] = true
- }
- if err := p.Err(); err != nil {
- log.Fatal(err)
- }
- // Any unspecified value must be QCYes. Verify this.
- for i, c := range chars {
- for j, fd := range c.forms {
- for k, qr := range fd.quickCheck {
- if !fd.verified[k] && qr != QCYes {
- m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
- log.Printf(m, i, j, k, qr, c.name)
- }
- }
- }
- }
-}
-
-var testHeader = `const (
- Yes = iota
- No
- Maybe
-)
-
-type formData struct {
- qc uint8
- combinesForward bool
- decomposition string
-}
-
-type runeData struct {
- r rune
- ccc uint8
- nLead uint8
- nTrail uint8
- f [2]formData // 0: canonical; 1: compatibility
-}
-
-func f(qc uint8, cf bool, dec string) [2]formData {
- return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
-}
-
-func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
- return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
-}
-
-var testData = []runeData{
-`
-
-func printTestdata() {
- type lastInfo struct {
- ccc uint8
- nLead uint8
- nTrail uint8
- f string
- }
-
- last := lastInfo{}
- w := &bytes.Buffer{}
- fmt.Fprintf(w, testHeader)
- for r, c := range chars {
- f := c.forms[FCanonical]
- qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
- f = c.forms[FCompatibility]
- qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
- s := ""
- if d == dk && qc == qck && cf == cfk {
- s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
- } else {
- s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
- }
- current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
- if last != current {
- fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
- last = current
- }
- }
- fmt.Fprintln(w, "}")
- gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
-}
diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go
deleted file mode 100644
index 45d711900..000000000
--- a/vendor/golang.org/x/text/unicode/norm/triegen.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Trie table generator.
-// Used by make*tables tools to generate a go file with trie data structures
-// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
-// sequence are used to lookup offsets in the index table to be used for the
-// next byte. The last byte is used to index into a table with 16-bit values.
-
-package main
-
-import (
- "fmt"
- "io"
-)
-
-const maxSparseEntries = 16
-
-type normCompacter struct {
- sparseBlocks [][]uint64
- sparseOffset []uint16
- sparseCount int
- name string
-}
-
-func mostFrequentStride(a []uint64) int {
- counts := make(map[int]int)
- var v int
- for _, x := range a {
- if stride := int(x) - v; v != 0 && stride >= 0 {
- counts[stride]++
- }
- v = int(x)
- }
- var maxs, maxc int
- for stride, cnt := range counts {
- if cnt > maxc || (cnt == maxc && stride < maxs) {
- maxs, maxc = stride, cnt
- }
- }
- return maxs
-}
-
-func countSparseEntries(a []uint64) int {
- stride := mostFrequentStride(a)
- var v, count int
- for _, tv := range a {
- if int(tv)-v != stride {
- if tv != 0 {
- count++
- }
- }
- v = int(tv)
- }
- return count
-}
-
-func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
- if n := countSparseEntries(v); n <= maxSparseEntries {
- return (n+1)*4 + 2, true
- }
- return 0, false
-}
-
-func (c *normCompacter) Store(v []uint64) uint32 {
- h := uint32(len(c.sparseOffset))
- c.sparseBlocks = append(c.sparseBlocks, v)
- c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
- c.sparseCount += countSparseEntries(v) + 1
- return h
-}
-
-func (c *normCompacter) Handler() string {
- return c.name + "Sparse.lookup"
-}
-
-func (c *normCompacter) Print(w io.Writer) (retErr error) {
- p := func(f string, x ...interface{}) {
- if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
- retErr = err
- }
- }
-
- ls := len(c.sparseBlocks)
- p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
- p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
-
- ns := c.sparseCount
- p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
- p("var %sSparseValues = [%d]valueRange {", c.name, ns)
- for i, b := range c.sparseBlocks {
- p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
- var v int
- stride := mostFrequentStride(b)
- n := countSparseEntries(b)
- p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
- for i, nv := range b {
- if int(nv)-v != stride {
- if v != 0 {
- p(",hi:%#02x},", 0x80+i-1)
- }
- if nv != 0 {
- p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
- }
- }
- v = int(nv)
- }
- if v != 0 {
- p(",hi:%#02x},", 0x80+len(b)-1)
- }
- }
- p("\n}\n\n")
- return
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 53239b7cd..71c50d78d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,30 +1,30 @@
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
-github.com/Azure/go-ansiterm/winterm
github.com/Azure/go-ansiterm
+github.com/Azure/go-ansiterm/winterm
# github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml
# github.com/Microsoft/go-winio v0.4.14
github.com/Microsoft/go-winio
-github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/archive/tar
github.com/Microsoft/go-winio/backuptar
+github.com/Microsoft/go-winio/pkg/guid
# github.com/Microsoft/hcsshim v0.8.6
-github.com/Microsoft/hcsshim/osversion
github.com/Microsoft/hcsshim
+github.com/Microsoft/hcsshim/internal/guestrequest
github.com/Microsoft/hcsshim/internal/guid
github.com/Microsoft/hcsshim/internal/hcs
github.com/Microsoft/hcsshim/internal/hcserror
github.com/Microsoft/hcsshim/internal/hns
-github.com/Microsoft/hcsshim/internal/mergemaps
-github.com/Microsoft/hcsshim/internal/schema1
-github.com/Microsoft/hcsshim/internal/wclayer
-github.com/Microsoft/hcsshim/internal/guestrequest
github.com/Microsoft/hcsshim/internal/interop
github.com/Microsoft/hcsshim/internal/logfields
-github.com/Microsoft/hcsshim/internal/timeout
-github.com/Microsoft/hcsshim/internal/schema2
github.com/Microsoft/hcsshim/internal/longpath
+github.com/Microsoft/hcsshim/internal/mergemaps
github.com/Microsoft/hcsshim/internal/safefile
+github.com/Microsoft/hcsshim/internal/schema1
+github.com/Microsoft/hcsshim/internal/schema2
+github.com/Microsoft/hcsshim/internal/timeout
+github.com/Microsoft/hcsshim/internal/wclayer
+github.com/Microsoft/hcsshim/osversion
# github.com/VividCortex/ewma v1.1.1
github.com/VividCortex/ewma
# github.com/beorn7/perks v1.0.1
@@ -40,204 +40,205 @@ github.com/checkpoint-restore/go-criu/rpc
github.com/containerd/containerd/errdefs
# github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc
github.com/containerd/continuity/fs
-github.com/containerd/continuity/sysx
github.com/containerd/continuity/syscallx
-# github.com/containernetworking/cni v0.7.1
-github.com/containernetworking/cni/pkg/types
-github.com/containernetworking/cni/pkg/types/current
-github.com/containernetworking/cni/pkg/version
+github.com/containerd/continuity/sysx
+# github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containernetworking/cni/libcni
github.com/containernetworking/cni/pkg/invoke
+github.com/containernetworking/cni/pkg/types
github.com/containernetworking/cni/pkg/types/020
+github.com/containernetworking/cni/pkg/types/current
+github.com/containernetworking/cni/pkg/utils
+github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v0.8.2
-github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/ip
-github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
+github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
+github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
# github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982
github.com/containers/buildah
-github.com/containers/buildah/imagebuildah
-github.com/containers/buildah/pkg/chrootuser
-github.com/containers/buildah/pkg/cli
-github.com/containers/buildah/pkg/formats
-github.com/containers/buildah/util
-github.com/containers/buildah/pkg/secrets
-github.com/containers/buildah/pkg/parse
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
github.com/containers/buildah/docker
+github.com/containers/buildah/imagebuildah
github.com/containers/buildah/pkg/blobcache
github.com/containers/buildah/pkg/cgroups
+github.com/containers/buildah/pkg/chrootuser
+github.com/containers/buildah/pkg/cli
+github.com/containers/buildah/pkg/formats
github.com/containers/buildah/pkg/overlay
-github.com/containers/buildah/pkg/unshare
+github.com/containers/buildah/pkg/parse
+github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/umask
+github.com/containers/buildah/pkg/unshare
+github.com/containers/buildah/util
# github.com/containers/image/v5 v5.0.0
+github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
+github.com/containers/image/v5/directory/explicitfilepath
github.com/containers/image/v5/docker
github.com/containers/image/v5/docker/archive
-github.com/containers/image/v5/manifest
-github.com/containers/image/v5/pkg/docker/config
-github.com/containers/image/v5/signature
-github.com/containers/image/v5/transports
-github.com/containers/image/v5/transports/alltransports
-github.com/containers/image/v5/types
-github.com/containers/image/v5/oci/archive
-github.com/containers/image/v5/storage
-github.com/containers/image/v5/copy
+github.com/containers/image/v5/docker/daemon
+github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/docker/tarfile
github.com/containers/image/v5/image
-github.com/containers/image/v5/oci/layout
-github.com/containers/image/v5/tarball
-github.com/containers/image/v5/pkg/sysregistriesv2
-github.com/containers/image/v5/directory/explicitfilepath
-github.com/containers/image/v5/docker/policyconfiguration
-github.com/containers/image/v5/pkg/blobinfocache/none
-github.com/containers/image/v5/pkg/tlsclientconfig
-github.com/containers/image/v5/pkg/compression
-github.com/containers/image/v5/pkg/strslice
github.com/containers/image/v5/internal/pkg/keyctl
-github.com/containers/image/v5/version
-github.com/containers/image/v5/docker/daemon
-github.com/containers/image/v5/openshift
-github.com/containers/image/v5/ostree
-github.com/containers/image/v5/pkg/compression/types
github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/manifest
+github.com/containers/image/v5/oci/archive
github.com/containers/image/v5/oci/internal
+github.com/containers/image/v5/oci/layout
+github.com/containers/image/v5/openshift
+github.com/containers/image/v5/ostree
github.com/containers/image/v5/pkg/blobinfocache
-github.com/containers/image/v5/pkg/compression/internal
github.com/containers/image/v5/pkg/blobinfocache/boltdb
-github.com/containers/image/v5/pkg/blobinfocache/memory
github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize
+github.com/containers/image/v5/pkg/blobinfocache/memory
+github.com/containers/image/v5/pkg/blobinfocache/none
+github.com/containers/image/v5/pkg/compression
+github.com/containers/image/v5/pkg/compression/internal
+github.com/containers/image/v5/pkg/compression/types
+github.com/containers/image/v5/pkg/docker/config
+github.com/containers/image/v5/pkg/strslice
+github.com/containers/image/v5/pkg/sysregistriesv2
+github.com/containers/image/v5/pkg/tlsclientconfig
+github.com/containers/image/v5/signature
+github.com/containers/image/v5/storage
+github.com/containers/image/v5/tarball
+github.com/containers/image/v5/transports
+github.com/containers/image/v5/transports/alltransports
+github.com/containers/image/v5/types
+github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
github.com/containers/libtrust
# github.com/containers/psgo v1.3.2
github.com/containers/psgo
github.com/containers/psgo/internal/capabilities
+github.com/containers/psgo/internal/cgroups
github.com/containers/psgo/internal/dev
+github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-github.com/containers/psgo/internal/cgroups
-github.com/containers/psgo/internal/host
# github.com/containers/storage v1.13.5
github.com/containers/storage
-github.com/containers/storage/pkg/archive
-github.com/containers/storage/pkg/chrootarchive
-github.com/containers/storage/pkg/idtools
-github.com/containers/storage/pkg/reexec
-github.com/containers/storage/pkg/mount
-github.com/containers/storage/pkg/stringid
-github.com/containers/storage/pkg/system
-github.com/containers/storage/pkg/truncindex
-github.com/containers/storage/pkg/parsers/kernel
-github.com/containers/storage/pkg/fileutils
-github.com/containers/storage/pkg/ioutils
-github.com/containers/storage/pkg/pools
-github.com/containers/storage/pkg/homedir
github.com/containers/storage/drivers
-github.com/containers/storage/drivers/register
-github.com/containers/storage/pkg/config
-github.com/containers/storage/pkg/directory
-github.com/containers/storage/pkg/lockfile
-github.com/containers/storage/pkg/parsers
-github.com/containers/storage/pkg/stringutils
-github.com/containers/storage/pkg/tarlog
-github.com/containers/storage/pkg/longpath
-github.com/containers/storage/pkg/promise
github.com/containers/storage/drivers/aufs
github.com/containers/storage/drivers/btrfs
+github.com/containers/storage/drivers/copy
github.com/containers/storage/drivers/devmapper
github.com/containers/storage/drivers/overlay
+github.com/containers/storage/drivers/overlayutils
+github.com/containers/storage/drivers/quota
+github.com/containers/storage/drivers/register
github.com/containers/storage/drivers/vfs
github.com/containers/storage/drivers/windows
github.com/containers/storage/drivers/zfs
-github.com/containers/storage/pkg/locker
+github.com/containers/storage/pkg/archive
+github.com/containers/storage/pkg/chrootarchive
+github.com/containers/storage/pkg/config
github.com/containers/storage/pkg/devicemapper
+github.com/containers/storage/pkg/directory
github.com/containers/storage/pkg/dmesg
-github.com/containers/storage/pkg/loopback
-github.com/containers/storage/drivers/overlayutils
-github.com/containers/storage/drivers/quota
+github.com/containers/storage/pkg/fileutils
github.com/containers/storage/pkg/fsutils
-github.com/containers/storage/drivers/copy
+github.com/containers/storage/pkg/homedir
+github.com/containers/storage/pkg/idtools
+github.com/containers/storage/pkg/ioutils
+github.com/containers/storage/pkg/locker
+github.com/containers/storage/pkg/lockfile
+github.com/containers/storage/pkg/longpath
+github.com/containers/storage/pkg/loopback
+github.com/containers/storage/pkg/mount
+github.com/containers/storage/pkg/parsers
+github.com/containers/storage/pkg/parsers/kernel
+github.com/containers/storage/pkg/pools
+github.com/containers/storage/pkg/promise
+github.com/containers/storage/pkg/reexec
+github.com/containers/storage/pkg/stringid
+github.com/containers/storage/pkg/stringutils
+github.com/containers/storage/pkg/system
+github.com/containers/storage/pkg/tarlog
+github.com/containers/storage/pkg/truncindex
# github.com/coreos/go-iptables v0.4.2
github.com/coreos/go-iptables/iptables
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/go-systemd/activation
github.com/coreos/go-systemd/dbus
-github.com/coreos/go-systemd/sdjournal
github.com/coreos/go-systemd/journal
+github.com/coreos/go-systemd/sdjournal
# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
github.com/coreos/pkg/dlopen
-# github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca
+# github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
github.com/cri-o/ocicni/pkg/ocicni
# github.com/cyphar/filepath-securejoin v0.2.2
github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
# github.com/docker/distribution v2.7.1+incompatible
+github.com/docker/distribution
+github.com/docker/distribution/digestset
+github.com/docker/distribution/metrics
github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client
-github.com/docker/distribution/digestset
-github.com/docker/distribution
github.com/docker/distribution/registry/client/auth/challenge
github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
-github.com/docker/distribution/metrics
# github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce
-github.com/docker/docker/pkg/signal
-github.com/docker/docker/pkg/homedir
-github.com/docker/docker/oci/caps
-github.com/docker/docker/pkg/namesgenerator
-github.com/docker/docker/pkg/term
-github.com/docker/docker/pkg/ioutils
-github.com/docker/docker/pkg/parsers
-github.com/docker/docker/api/types/versions
-github.com/docker/docker/errdefs
-github.com/docker/docker/pkg/term/windows
-github.com/docker/docker/pkg/longpath
-github.com/docker/docker/api/types/registry
-github.com/docker/docker/api/types/swarm
-github.com/docker/docker/pkg/archive
-github.com/docker/docker/pkg/fileutils
-github.com/docker/docker/pkg/jsonmessage
-github.com/docker/docker/pkg/stdcopy
-github.com/docker/docker/pkg/system
-github.com/docker/docker/client
-github.com/docker/docker/api/types/container
-github.com/docker/docker/api/types/mount
-github.com/docker/docker/api/types/network
-github.com/docker/docker/api/types/swarm/runtime
-github.com/docker/docker/pkg/idtools
-github.com/docker/docker/pkg/pools
-github.com/docker/docker/pkg/mount
github.com/docker/docker/api
github.com/docker/docker/api/types
+github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/container
github.com/docker/docker/api/types/events
github.com/docker/docker/api/types/filters
github.com/docker/docker/api/types/image
+github.com/docker/docker/api/types/mount
+github.com/docker/docker/api/types/network
+github.com/docker/docker/api/types/registry
+github.com/docker/docker/api/types/strslice
+github.com/docker/docker/api/types/swarm
+github.com/docker/docker/api/types/swarm/runtime
github.com/docker/docker/api/types/time
+github.com/docker/docker/api/types/versions
github.com/docker/docker/api/types/volume
-github.com/docker/docker/api/types/blkiodev
-github.com/docker/docker/api/types/strslice
+github.com/docker/docker/client
+github.com/docker/docker/errdefs
+github.com/docker/docker/oci/caps
+github.com/docker/docker/pkg/archive
+github.com/docker/docker/pkg/fileutils
+github.com/docker/docker/pkg/homedir
+github.com/docker/docker/pkg/idtools
+github.com/docker/docker/pkg/ioutils
+github.com/docker/docker/pkg/jsonmessage
+github.com/docker/docker/pkg/longpath
+github.com/docker/docker/pkg/mount
+github.com/docker/docker/pkg/namesgenerator
+github.com/docker/docker/pkg/parsers
+github.com/docker/docker/pkg/pools
+github.com/docker/docker/pkg/signal
+github.com/docker/docker/pkg/stdcopy
+github.com/docker/docker/pkg/system
+github.com/docker/docker/pkg/term
+github.com/docker/docker/pkg/term/windows
# github.com/docker/docker-credential-helpers v0.6.3
-github.com/docker/docker-credential-helpers/credentials
github.com/docker/docker-credential-helpers/client
+github.com/docker/docker-credential-helpers/credentials
# github.com/docker/go-connections v0.4.0
github.com/docker/go-connections/nat
-github.com/docker/go-connections/tlsconfig
github.com/docker/go-connections/sockets
+github.com/docker/go-connections/tlsconfig
# github.com/docker/go-metrics v0.0.1
github.com/docker/go-metrics
# github.com/docker/go-units v0.4.0
github.com/docker/go-units
# github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/docker/libnetwork/resolvconf
-github.com/docker/libnetwork/types
github.com/docker/libnetwork/resolvconf/dns
+github.com/docker/libnetwork/types
# github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c
github.com/docker/spdystream
github.com/docker/spdystream/spdy
@@ -284,15 +285,15 @@ github.com/imdario/mergo
github.com/inconshreveable/mousetrap
# github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111
github.com/ishidawataru/sctp
-# github.com/json-iterator/go v1.1.7
+# github.com/json-iterator/go v1.1.8
github.com/json-iterator/go
# github.com/klauspost/compress v1.8.1
-github.com/klauspost/compress/zstd
github.com/klauspost/compress/flate
+github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
+github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-github.com/klauspost/compress/fse
# github.com/klauspost/cpuid v1.2.1
github.com/klauspost/cpuid
# github.com/klauspost/pgzip v1.2.1
@@ -318,83 +319,83 @@ github.com/mrunalp/fileutils
# github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
github.com/mtrmac/gpgme
# github.com/onsi/ginkgo v1.10.3
-github.com/onsi/ginkgo/ginkgo
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
+github.com/onsi/ginkgo/extensions/table
+github.com/onsi/ginkgo/ginkgo
github.com/onsi/ginkgo/ginkgo/convert
github.com/onsi/ginkgo/ginkgo/interrupthandler
github.com/onsi/ginkgo/ginkgo/nodot
github.com/onsi/ginkgo/ginkgo/testrunner
github.com/onsi/ginkgo/ginkgo/testsuite
github.com/onsi/ginkgo/ginkgo/watch
-github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
-github.com/onsi/ginkgo/types
github.com/onsi/ginkgo/internal/codelocation
+github.com/onsi/ginkgo/internal/containernode
github.com/onsi/ginkgo/internal/failer
+github.com/onsi/ginkgo/internal/leafnodes
github.com/onsi/ginkgo/internal/remote
+github.com/onsi/ginkgo/internal/spec
+github.com/onsi/ginkgo/internal/spec_iterator
+github.com/onsi/ginkgo/internal/specrunner
github.com/onsi/ginkgo/internal/suite
github.com/onsi/ginkgo/internal/testingtproxy
github.com/onsi/ginkgo/internal/writer
github.com/onsi/ginkgo/reporters
github.com/onsi/ginkgo/reporters/stenographer
-github.com/onsi/ginkgo/extensions/table
+github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
-github.com/onsi/ginkgo/internal/spec_iterator
-github.com/onsi/ginkgo/internal/containernode
-github.com/onsi/ginkgo/internal/leafnodes
-github.com/onsi/ginkgo/internal/spec
-github.com/onsi/ginkgo/internal/specrunner
-# github.com/onsi/gomega v1.7.0
+github.com/onsi/ginkgo/types
+# github.com/onsi/gomega v1.7.1
github.com/onsi/gomega
-github.com/onsi/gomega/gexec
github.com/onsi/gomega/format
+github.com/onsi/gomega/gbytes
+github.com/onsi/gomega/gexec
github.com/onsi/gomega/internal/assertion
github.com/onsi/gomega/internal/asyncassertion
+github.com/onsi/gomega/internal/oraclematcher
github.com/onsi/gomega/internal/testingtsupport
github.com/onsi/gomega/matchers
-github.com/onsi/gomega/types
-github.com/onsi/gomega/gbytes
-github.com/onsi/gomega/internal/oraclematcher
github.com/onsi/gomega/matchers/support/goraph/bipartitegraph
github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
+github.com/onsi/gomega/types
# github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
-github.com/opencontainers/image-spec/specs-go/v1
github.com/opencontainers/image-spec/specs-go
+github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
-github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/apparmor
+github.com/opencontainers/runc/libcontainer/cgroups
github.com/opencontainers/runc/libcontainer/configs
github.com/opencontainers/runc/libcontainer/devices
-github.com/opencontainers/runc/libcontainer/cgroups
github.com/opencontainers/runc/libcontainer/system
+github.com/opencontainers/runc/libcontainer/user
# github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.0
+github.com/opencontainers/runtime-tools/error
+github.com/opencontainers/runtime-tools/filepath
github.com/opencontainers/runtime-tools/generate
-github.com/opencontainers/runtime-tools/validate
github.com/opencontainers/runtime-tools/generate/seccomp
-github.com/opencontainers/runtime-tools/filepath
github.com/opencontainers/runtime-tools/specerror
-github.com/opencontainers/runtime-tools/error
+github.com/opencontainers/runtime-tools/validate
# github.com/opencontainers/selinux v1.3.0
-github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/go-selinux
+github.com/opencontainers/selinux/go-selinux/label
# github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
github.com/openshift/api/config/v1
# github.com/openshift/imagebuilder v1.1.1
github.com/openshift/imagebuilder
-github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/dockerfile/command
+github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/signal
github.com/openshift/imagebuilder/strslice
# github.com/opentracing/opentracing-go v1.1.0
github.com/opentracing/opentracing-go
-github.com/opentracing/opentracing-go/log
github.com/opentracing/opentracing-go/ext
+github.com/opentracing/opentracing-go/log
# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
@@ -406,19 +407,19 @@ github.com/pkg/profile
github.com/pmezard/go-difflib/difflib
# github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9
github.com/pquerna/ffjson/fflib/v1
+github.com/pquerna/ffjson/fflib/v1/internal
github.com/pquerna/ffjson/inception
github.com/pquerna/ffjson/shared
-github.com/pquerna/ffjson/fflib/v1/internal
# github.com/prometheus/client_golang v1.1.0
github.com/prometheus/client_golang/prometheus
-github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/internal
+github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.6.0
github.com/prometheus/common/expfmt
-github.com/prometheus/common/model
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
+github.com/prometheus/common/model
# github.com/prometheus/procfs v0.0.3
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
@@ -442,34 +443,34 @@ github.com/stretchr/testify/require
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
-# github.com/uber/jaeger-client-go v2.19.0+incompatible
+# github.com/uber/jaeger-client-go v2.20.0+incompatible
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
github.com/uber/jaeger-client-go/internal/baggage
+github.com/uber/jaeger-client-go/internal/baggage/remote
github.com/uber/jaeger-client-go/internal/spanlog
github.com/uber/jaeger-client-go/internal/throttler
+github.com/uber/jaeger-client-go/internal/throttler/remote
github.com/uber/jaeger-client-go/log
+github.com/uber/jaeger-client-go/rpcmetrics
github.com/uber/jaeger-client-go/thrift
+github.com/uber/jaeger-client-go/thrift-gen/agent
+github.com/uber/jaeger-client-go/thrift-gen/baggage
github.com/uber/jaeger-client-go/thrift-gen/jaeger
github.com/uber/jaeger-client-go/thrift-gen/sampling
github.com/uber/jaeger-client-go/thrift-gen/zipkincore
-github.com/uber/jaeger-client-go/utils
-github.com/uber/jaeger-client-go/internal/baggage/remote
-github.com/uber/jaeger-client-go/internal/throttler/remote
-github.com/uber/jaeger-client-go/rpcmetrics
github.com/uber/jaeger-client-go/transport
-github.com/uber/jaeger-client-go/thrift-gen/agent
-github.com/uber/jaeger-client-go/thrift-gen/baggage
+github.com/uber/jaeger-client-go/utils
# github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5
github.com/uber/jaeger-lib/metrics
# github.com/ulikunitz/xz v0.5.6
github.com/ulikunitz/xz
+github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
-github.com/ulikunitz/xz/internal/hash
# github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
-github.com/varlink/go/varlink
github.com/varlink/go/cmd/varlink-go-interface-generator
+github.com/varlink/go/varlink
github.com/varlink/go/varlink/idl
# github.com/vbatts/tar-split v0.11.1
github.com/vbatts/tar-split/archive/tar
@@ -477,8 +478,8 @@ github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
# github.com/vbauerster/mpb v3.4.0+incompatible
github.com/vbauerster/mpb
-github.com/vbauerster/mpb/decor
github.com/vbauerster/mpb/cwriter
+github.com/vbauerster/mpb/decor
github.com/vbauerster/mpb/internal
# github.com/vishvananda/netlink v1.0.0
github.com/vishvananda/netlink
@@ -491,33 +492,35 @@ github.com/xeipuuv/gojsonpointer
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.1.0
github.com/xeipuuv/gojsonschema
+# go.uber.org/atomic v1.4.0
+go.uber.org/atomic
# golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
-golang.org/x/crypto/ssh/terminal
+golang.org/x/crypto/cast5
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
+golang.org/x/crypto/openpgp/elgamal
golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
-golang.org/x/crypto/cast5
-golang.org/x/crypto/openpgp/elgamal
+golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20190628185345-da137c7871d7
golang.org/x/net/context
-golang.org/x/net/http2
+golang.org/x/net/context/ctxhttp
+golang.org/x/net/html
+golang.org/x/net/html/atom
golang.org/x/net/html/charset
-golang.org/x/net/proxy
golang.org/x/net/http/httpguts
+golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
-golang.org/x/net/html
golang.org/x/net/internal/socks
-golang.org/x/net/html/atom
-golang.org/x/net/context/ctxhttp
+golang.org/x/net/proxy
# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20190423024810-112230192c58
-golang.org/x/sync/semaphore
golang.org/x/sync/errgroup
+golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20190902133755-9109b7679e13
golang.org/x/sys/unix
golang.org/x/sys/windows
@@ -525,41 +528,41 @@ golang.org/x/sys/windows
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex
-golang.org/x/text/transform
-golang.org/x/text/secure/bidirule
-golang.org/x/text/unicode/bidi
-golang.org/x/text/unicode/norm
-golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/internal
+golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/japanese
golang.org/x/text/encoding/korean
golang.org/x/text/encoding/simplifiedchinese
golang.org/x/text/encoding/traditionalchinese
golang.org/x/text/encoding/unicode
-golang.org/x/text/language
-golang.org/x/text/internal/utf8internal
-golang.org/x/text/runes
golang.org/x/text/internal/language
golang.org/x/text/internal/language/compact
golang.org/x/text/internal/tag
+golang.org/x/text/internal/utf8internal
+golang.org/x/text/language
+golang.org/x/text/runes
+golang.org/x/text/secure/bidirule
+golang.org/x/text/transform
+golang.org/x/text/unicode/bidi
+golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0
golang.org/x/time/rate
# google.golang.org/appengine v1.6.1
-google.golang.org/appengine/urlfetch
google.golang.org/appengine/internal
-google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/internal/base
google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
+google.golang.org/appengine/internal/urlfetch
+google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.24.0
google.golang.org/grpc/codes
-google.golang.org/grpc/status
-google.golang.org/grpc/internal
google.golang.org/grpc/connectivity
google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
+google.golang.org/grpc/status
# gopkg.in/fsnotify.v1 v1.4.7
gopkg.in/fsnotify.v1
# gopkg.in/inf.v0 v0.9.1
@@ -571,61 +574,61 @@ gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190813020757-36bff7324fb7
k8s.io/api/core/v1
# k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010
-k8s.io/apimachinery/pkg/apis/meta/v1
-k8s.io/apimachinery/pkg/util/runtime
+k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
-k8s.io/apimachinery/pkg/runtime
-k8s.io/apimachinery/pkg/runtime/schema
-k8s.io/apimachinery/pkg/types
-k8s.io/apimachinery/pkg/util/intstr
+k8s.io/apimachinery/pkg/apis/meta/v1
+k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/conversion
+k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
+k8s.io/apimachinery/pkg/runtime
+k8s.io/apimachinery/pkg/runtime/schema
+k8s.io/apimachinery/pkg/runtime/serializer
+k8s.io/apimachinery/pkg/runtime/serializer/json
+k8s.io/apimachinery/pkg/runtime/serializer/protobuf
+k8s.io/apimachinery/pkg/runtime/serializer/recognizer
+k8s.io/apimachinery/pkg/runtime/serializer/streaming
+k8s.io/apimachinery/pkg/runtime/serializer/versioning
k8s.io/apimachinery/pkg/selection
-k8s.io/apimachinery/pkg/watch
-k8s.io/apimachinery/pkg/util/httpstream
-k8s.io/apimachinery/pkg/util/remotecommand
-k8s.io/apimachinery/pkg/conversion/queryparams
+k8s.io/apimachinery/pkg/types
+k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/errors
+k8s.io/apimachinery/pkg/util/framer
+k8s.io/apimachinery/pkg/util/httpstream
+k8s.io/apimachinery/pkg/util/httpstream/spdy
+k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/naming
+k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/util/remotecommand
+k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
-k8s.io/apimachinery/third_party/forked/golang/reflect
k8s.io/apimachinery/pkg/util/validation
-k8s.io/apimachinery/pkg/util/net
-k8s.io/apimachinery/pkg/api/errors
-k8s.io/apimachinery/pkg/runtime/serializer/streaming
-k8s.io/apimachinery/pkg/util/httpstream/spdy
k8s.io/apimachinery/pkg/util/validation/field
+k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
-k8s.io/apimachinery/pkg/runtime/serializer
-k8s.io/apimachinery/pkg/util/clock
+k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/netutil
-k8s.io/apimachinery/pkg/runtime/serializer/json
-k8s.io/apimachinery/pkg/runtime/serializer/protobuf
-k8s.io/apimachinery/pkg/runtime/serializer/recognizer
-k8s.io/apimachinery/pkg/runtime/serializer/versioning
-k8s.io/apimachinery/pkg/util/framer
-k8s.io/apimachinery/pkg/util/yaml
-k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
-k8s.io/client-go/tools/remotecommand
-k8s.io/client-go/rest
-k8s.io/client-go/transport/spdy
-k8s.io/client-go/util/exec
-k8s.io/client-go/util/homedir
+k8s.io/client-go/pkg/apis/clientauthentication
+k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
+k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
k8s.io/client-go/pkg/version
k8s.io/client-go/plugin/pkg/client/auth/exec
+k8s.io/client-go/rest
k8s.io/client-go/rest/watch
k8s.io/client-go/tools/clientcmd/api
k8s.io/client-go/tools/metrics
+k8s.io/client-go/tools/remotecommand
k8s.io/client-go/transport
+k8s.io/client-go/transport/spdy
k8s.io/client-go/util/cert
-k8s.io/client-go/util/flowcontrol
-k8s.io/client-go/pkg/apis/clientauthentication
-k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
-k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
k8s.io/client-go/util/connrotation
+k8s.io/client-go/util/exec
+k8s.io/client-go/util/flowcontrol
+k8s.io/client-go/util/homedir
k8s.io/client-go/util/keyutil
# k8s.io/klog v0.3.3
k8s.io/klog