92 files changed, 3093 insertions, 723 deletions
@@ -522,6 +522,9 @@ vendor: $(GO) mod vendor && \ $(GO) mod verify +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.12 make vendor + .PHONY: \ .gopathok \ binaries \ diff --git a/cmd/podman/attach.go b/cmd/podman/attach.go index b78633ed6..b03673f29 100644 --- a/cmd/podman/attach.go +++ b/cmd/podman/attach.go @@ -2,6 +2,7 @@ package main import ( "github.com/containers/libpod/cmd/podman/cliconfig" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/adapter" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -31,7 +32,7 @@ func init() { attachCommand.SetHelpTemplate(HelpTemplate()) attachCommand.SetUsageTemplate(UsageTemplate()) flags := attachCommand.Flags() - flags.StringVar(&attachCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") + flags.StringVar(&attachCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") flags.BoolVar(&attachCommand.NoStdin, "no-stdin", false, "Do not attach STDIN. The default is false") flags.BoolVar(&attachCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process") flags.BoolVarP(&attachCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of") diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go index 58d67ddc1..780b68333 100644 --- a/cmd/podman/cliconfig/config.go +++ b/cmd/podman/cliconfig/config.go @@ -467,14 +467,15 @@ type RestartValues struct { type RestoreValues struct { PodmanCommand - All bool - Keep bool - Latest bool - TcpEstablished bool - Import string - Name string - IgnoreRootfs bool - IgnoreStaticIP bool + All bool + Keep bool + Latest bool + TcpEstablished bool + Import string + Name string + IgnoreRootfs bool + IgnoreStaticIP bool + IgnoreStaticMAC bool } type RmValues struct { diff --git a/cmd/podman/common.go b/cmd/podman/common.go index 37511641b..3e86b8e20 100644 --- a/cmd/podman/common.go +++ b/cmd/podman/common.go @@ -188,7 +188,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) { "Run container in background and print container ID", ) createFlags.String( - "detach-keys", "", + "detach-keys", define.DefaultDetachKeys, "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`", ) createFlags.StringSlice( @@ -328,7 +328,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) { ) createFlags.String( "mac-address", "", - "Container MAC address (e.g. 92:d0:c6:0a:29:33), not currently supported", + "Container MAC address (e.g. 
92:d0:c6:0a:29:33)", ) createFlags.StringP( "memory", "m", "", diff --git a/cmd/podman/exec.go b/cmd/podman/exec.go index 649a7b0db..afa701897 100644 --- a/cmd/podman/exec.go +++ b/cmd/podman/exec.go @@ -2,6 +2,7 @@ package main import ( "github.com/containers/libpod/cmd/podman/cliconfig" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/adapter" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -34,7 +35,7 @@ func init() { execCommand.SetUsageTemplate(UsageTemplate()) flags := execCommand.Flags() flags.SetInterspersed(false) - flags.StringVar(&execCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _") + flags.StringVar(&execCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _") flags.StringArrayVarP(&execCommand.Env, "env", "e", []string{}, "Set environment variables") flags.BoolVarP(&execCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached") flags.BoolVarP(&execCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of") diff --git a/cmd/podman/images.go b/cmd/podman/images.go index 6157fda2a..7d498517c 100644 --- a/cmd/podman/images.go +++ b/cmd/podman/images.go @@ -291,6 +291,10 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma if len(tag) == 71 && strings.HasPrefix(tag, "sha256:") { imageDigest = digest.Digest(tag) tag = "" + } else { + if img.Digest() != "" { + imageDigest = img.Digest() + } } params := imagesTemplateParams{ Repository: repo, diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go index 90d0b2dc4..caefadb6d 100644 --- a/cmd/podman/restore.go +++ b/cmd/podman/restore.go @@ -47,6 +47,7 @@ func init() { flags.StringVarP(&restoreCommand.Name, "name", "n", "", "Specify new name for container restored from exported checkpoint (only works with --import)") flags.BoolVar(&restoreCommand.IgnoreRootfs, "ignore-rootfs", false, "Do not apply root file-system changes when importing from exported checkpoint") flags.BoolVar(&restoreCommand.IgnoreStaticIP, "ignore-static-ip", false, "Ignore IP address set via --static-ip") + flags.BoolVar(&restoreCommand.IgnoreStaticMAC, "ignore-static-mac", false, "Ignore MAC address set via --mac-address") markFlagHiddenForRemoteClient("latest", flags) } diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go index dc343e694..6c74b8a9b 100644 --- a/cmd/podman/shared/create.go +++ b/cmd/podman/shared/create.go @@ -336,10 +336,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. return nil, err } - if c.String("mac-address") != "" { - return nil, errors.Errorf("--mac-address option not currently supported") - } - imageID := "" inputCommand = c.InputArgs[1:] diff --git a/cmd/podman/start.go b/cmd/podman/start.go index 2d2cf74d2..d4b4534bb 100644 --- a/cmd/podman/start.go +++ b/cmd/podman/start.go @@ -35,7 +35,7 @@ func init() { startCommand.SetUsageTemplate(UsageTemplate()) flags := startCommand.Flags() flags.BoolVarP(&startCommand.Attach, "attach", "a", false, "Attach container's STDOUT and STDERR") - flags.StringVar(&startCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") + flags.StringVar(&startCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") flags.BoolVarP(&startCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached") flags.BoolVarP(&startCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of") flags.BoolVar(&startCommand.SigProxy, "sig-proxy", false, "Proxy received signals to the process (default true if attaching, false otherwise)") diff --git a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist index 9db416a19..a7bcf47bb 100644 --- a/cni/87-podman-bridge.conflist +++ b/cni/87-podman-bridge.conflist @@ -33,6 +33,9 @@ { "type": "firewall", "backend": "iptables" + }, + { + "type": "tuning" } ] } diff --git a/completions/bash/podman b/completions/bash/podman index 0abf9e738..4d552b0a7 100644 --- a/completions/bash/podman +++ b/completions/bash/podman @@ -877,6 +877,7 @@ _podman_container_restore() { --tcp-established --ignore-rootfs --ignore-static-ip + --ignore-static-mac " case "$prev" in -i|--import) diff --git a/docs/source/markdown/podman-attach.1.md b/docs/source/markdown/podman-attach.1.md index cef01f0f6..1ac2e49a9 100644 --- a/docs/source/markdown/podman-attach.1.md +++ b/docs/source/markdown/podman-attach.1.md @@ -20,9 +20,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information. ## OPTIONS **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--latest**, **-l** diff --git a/docs/source/markdown/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md index 1d2cf0b3e..d71daf4af 100644 --- a/docs/source/markdown/podman-container-restore.1.md +++ b/docs/source/markdown/podman-container-restore.1.md @@ -76,6 +76,15 @@ a container is restored multiple times from an exported checkpoint with **--name Using **--ignore-static-ip** tells Podman to ignore the IP address if it was configured with **--ip** during container creation. +**--ignore-static-mac** + +If the container was started with **--mac-address** the restored container also +tries to use that MAC address and restore fails if that MAC address is already +in use. This can happen, if a container is restored multiple times from an +exported checkpoint with **--name, -n**. + +Using **--ignore-static-mac** tells Podman to ignore the MAC address if it was +configured with **--mac-address** during container creation. 
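Elsewhere in this diff, pkg/spec/createconfig.go parses the --mac-address value with Go's net.ParseMAC before passing it to the new libpod.WithStaticMAC option, and the restore path clears config.StaticMAC when --ignore-static-mac is given. A minimal standalone sketch of that parsing step (illustrative only, not libpod code):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Same address used in the --mac-address examples elsewhere in this diff.
	mac, err := net.ParseMAC("92:d0:c6:0a:29:33")
	if err != nil {
		// createconfig.go wraps this case in define.ErrInvalidArg
		// ("cannot parse ... as MAC address").
		fmt.Println("invalid MAC:", err)
		return
	}
	// net.HardwareAddr is the type ContainerConfig.StaticMAC stores and the
	// value that --ignore-static-mac resets to nil during restore.
	fmt.Println(mac.String()) // 92:d0:c6:0a:29:33
}
```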
## EXAMPLE podman container restore mywebserver diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md index 7f6243298..d9ee69f82 100644 --- a/docs/source/markdown/podman-create.1.md +++ b/docs/source/markdown/podman-create.1.md @@ -198,9 +198,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information. **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--device**=*device* diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md index 4c17c056a..9624425dc 100644 --- a/docs/source/markdown/podman-exec.1.md +++ b/docs/source/markdown/podman-exec.1.md @@ -15,7 +15,7 @@ podman\-exec - Execute a command in a running container **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--env**, **-e** diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md index fc66d1b02..28b00ee29 100644 --- a/docs/source/markdown/podman-run.1.md +++ b/docs/source/markdown/podman-run.1.md @@ -204,9 +204,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information. **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--device**=*device* diff --git a/docs/source/markdown/podman-start.1.md b/docs/source/markdown/podman-start.1.md index 4c81d17bd..84af9d800 100644 --- a/docs/source/markdown/podman-start.1.md +++ b/docs/source/markdown/podman-start.1.md @@ -23,9 +23,7 @@ starting multiple containers. **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. 
**--interactive**, **-i** @@ -9,7 +9,7 @@ require ( github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect - github.com/containernetworking/cni v0.7.1 + github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 github.com/containernetworking/plugins v0.8.2 github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 github.com/containers/image/v5 v5.0.0 @@ -17,7 +17,7 @@ require ( github.com/containers/storage v1.13.5 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect - github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca + github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b github.com/cyphar/filepath-securejoin v0.2.2 github.com/davecgh/go-spew v1.1.1 github.com/docker/distribution v2.7.1+incompatible @@ -37,11 +37,11 @@ require ( github.com/hashicorp/go-multierror v1.0.0 github.com/hpcloud/tail v1.0.0 github.com/imdario/mergo v0.3.7 // indirect - github.com/json-iterator/go v1.1.7 + github.com/json-iterator/go v1.1.8 github.com/mattn/go-isatty v0.0.8 // indirect github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 github.com/onsi/ginkgo v1.10.3 - github.com/onsi/gomega v1.7.0 + github.com/onsi/gomega v1.7.1 github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 @@ -59,7 +59,7 @@ require ( github.com/stretchr/testify v1.4.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/uber-go/atomic v1.4.0 // indirect - github.com/uber/jaeger-client-go v2.19.0+incompatible + github.com/uber/jaeger-client-go v2.20.0+incompatible github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b github.com/vishvananda/netlink v1.0.0 @@ -53,6 +53,8 @@ github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL github.com/containernetworking/cni v0.7.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc= +github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.2 h1:5lnwfsAYO+V7yXhysJKy3E1A2Gy9oVut031zfdOzI9w= github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr7U8R23PdhwaLp6F3adc= github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e h1:iDavHEx5Yr7o+0l6495Ya6N0YEPplIUZuWC2e14baDM= @@ -83,6 +85,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca h1:CJstDqYy9ClWuPcDHMTCAiUS+ckekluYetGR2iYYWuo= github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca/go.mod h1:BO0al9TKber3XUTucLzKgoG5sq8qiOB41H7zSdfw6r8= +github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b h1:SgS+WV10y2Bubuy2HquSBori6DXj9sqRN77Hgs5H7Qc= 
+github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b/go.mod h1:ZOuIEOp/3MB1eCBWANnNxM3zUA3NWh76wSRCsnKAg2c= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -220,6 +224,8 @@ github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwK github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/juju/errors v0.0.0-20180806074554-22422dad46e1/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= @@ -295,6 +301,8 @@ github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= @@ -399,6 +407,8 @@ github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.19.0+incompatible h1:pbwbYfHUoaase0oPQOdZ1GcaUjImYGimUXSQ/+8+Z8Q= github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.20.0+incompatible h1:ttG9wKdl2ikV/BGOtu+eb+VPp+R7jMeuM177Ihs5Fdc= +github.com/uber/jaeger-client-go v2.20.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 h1:CwmGyzHTzCqCdZJkWR0A7ucZXgrCY7spRcpvm7ci//s= github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= diff --git a/libpod.conf b/libpod.conf index 47f66ecc1..7e0228c19 100644 --- a/libpod.conf +++ b/libpod.conf @@ -142,8 +142,36 @@ runc = [ ] crun = [ - "/usr/bin/crun", - "/usr/local/bin/crun", + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", +] + +# Kata Containers is an OCI runtime, where containers are run inside lightweight +# Virtual Machines (VMs). 
Kata provides additional isolation towards the host, +# minimizing the host attack surface and mitigating the consequences of +# containers breakout. +# Please notes that Kata does not support rootless podman yet, but we can leave +# the paths below blank to let them be discovered by the $PATH environment +# variable. + +# Kata Containers with the default configured VMM +kata-runtime = [ + "/usr/bin/kata-runtime", +] + +# Kata Containers with the QEMU VMM +kata-qemu = [ + "/usr/bin/kata-qemu", +] + +# Kata Containers with the Firecracker VMM +kata-fc = [ + "/usr/bin/kata-fc", ] # The [runtimes] table MUST be the last thing in this file. diff --git a/libpod/config/default.go b/libpod/config/default.go index 17574c059..5decaeab7 100644 --- a/libpod/config/default.go +++ b/libpod/config/default.go @@ -6,6 +6,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" "github.com/containers/storage" @@ -47,6 +48,12 @@ func defaultConfigFromMemory() (*Config, error) { c.ImageDefaultTransport = _defaultTransport c.StateType = define.BoltDBStateStore c.OCIRuntime = "runc" + + // If we're running on cgroups v2, default to using crun. + if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 { + c.OCIRuntime = "crun" + } + c.OCIRuntimes = map[string][]string{ "runc": { "/usr/bin/runc", @@ -58,7 +65,15 @@ func defaultConfigFromMemory() (*Config, error) { "/usr/lib/cri-o-runc/sbin/runc", "/run/current-system/sw/bin/runc", }, - // TODO - should we add "crun" defaults here as well? + "crun": { + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", + }, } c.ConmonPath = []string{ "/usr/libexec/podman/conmon", diff --git a/libpod/container.go b/libpod/container.go index 8e24391b9..4f7fc067e 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -138,6 +138,10 @@ type Container struct { // being checkpointed. If requestedIP is set it will be used instead // of config.StaticIP. requestedIP net.IP + // A restored container should have the same MAC address as before + // being checkpointed. If requestedMAC is set it will be used instead + // of config.StaticMAC. + requestedMAC net.HardwareAddr // This is true if a container is restored from a checkpoint. restoreFromCheckpoint bool @@ -296,6 +300,10 @@ type ContainerConfig struct { // This cannot be set unless CreateNetNS is set. // If not set, the container will be dynamically assigned an IP by CNI. StaticIP net.IP `json:"staticIP"` + // StaticMAC is a static MAC to request for the container. + // This cannot be set unless CreateNetNS is set. + // If not set, the container will be dynamically assigned a MAC by CNI. + StaticMAC net.HardwareAddr `json:"staticMAC"` // PortMappings are the ports forwarded to the container's network // namespace // These are not used unless CreateNetNS is true diff --git a/libpod/container_api.go b/libpod/container_api.go index a6f5b54d5..b8cfe02f6 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -794,6 +794,11 @@ type ContainerCheckpointOptions struct { // important to be able to restore a container multiple // times with '--import --name'. IgnoreStaticIP bool + // IgnoreStaticMAC tells the API to ignore the MAC set + // during 'podman run' with '--mac-address'. 
This is especially + // important to be able to restore a container multiple + // times with '--import --name'. + IgnoreStaticMAC bool } // Checkpoint checkpoints a container diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 471648bc8..26d6771b0 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -794,6 +794,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti c.config.StaticIP = nil } + // If a container is restored multiple times from an exported checkpoint with + // the help of '--import --name', the restore will fail if during 'podman run' + // a static container MAC address was set with '--mac-address'. The user + // can tell the restore process to ignore the static MAC with + // '--ignore-static-mac' + if options.IgnoreStaticMAC { + c.config.StaticMAC = nil + } + // Read network configuration from checkpoint // Currently only one interface with one IP is supported. networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status")) @@ -803,9 +812,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // TODO: This implicit restoring with or without IP depending on an // unrelated restore parameter (--name) does not seem like the // best solution. - if err == nil && options.Name == "" && !options.IgnoreStaticIP { + if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) { // The file with the network.status does exist. Let's restore the - // container with the same IP address as during checkpointing. + // container with the same IP address / MAC address as during checkpointing. defer networkStatusFile.Close() var networkStatus []*cnitypes.Result networkJSON, err := ioutil.ReadAll(networkStatusFile) @@ -815,16 +824,35 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err := json.Unmarshal(networkJSON, &networkStatus); err != nil { return err } - // Take the first IP address - var IP net.IP - if len(networkStatus) > 0 { - if len(networkStatus[0].IPs) > 0 { - IP = networkStatus[0].IPs[0].Address.IP + if !options.IgnoreStaticIP { + // Take the first IP address + var IP net.IP + if len(networkStatus) > 0 { + if len(networkStatus[0].IPs) > 0 { + IP = networkStatus[0].IPs[0].Address.IP + } + } + if IP != nil { + // Tell CNI which IP address we want. + c.requestedIP = IP } } - if IP != nil { - // Tell CNI which IP address we want. - c.requestedIP = IP + if !options.IgnoreStaticMAC { + // Take the first device with a defined sandbox. + var MAC net.HardwareAddr + for _, n := range networkStatus[0].Interfaces { + if n.Sandbox != "" { + MAC, err = net.ParseMAC(n.Mac) + if err != nil { + return errors.Wrapf(err, "failed to parse MAC %v", n.Mac) + } + break + } + } + if MAC != nil { + // Tell CNI which MAC address we want. + c.requestedMAC = MAC + } } } @@ -1314,7 +1342,7 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error { // Teardown CNI config on refresh func (c *Container) refreshCNI() error { // Let's try and delete any lingering network config... 
- podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP) + podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP, c.config.StaticMAC) return c.runtime.netPlugin.TearDownPod(podNetwork) } diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go index 470c76959..9e6fffc29 100644 --- a/libpod/events/journal_linux.go +++ b/libpod/events/journal_linux.go @@ -54,6 +54,7 @@ func (e EventJournalD) Write(ee Event) error { // Read reads events from the journal and sends qualified events to the event channel func (e EventJournalD) Read(options ReadOptions) error { + defer close(options.EventChannel) eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until) if err != nil { return errors.Wrapf(err, "failed to generate event options") @@ -87,7 +88,6 @@ func (e EventJournalD) Read(options ReadOptions) error { if err != nil { return err } - defer close(options.EventChannel) for { if _, err := j.Next(); err != nil { return err diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go index 4b65b0ad0..93e6fa3c9 100644 --- a/libpod/events/logfile.go +++ b/libpod/events/logfile.go @@ -41,6 +41,7 @@ func (e EventLogFile) Write(ee Event) error { // Reads from the log file func (e EventLogFile) Read(options ReadOptions) error { + defer close(options.EventChannel) eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until) if err != nil { return errors.Wrapf(err, "unable to generate event options") @@ -68,7 +69,6 @@ func (e EventLogFile) Read(options ReadOptions) error { options.EventChannel <- event } } - close(options.EventChannel) return nil } diff --git a/libpod/kube.go b/libpod/kube.go index d0e7baf95..47a77991e 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -487,13 +487,16 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { if err := c.syncContainer(); err != nil { return nil, errors.Wrapf(err, "unable to sync container during YAML generation") } + logrus.Debugf("Looking in container for user: %s", c.User()) - u, err := lookup.GetUser(c.state.Mountpoint, c.User()) + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil) if err != nil { return nil, err } - user := int64(u.Uid) - sc.RunAsUser = &user + uid := int64(execUser.Uid) + gid := int64(execUser.Gid) + sc.RunAsUser = &uid + sc.RunAsGroup = &gid } return &sc, nil } diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index ac1144fbe..cba7b636a 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -28,23 +28,34 @@ import ( ) // Get an OCICNI network config -func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork { +func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) ocicni.PodNetwork { defaultNetwork := r.netPlugin.GetDefaultNetworkName() network := ocicni.PodNetwork{ Name: name, Namespace: name, // TODO is there something else we should put here? 
We don't know about Kube namespaces ID: id, NetNS: nsPath, - Networks: networks, RuntimeConfig: map[string]ocicni.RuntimeConfig{ defaultNetwork: {PortMappings: ports}, }, } - if staticIP != nil { - network.Networks = []string{defaultNetwork} + if staticIP != nil || staticMAC != nil { + network.Networks = []ocicni.NetAttachment{{Name: defaultNetwork}} + var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports} + if staticIP != nil { + rt.IP = staticIP.String() + } + if staticMAC != nil { + rt.MAC = staticMAC.String() + } network.RuntimeConfig = map[string]ocicni.RuntimeConfig{ - defaultNetwork: {IP: staticIP.String(), PortMappings: ports}, + defaultNetwork: rt, + } + } else { + network.Networks = make([]ocicni.NetAttachment, len(networks)) + for i, netName := range networks { + network.Networks[i].Name = netName } } @@ -62,7 +73,16 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re requestedIP = ctr.config.StaticIP } - podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP) + var requestedMAC net.HardwareAddr + if ctr.requestedMAC != nil { + requestedMAC = ctr.requestedMAC + // cancel request for a specific MAC in case the container is reused later + ctr.requestedMAC = nil + } else { + requestedMAC = ctr.config.StaticMAC + } + + podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC) results, err := r.netPlugin.SetUpPod(podNetwork) if err != nil { @@ -78,10 +98,10 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re networkStatus := make([]*cnitypes.Result, 0) for idx, r := range results { - logrus.Debugf("[%d] CNI result: %v", idx, r.String()) - resultCurrent, err := cnitypes.GetResult(r) + logrus.Debugf("[%d] CNI result: %v", idx, r.Result.String()) + resultCurrent, err := cnitypes.GetResult(r.Result) if err != nil { - return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.String(), err) + return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result.String(), err) } networkStatus = append(networkStatus, resultCurrent) } @@ -443,7 +463,16 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { requestedIP = ctr.config.StaticIP } - podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP) + var requestedMAC net.HardwareAddr + if ctr.requestedMAC != nil { + requestedMAC = ctr.requestedMAC + // cancel request for a specific MAC in case the container is reused later + ctr.requestedMAC = nil + } else { + requestedMAC = ctr.config.StaticMAC + } + + podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC) if err := r.netPlugin.TearDownPod(podNetwork); err != nil { return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID()) diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go index eeaee6d43..46c70e7eb 100644 --- a/libpod/oci_attach_linux.go +++ b/libpod/oci_attach_linux.go @@ -152,7 +152,7 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c func processDetachKeys(keys string) ([]byte, error) { // Check the validity of the provided keys first if len(keys) == 0 { - keys = define.DefaultDetachKeys + return []byte{}, nil } detachKeys, err := term.ToBytes(keys) if err != nil { 
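Because the CLI flags above now default to define.DefaultDetachKeys (ctrl-p,ctrl-q, per the updated man pages), an empty string reaching processDetachKeys means the user explicitly disabled detaching, so it returns an empty byte slice instead of substituting the default. A small sketch of how a ctrl-<value> sequence maps to the bytes the attach loop watches for, assuming the docker/docker/pkg/term package that supplies term.ToBytes here:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// "ctrl-p,ctrl-q" is the documented default detach sequence.
	seq, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}
	fmt.Println(seq) // [16 17]: the DLE and DC1 control bytes

	// Invalid sequences are rejected up front instead of being silently ignored.
	if _, err := term.ToBytes("ctrl-!"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```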
diff --git a/libpod/options.go b/libpod/options.go index 66e8ef93c..00b5626b4 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -1052,6 +1052,31 @@ func WithStaticIP(ip net.IP) CtrCreateOption { } } +// WithStaticMAC indicates that the container should request a static MAC from +// the CNI plugins. +// It cannot be set unless WithNetNS has already been passed. +// Further, it cannot be set if additional CNI networks to join have been +// specified. +func WithStaticMAC(mac net.HardwareAddr) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + if !ctr.config.CreateNetNS { + return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if the container is not creating a network namespace") + } + + if len(ctr.config.Networks) != 0 { + return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if joining additional CNI networks") + } + + ctr.config.StaticMAC = mac + + return nil + } +} + // WithLogDriver sets the log driver for the container func WithLogDriver(driver string) CtrCreateOption { return func(ctr *Container) error { diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go index 64550f545..287bd8474 100644 --- a/pkg/adapter/containers.go +++ b/pkg/adapter/containers.go @@ -400,17 +400,8 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode } } - config, err := r.Runtime.GetConfig() - if err != nil { - return exitCode, err - } - detachKeys := c.String("detach-keys") - if detachKeys == "" { - detachKeys = config.DetachKeys - } - // if the container was created as part of a pod, also start its dependencies, if any. - if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, detachKeys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil { + if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil { // We've manually detached from the container // Do not perform cleanup, or wait for container exit code // Just exit immediately @@ -547,12 +538,13 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) ) options := libpod.ContainerCheckpointOptions{ - Keep: c.Keep, - TCPEstablished: c.TcpEstablished, - TargetFile: c.Import, - Name: c.Name, - IgnoreRootfs: c.IgnoreRootfs, - IgnoreStaticIP: c.IgnoreStaticIP, + Keep: c.Keep, + TCPEstablished: c.TcpEstablished, + TargetFile: c.Import, + Name: c.Name, + IgnoreRootfs: c.IgnoreRootfs, + IgnoreStaticIP: c.IgnoreStaticIP, + IgnoreStaticMAC: c.IgnoreStaticMAC, } filterFuncs = append(filterFuncs, func(c *libpod.Container) bool { diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go index f6795970b..6648edc82 100644 --- a/pkg/adapter/pods.go +++ b/pkg/adapter/pods.go @@ -666,6 +666,58 @@ func getPodPorts(containers []v1.Container) []ocicni.PortMapping { return infraPorts } +func setupSecurityContext(containerConfig *createconfig.CreateConfig, containerYAML v1.Container) { + if containerYAML.SecurityContext == nil { + return + } + if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { + containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem + } + if containerYAML.SecurityContext.Privileged != nil { + containerConfig.Privileged = *containerYAML.SecurityContext.Privileged + } + + if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { + containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation + } + + if seopt := 
containerYAML.SecurityContext.SELinuxOptions; seopt != nil { + if seopt.User != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User)) + } + if seopt.Role != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role)) + } + if seopt.Type != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type)) + } + if seopt.Level != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level)) + } + } + if caps := containerYAML.SecurityContext.Capabilities; caps != nil { + for _, capability := range caps.Add { + containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability)) + } + for _, capability := range caps.Drop { + containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability)) + } + } + if containerYAML.SecurityContext.RunAsUser != nil { + containerConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser) + } + if containerYAML.SecurityContext.RunAsGroup != nil { + if containerConfig.User == "" { + containerConfig.User = "0" + } + containerConfig.User = fmt.Sprintf("%s:%d", containerConfig.User, *containerYAML.SecurityContext.RunAsGroup) + } +} + // kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) { var ( @@ -690,47 +742,8 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container containerConfig.User = imageData.Config.User } - if containerYAML.SecurityContext != nil { - if containerConfig.SecurityOpts != nil { - if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { - containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem - } - if containerYAML.SecurityContext.Privileged != nil { - containerConfig.Privileged = *containerYAML.SecurityContext.Privileged - } - - if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { - containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation - } + setupSecurityContext(&containerConfig, containerYAML) - } - if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil { - if seopt.User != "" { - containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User)) - containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User)) - } - if seopt.Role != "" { - containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role)) - containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role)) - } - if seopt.Type != "" { - containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type)) - containerConfig.LabelOpts 
= append(containerConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type)) - } - if seopt.Level != "" { - containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level)) - containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level)) - } - } - if caps := containerYAML.SecurityContext.Capabilities; caps != nil { - for _, capability := range caps.Add { - containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability)) - } - for _, capability := range caps.Drop { - containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability)) - } - } - } var err error containerConfig.SeccompProfilePath, err = libpod.DefaultSeccompPath() if err != nil { diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go index 2a8fe7332..e054b3b13 100644 --- a/pkg/spec/createconfig.go +++ b/pkg/spec/createconfig.go @@ -396,6 +396,14 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l options = append(options, libpod.WithStaticIP(ip)) } + if c.MacAddress != "" { + mac, err := net.ParseMAC(c.MacAddress) + if err != nil { + return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as MAC address: %v", c.MacAddress, err) + } + options = append(options, libpod.WithStaticMAC(mac)) + } + options = append(options, libpod.WithPrivileged(c.Privileged)) useImageVolumes := c.ImageVolumeType == TypeBind diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go index 4b43ceb30..2d3efcbef 100644 --- a/test/e2e/checkpoint_test.go +++ b/test/e2e/checkpoint_test.go @@ -334,6 +334,10 @@ var _ = Describe("Podman checkpoint", func() { IPBefore.WaitWithDefaultTimeout() Expect(IPBefore.ExitCode()).To(Equal(0)) + MACBefore := podmanTest.Podman([]string{"inspect", "-l", "--format={{.NetworkSettings.MacAddress}}"}) + MACBefore.WaitWithDefaultTimeout() + Expect(MACBefore.ExitCode()).To(Equal(0)) + result := podmanTest.Podman([]string{"container", "checkpoint", "test_name"}) result.WaitWithDefaultTimeout() @@ -348,9 +352,16 @@ var _ = Describe("Podman checkpoint", func() { IPAfter.WaitWithDefaultTimeout() Expect(IPAfter.ExitCode()).To(Equal(0)) + MACAfter := podmanTest.Podman([]string{"inspect", "-l", "--format={{.NetworkSettings.MacAddress}}"}) + MACAfter.WaitWithDefaultTimeout() + Expect(MACAfter.ExitCode()).To(Equal(0)) + // Check that IP address did not change between checkpointing and restoring Expect(IPBefore.OutputToString()).To(Equal(IPAfter.OutputToString())) + // Check that MAC address did not change between checkpointing and restoring + Expect(MACBefore.OutputToString()).To(Equal(MACAfter.OutputToString())) + Expect(result.ExitCode()).To(Equal(0)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go new file mode 100644 index 000000000..6460659f7 --- /dev/null +++ b/test/e2e/create_staticmac_test.go @@ -0,0 +1,46 @@ +// +build !remoteclient + +package integration + +import ( + "os" + + . "github.com/containers/libpod/test/utils" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Podman run with --mac-address flag", func() { + var ( + tempdir string + err error + podmanTest *PodmanTestIntegration + ) + + BeforeEach(func() { + SkipIfRootless() + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + podmanTest = PodmanTestCreate(tempdir) + podmanTest.Setup() + podmanTest.SeedImages() + // Cleanup the CNI networks used by the tests + os.RemoveAll("/var/lib/cni/networks/podman") + }) + + AfterEach(func() { + podmanTest.Cleanup() + f := CurrentGinkgoTestDescription() + processTestResult(f) + + }) + + It("Podman run --mac-address", func() { + result := podmanTest.Podman([]string{"run", "--mac-address", "92:d0:c6:0a:29:34", ALPINE, "ip", "addr"}) + result.WaitWithDefaultTimeout() + Expect(result.ExitCode()).To(Equal(0)) + Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:0a:29:34")) + }) +}) diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index 5d3b1238a..603edbe6b 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -208,6 +208,39 @@ var _ = Describe("Podman generate kube", func() { Expect(psOut).To(ContainSubstring("test2")) }) + It("podman generate with user and reimport kube on pod", func() { + podName := "toppod" + _, rc, _ := podmanTest.CreatePod(podName) + Expect(rc).To(Equal(0)) + + session := podmanTest.Podman([]string{"create", "--pod", podName, "--name", "test1", "--user", "100:200", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Equal(0)) + Expect(inspect.OutputToString()).To(ContainSubstring("100:200")) + + outputFile := filepath.Join(podmanTest.RunRoot, "pod.yaml") + kube := podmanTest.Podman([]string{"generate", "kube", "-f", outputFile, podName}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"pod", "rm", "-af"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"play", "kube", outputFile}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"}) + inspect1.WaitWithDefaultTimeout() + Expect(inspect1.ExitCode()).To(Equal(0)) + Expect(inspect1.OutputToString()).To(ContainSubstring(inspect.OutputToString())) + }) + It("podman generate kube with volume", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) diff --git a/test/e2e/test.yaml b/test/e2e/test.yaml index 319d6a4a0..98d2c91df 100644 --- a/test/e2e/test.yaml +++ b/test/e2e/test.yaml @@ -24,6 +24,9 @@ spec: name: test resources: {} securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 allowPrivilegeEscalation: true capabilities: {} privileged: false diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go index 0f14d3427..22b111742 100644 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -25,6 +25,7 @@ import ( "github.com/containernetworking/cni/pkg/invoke" "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/utils" "github.com/containernetworking/cni/pkg/version" ) @@ -32,6 
+33,10 @@ var ( CacheDir = "/var/lib/cni" ) +const ( + CNICacheV1 = "cniCacheV1" +) + // A RuntimeConf holds the arguments to one invocation of a CNI plugin // excepting the network configuration, with the nested exception that // the `runtimeConfig` from the network configuration is included @@ -48,7 +53,7 @@ type RuntimeConf struct { // to the plugin CapabilityArgs map[string]interface{} - // A cache directory in which to library data. Defaults to CacheDir + // DEPRECATED. Will be removed in a future release. CacheDir string } @@ -70,19 +75,22 @@ type CNI interface { CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) } type CNIConfig struct { - Path []string - exec invoke.Exec + Path []string + exec invoke.Exec + cacheDir string } // CNIConfig implements the CNI interface @@ -92,9 +100,18 @@ var _ CNI = &CNIConfig{} // in the given paths and use the given exec interface to run those plugins, // or if the exec interface is not given, will use a default exec handler. func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return NewCNIConfigWithCacheDir(path, "", exec) +} + +// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins +// in the given paths use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +// The given cache directory will be used for temporary data storage when needed. 
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { return &CNIConfig{ - Path: path, - exec: exec, + Path: path, + cacheDir: cacheDir, + exec: exec, } } @@ -165,33 +182,122 @@ func (c *CNIConfig) ensureExec() invoke.Exec { return c.exec } -func getResultCacheFilePath(netName string, rt *RuntimeConf) string { - cacheDir := rt.CacheDir - if cacheDir == "" { - cacheDir = CacheDir +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) } - return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)) + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil } -func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error { +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult data, err := json.Marshal(result) if err != nil { return err } - fname := getResultCacheFilePath(netName, rt) + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { return err } - return ioutil.WriteFile(fname, data, 0600) + + return ioutil.WriteFile(fname, newBytes, 0600) } -func delCachedResult(netName string, rt *RuntimeConf) error { - fname := getResultCacheFilePath(netName, rt) +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } return os.Remove(fname) } -func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { - fname := getResultCacheFilePath(netName, rt) +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached 
result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %v", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, &newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } data, err := ioutil.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk @@ -222,16 +328,73 @@ func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, return result, err } +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %v", netName, err) + } + + // Read the version of the cached result + decoder := version.ConfigDecoder{} + resultCniVersion, err := decoder.Decode(newBytes) + if err != nil { + return nil, err + } + + // Ensure we can understand the result + result, err := version.NewResult(resultCniVersion, newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil && resultCniVersion != cniVersion { + return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err) + } + return result, err +} + // GetNetworkListCachedResult returns the cached Result of the previous -// previous AddNetworkList() operation for a network list, or an error. +// AddNetworkList() operation for a network list, or an error. func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { - return getCachedResult(list.Name, list.CNIVersion, rt) + return c.getCachedResult(list.Name, list.CNIVersion, rt) } // GetNetworkCachedResult returns the cached Result of the previous -// previous AddNetwork() operation for a network, or an error. +// AddNetwork() operation for a network, or an error. 
func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) } func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { @@ -240,6 +403,12 @@ func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net if err != nil { return nil, err } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) if err != nil { @@ -260,7 +429,7 @@ func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, } } - if err = setCachedResult(result, list.Name, rt); err != nil { + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err) } @@ -295,7 +464,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis return nil } - cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt) + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) if err != nil { return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) } @@ -332,7 +501,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if gtet { - cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt) + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) if err != nil { return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) } @@ -344,7 +513,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, return err } } - _ = delCachedResult(list.Name, rt) + _ = c.cacheDel(list.Name, rt) return nil } @@ -356,7 +525,7 @@ func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *Runt return nil, err } - if err = setCachedResult(result, net.Network.Name, rt); err != nil { + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err) } @@ -372,7 +541,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) } - cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) if err != nil { return fmt.Errorf("failed to get network %q 
cached result: %v", net.Network.Name, err) } @@ -387,7 +556,7 @@ func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *Runt if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { return err } else if gtet { - cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) if err != nil { return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) } @@ -396,7 +565,7 @@ func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *Runt if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { return err } - _ = delCachedResult(net.Network.Name, rt) + _ = c.cacheDel(net.Network.Name, rt) return nil } @@ -455,7 +624,8 @@ func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([] // validatePlugin checks that an individual plugin's configuration is sane func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { - pluginPath, err := invoke.FindInPath(pluginName, c.Path) + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) if err != nil { return err } diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index ea56c509d..d8920cf8c 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -114,11 +114,11 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { for i, conf := range plugins { newBytes, err := json.Marshal(conf) if err != nil { - return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err) + return nil, fmt.Errorf("failed to marshal plugin config %d: %v", i, err) } netConf, err := ConfFromBytes(newBytes) if err != nil { - return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err) + return nil, fmt.Errorf("failed to parse plugin config %d: %v", i, err) } list.Plugins = append(list.Plugins, netConf) } diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go index 913528c1d..d31a44e87 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -32,7 +32,7 @@ type inherited struct{} var inheritArgsFromEnv inherited -func (_ *inherited) AsEnv() []string { +func (*inherited) AsEnv() []string { return nil } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go index bd8640fc9..4eac64899 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -36,7 +36,7 @@ func (b *UnmarshallableBool) UnmarshalText(data []byte) error { case "0", "false": *b = false default: - return fmt.Errorf("Boolean unmarshal error: invalid input %s", s) + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) } return nil } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index d0d11006a..3e185c1ce 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -16,7 +16,6 @@ package types import ( 
"encoding/json" - "errors" "fmt" "io" "net" @@ -134,9 +133,16 @@ func (r *Route) String() string { // Well known error codes // see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes const ( - ErrUnknown uint = iota // 0 - ErrIncompatibleCNIVersion // 1 - ErrUnsupportedField // 2 + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 ) type Error struct { @@ -145,6 +151,14 @@ type Error struct { Details string `json:"details,omitempty"` } +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + func (e *Error) Error() string { details := "" if e.Details != "" { @@ -194,6 +208,3 @@ func prettyPrint(obj interface{}) error { _, err = os.Stdout.Write(data) return err } - -// NotImplementedError is used to indicate that a method is not implemented for the given platform -var NotImplementedError = errors.New("Not Implemented") diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go new file mode 100644 index 000000000..324c40dea --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -0,0 +1,51 @@ +// Copyright 2019 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package utils + +import ( + "regexp" + + "github.com/containernetworking/cni/pkg/types" +) + +// cniValidNameChars is the regexp used to validate valid characters in +// containerID and networkName +const cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + +var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) + +// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters +func ValidateContainerID(containerID string) *types.Error { + + if containerID == "" { + return types.NewError(types.ErrUnknownContainer, "missing containerID", "") + } + if !cniReg.MatchString(containerID) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) + } + return nil +} + +// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters +func ValidateNetworkName(networkName string) *types.Error { + + if networkName == "" { + return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") + } + if !cniReg.MatchString(networkName) { + return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) + } + return nil +} diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go index 8743abc56..0cdbf14b7 100644 --- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go +++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go @@ -2,11 +2,14 @@ package ocicni import ( "context" + "encoding/json" "errors" "fmt" + "io/ioutil" "net" "os" "path" + "path/filepath" "sort" "strings" "sync" @@ -21,10 +24,11 @@ import ( ) type cniNetworkPlugin struct { + cniConfig *libcni.CNIConfig loNetwork *cniNetwork sync.RWMutex - defaultNetName string + defaultNetName netName networks map[string]*cniNetwork nsManager *nsManager @@ -47,11 +51,15 @@ type cniNetworkPlugin struct { cacheDir string } +type netName struct { + name string + changeable bool +} + type cniNetwork struct { - name string - filePath string - NetworkConfig *libcni.NetworkConfigList - CNIConfig *libcni.CNIConfig + name string + filePath string + config *libcni.NetworkConfigList } var errMissingDefaultNetwork = errors.New("Missing CNI default network") @@ -186,6 +194,8 @@ func (plugin *cniNetworkPlugin) monitorConfDir(start *sync.WaitGroup) { // If defaultNetName is not empty, a CNI config with that network name will // be used as the default CNI network, and container network operations will // fail until that network config is present and valid. +// If defaultNetName is empty, CNI config files should be reloaded real-time and +// defaultNetName should be changeable and determined by file sorting. func InitCNI(defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) { return initCNI(nil, "", defaultNetName, confDir, binDirs...) 
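The new pkg/utils validators above return the typed *types.Error introduced earlier in this diff, so callers get well-known CNI error codes instead of plain strings. A small sketch of using them together; the wrapper function and the commented-out error are illustrative:

    package example

    import (
        "github.com/containernetworking/cni/pkg/types"
        "github.com/containernetworking/cni/pkg/utils"
    )

    // validateRequest rejects malformed identifiers before any plugin is invoked.
    func validateRequest(containerID, netName string) *types.Error {
        if e := utils.ValidateContainerID(containerID); e != nil {
            return e // e.g. ErrUnknownContainer (3) for an empty container ID
        }
        if e := utils.ValidateNetworkName(netName); e != nil {
            return e // e.g. ErrInvalidNetworkConfig (7) for an empty network name
        }
        // A runtime can also construct its own well-known error, for example:
        //   types.NewError(types.ErrTryAgainLater, "network busy", "")
        return nil
    }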
} @@ -198,17 +208,24 @@ func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir strin if len(binDirs) == 0 { binDirs = []string{DefaultBinDir} } + plugin := &cniNetworkPlugin{ - defaultNetName: defaultNetName, - networks: make(map[string]*cniNetwork), - loNetwork: getLoNetwork(exec, binDirs), - confDir: confDir, - binDirs: binDirs, - shutdownChan: make(chan struct{}), - done: &sync.WaitGroup{}, - pods: make(map[string]*podLock), - exec: exec, - cacheDir: cacheDir, + cniConfig: libcni.NewCNIConfig(binDirs, exec), + defaultNetName: netName{ + name: defaultNetName, + // If defaultNetName is not assigned in initialization, + // it should be changeable + changeable: defaultNetName == "", + }, + networks: make(map[string]*cniNetwork), + loNetwork: getLoNetwork(), + confDir: confDir, + binDirs: binDirs, + shutdownChan: make(chan struct{}), + done: &sync.WaitGroup{}, + pods: make(map[string]*podLock), + exec: exec, + cacheDir: cacheDir, } if exec == nil { @@ -246,7 +263,7 @@ func (plugin *cniNetworkPlugin) Shutdown() error { return nil } -func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[string]*cniNetwork, string, error) { +func loadNetworks(confDir string, cni *libcni.CNIConfig) (map[string]*cniNetwork, string, error) { files, err := libcni.ConfFiles(confDir, []string{".conf", ".conflist", ".json"}) if err != nil { return nil, "", err @@ -284,17 +301,30 @@ func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[st logrus.Warningf("CNI config list %s has no networks, skipping", confFile) continue } + + // Validation on CNI config should be done to pre-check presence + // of plugins which are necessary. + if _, err := cni.ValidateNetworkList(context.TODO(), confList); err != nil { + logrus.Warningf("Error validating CNI config file %s: %v", confFile, err) + continue + } + if confList.Name == "" { confList.Name = path.Base(confFile) } + cniNet := &cniNetwork{ + name: confList.Name, + filePath: confFile, + config: confList, + } + logrus.Infof("Found CNI network %s (type=%v) at %s", confList.Name, confList.Plugins[0].Network.Type, confFile) - networks[confList.Name] = &cniNetwork{ - name: confList.Name, - filePath: confFile, - NetworkConfig: confList, - CNIConfig: libcni.NewCNIConfig(binDirs, exec), + if _, ok := networks[confList.Name]; !ok { + networks[confList.Name] = cniNet + } else { + logrus.Infof("Ignore CNI network %s (type=%v) at %s because already exists", confList.Name, confList.Plugins[0].Network.Type, confFile) } if defaultNetName == "" { @@ -305,39 +335,49 @@ func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[st return networks, defaultNetName, nil } -func getLoNetwork(exec cniinvoke.Exec, binDirs []string) *cniNetwork { - loConfig, err := libcni.ConfListFromBytes([]byte(`{ - "cniVersion": "0.2.0", - "name": "cni-loopback", +const ( + loIfname string = "lo" + loNetname string = "cni-loopback" +) + +func getLoNetwork() *cniNetwork { + loConfig, err := libcni.ConfListFromBytes([]byte(fmt.Sprintf(`{ + "cniVersion": "0.3.1", + "name": "%s", "plugins": [{ "type": "loopback" }] -}`)) +}`, loNetname))) if err != nil { // The hardcoded config above should always be valid and unit tests will // catch this panic(err) } loNetwork := &cniNetwork{ - name: "lo", - NetworkConfig: loConfig, - CNIConfig: libcni.NewCNIConfig(binDirs, exec), + name: loIfname, + config: loConfig, } return loNetwork } func (plugin *cniNetworkPlugin) syncNetworkConfig() error { - networks, defaultNetName, err := 
loadNetworks(plugin.exec, plugin.confDir, plugin.binDirs) + networks, defaultNetName, err := loadNetworks(plugin.confDir, plugin.cniConfig) if err != nil { return err } plugin.Lock() defer plugin.Unlock() - if plugin.defaultNetName == "" { - plugin.defaultNetName = defaultNetName + + // Update defaultNetName if it is changeable + if plugin.defaultNetName.changeable { + plugin.defaultNetName.name = defaultNetName + logrus.Infof("Update default CNI network name to %s", defaultNetName) + } else { + logrus.Warnf("Default CNI network name %s is unchangeable", plugin.defaultNetName.name) } + plugin.networks = networks return nil @@ -356,7 +396,7 @@ func (plugin *cniNetworkPlugin) getNetwork(name string) (*cniNetwork, error) { func (plugin *cniNetworkPlugin) GetDefaultNetworkName() string { plugin.RLock() defer plugin.RUnlock() - return plugin.defaultNetName + return plugin.defaultNetName.name } func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork { @@ -382,27 +422,120 @@ func (plugin *cniNetworkPlugin) Name() string { return CNIPluginName } -func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFunc func(*cniNetwork, string, *PodNetwork, RuntimeConfig) error) error { +func (plugin *cniNetworkPlugin) loadNetworkFromCache(name string, rt *libcni.RuntimeConf) (*cniNetwork, *libcni.RuntimeConf, error) { + cniNet := &cniNetwork{ + name: name, + config: &libcni.NetworkConfigList{ + Name: name, + }, + } + + var confBytes []byte + var err error + confBytes, rt, err = plugin.cniConfig.GetNetworkListCachedConfig(cniNet.config, rt) + if err != nil { + return nil, nil, err + } else if confBytes == nil { + return nil, nil, fmt.Errorf("network %q not found in CNI cache", name) + } + + cniNet.config, err = libcni.ConfListFromBytes(confBytes) + if err != nil { + // Might be a plain NetworkConfig + netConf, err := libcni.ConfFromBytes(confBytes) + if err != nil { + return nil, nil, err + } + // Up-convert to a NetworkConfigList + cniNet.config, err = libcni.ConfListFromConf(netConf) + if err != nil { + return nil, nil, err + } + } + + return cniNet, rt, nil +} + +type forEachNetworkFn func(*cniNetwork, *PodNetwork, *libcni.RuntimeConf) error + +func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache bool, actionFn forEachNetworkFn) error { networks := podNetwork.Networks if len(networks) == 0 { - networks = append(networks, plugin.GetDefaultNetworkName()) + networks = append(networks, NetAttachment{ + Name: plugin.GetDefaultNetworkName(), + }) } - for i, netName := range networks { - // Interface names start at "eth0" and count up for each network - ifName := fmt.Sprintf("eth%d", i) - network, err := plugin.getNetwork(netName) + + allIfNames := make(map[string]bool) + for _, req := range networks { + if req.Ifname != "" { + // Make sure the requested name isn't already assigned + if allIfNames[req.Ifname] { + return fmt.Errorf("network %q requested interface name %q already assigned", req.Name, req.Ifname) + } + allIfNames[req.Ifname] = true + } + } + + for _, network := range networks { + ifName := network.Ifname + if ifName == "" { + for i := 0; i < 10000; i++ { + candidate := fmt.Sprintf("eth%d", i) + if !allIfNames[candidate] { + allIfNames[candidate] = true + ifName = candidate + break + } + } + if ifName == "" { + return fmt.Errorf("failed to find free interface name for network %q", network.Name) + } + } + + rt, err := buildCNIRuntimeConf(plugin.cacheDir, podNetwork, ifName, podNetwork.RuntimeConfig[network.Name]) if err != nil { - 
logrus.Errorf(err.Error()) + logrus.Errorf("error building CNI runtime config: %v", err) return err } - if err := forEachFunc(network, ifName, podNetwork, podNetwork.RuntimeConfig[netName]); err != nil { + + var cniNet *cniNetwork + if fromCache { + var newRt *libcni.RuntimeConf + cniNet, newRt, err = plugin.loadNetworkFromCache(network.Name, rt) + if err != nil { + logrus.Errorf("error loading cached network config: %v", err) + // fall back to loading from existing plugins on disk + } else { + // Use the updated RuntimeConf + rt = newRt + } + } + if cniNet == nil { + cniNet, err = plugin.getNetwork(network.Name) + if err != nil { + logrus.Errorf(err.Error()) + return err + } + } + + if err := actionFn(cniNet, podNetwork, rt); err != nil { return err } } return nil } -func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Result, error) { +func buildLoopbackRuntimeConf(cacheDir string, podNetwork *PodNetwork) *libcni.RuntimeConf { + return &libcni.RuntimeConf{ + ContainerID: podNetwork.ID, + NetNS: podNetwork.NetNS, + CacheDir: cacheDir, + IfName: loIfname, + } +} + +func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]NetResult, error) { if err := plugin.networksAvailable(&podNetwork); err != nil { return nil, err } @@ -410,20 +543,26 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu plugin.podLock(podNetwork).Lock() defer plugin.podUnlock(podNetwork) - _, err := plugin.loNetwork.addToNetwork(plugin.cacheDir, &podNetwork, "lo", RuntimeConfig{}) - if err != nil { + loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork) + if _, err := plugin.loNetwork.addToNetwork(loRt, plugin.cniConfig); err != nil { logrus.Errorf("Error while adding to cni lo network: %s", err) return nil, err } - results := make([]cnitypes.Result, 0) - if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error { - result, err := network.addToNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig) + results := make([]NetResult, 0) + if err := plugin.forEachNetwork(&podNetwork, false, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error { + result, err := network.addToNetwork(rt, plugin.cniConfig) if err != nil { logrus.Errorf("Error while adding pod to CNI network %q: %s", network.name, err) return err } - results = append(results, result) + results = append(results, NetResult{ + Result: result, + NetAttachment: NetAttachment{ + Name: network.name, + Ifname: rt.IfName, + }, + }) return nil }); err != nil { return nil, err @@ -432,16 +571,99 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu return results, nil } +func (plugin *cniNetworkPlugin) getCachedNetworkInfo(containerID string) ([]NetAttachment, error) { + cacheDir := libcni.CacheDir + if plugin.cacheDir != "" { + cacheDir = plugin.cacheDir + } + + dirPath := filepath.Join(cacheDir, "results") + entries, err := ioutil.ReadDir(dirPath) + if err != nil { + return nil, err + } + + fileNames := make([]string, 0, len(entries)) + for _, e := range entries { + fileNames = append(fileNames, e.Name()) + } + sort.Strings(fileNames) + + attachments := []NetAttachment{} + for _, fname := range fileNames { + part := fmt.Sprintf("-%s-", containerID) + pos := strings.Index(fname, part) + if pos <= 0 || pos+len(part) >= len(fname) { + continue + } + + cacheFile := filepath.Join(dirPath, fname) + bytes, err := ioutil.ReadFile(cacheFile) + if err != nil { + 
logrus.Warningf("failed to read CNI cache file %s: %v", cacheFile, err) + continue + } + + cachedInfo := struct { + Kind string `json:"kind"` + IfName string `json:"ifName"` + ContainerID string `json:"containerID"` + NetName string `json:"networkName"` + }{} + + if err := json.Unmarshal(bytes, &cachedInfo); err != nil { + logrus.Warningf("failed to unmarshal CNI cache file %s: %v", cacheFile, err) + continue + } + if cachedInfo.Kind != libcni.CNICacheV1 { + logrus.Warningf("unknown CNI cache file %s kind %q", cacheFile, cachedInfo.Kind) + continue + } + if cachedInfo.ContainerID != containerID { + continue + } + // Ignore the loopback interface; it's handled separately + if cachedInfo.IfName == loIfname && cachedInfo.NetName == loNetname { + continue + } + if cachedInfo.IfName == "" || cachedInfo.NetName == "" { + logrus.Warningf("missing CNI cache file %s ifname %q or netname %q", cacheFile, cachedInfo.IfName, cachedInfo.NetName) + continue + } + + attachments = append(attachments, NetAttachment{ + Name: cachedInfo.NetName, + Ifname: cachedInfo.IfName, + }) + } + return attachments, nil +} + +// TearDownPod tears down pod networks. Prefers cached pod attachment information +// but falls back to given network attachment information. func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error { + if len(podNetwork.Networks) == 0 { + attachments, err := plugin.getCachedNetworkInfo(podNetwork.ID) + if err == nil && len(attachments) > 0 { + podNetwork.Networks = attachments + } + } + if err := plugin.networksAvailable(&podNetwork); err != nil { return err } + loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork) + if err := plugin.loNetwork.deleteFromNetwork(loRt, plugin.cniConfig); err != nil { + logrus.Errorf("Error while removing pod from CNI lo network: %v", err) + // Loopback teardown errors are not fatal + } + plugin.podLock(podNetwork).Lock() defer plugin.podUnlock(podNetwork) - return plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error { - if err := network.deleteFromNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig); err != nil { + return plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error { + if err := network.deleteFromNetwork(rt, plugin.cniConfig); err != nil { logrus.Errorf("Error while removing pod from CNI network %q: %s", network.name, err) return err } @@ -451,19 +673,25 @@ func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error { // GetPodNetworkStatus returns IP addressing and interface details for all // networks attached to the pod. 
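Because TearDownPod above now consults the per-container entries in the CNI results cache before falling back to the attachments supplied by the caller, a runtime can tear down networking with only the container ID and netns path. A hedged sketch of such a caller; other PodNetwork fields that a real runtime would normally set are omitted for brevity:

    package example

    import "github.com/cri-o/ocicni/pkg/ocicni"

    // teardown leaves Networks empty so TearDownPod recovers the network and
    // interface names from the cached results written at setup time.
    func teardown(plugin ocicni.CNIPlugin, containerID, netnsPath string) error {
        return plugin.TearDownPod(ocicni.PodNetwork{
            ID:    containerID,
            NetNS: netnsPath,
        })
    }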
-func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cnitypes.Result, error) { +func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]NetResult, error) { plugin.podLock(podNetwork).Lock() defer plugin.podUnlock(podNetwork) - results := make([]cnitypes.Result, 0) - if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error { - result, err := network.checkNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig, plugin.nsManager) + results := make([]NetResult, 0) + if err := plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error { + result, err := network.checkNetwork(rt, plugin.cniConfig, plugin.nsManager, podNetwork.NetNS) if err != nil { logrus.Errorf("Error while checking pod to CNI network %q: %s", network.name, err) return err } if result != nil { - results = append(results, result) + results = append(results, NetResult{ + Result: result, + NetAttachment: NetAttachment{ + Name: network.name, + Ifname: rt.IfName, + }, + }) } return nil }); err != nil { @@ -473,16 +701,9 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cn return results, nil } -func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (cnitypes.Result, error) { - rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig) - if err != nil { - logrus.Errorf("Error adding network: %v", err) - return nil, err - } - - netconf, cninet := network.NetworkConfig, network.CNIConfig - logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) - res, err := cninet.AddNetworkList(context.Background(), netconf, rt) +func (network *cniNetwork) addToNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) (cnitypes.Result, error) { + logrus.Infof("About to add CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type) + res, err := cni.AddNetworkList(context.Background(), network.config, rt) if err != nil { logrus.Errorf("Error adding network: %v", err) return nil, err @@ -491,18 +712,10 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, return res, nil } -func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig, nsManager *nsManager) (cnitypes.Result, error) { - - rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig) - if err != nil { - logrus.Errorf("Error checking network: %v", err) - return nil, err - } - - netconf, cninet := network.NetworkConfig, network.CNIConfig - logrus.Infof("About to check CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) +func (network *cniNetwork) checkNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig, nsManager *nsManager, netns string) (cnitypes.Result, error) { + logrus.Infof("About to check CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type) - gtet, err := cniversion.GreaterThanOrEqualTo(netconf.CNIVersion, "0.4.0") + gtet, err := cniversion.GreaterThanOrEqualTo(network.config.CNIVersion, "0.4.0") if err != nil { return nil, err } @@ -511,15 +724,15 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, // When CNIVersion supports Check, use it. Otherwise fall back on what was done initially. 
if gtet { - err = cninet.CheckNetworkList(context.Background(), netconf, rt) - logrus.Infof("Checking CNI network %s (config version=%v)", netconf.Name, netconf.CNIVersion) + err = cni.CheckNetworkList(context.Background(), network.config, rt) + logrus.Infof("Checking CNI network %s (config version=%v)", network.name, network.config.CNIVersion) if err != nil { logrus.Errorf("Error checking network: %v", err) return nil, err } } - result, err = cninet.GetNetworkListCachedResult(netconf, rt) + result, err = cni.GetNetworkListCachedResult(network.config, rt) if err != nil { logrus.Errorf("Error GetNetworkListCachedResult: %v", err) return nil, err @@ -528,19 +741,19 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, } // result doesn't exist, create one - logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", netconf.Name, netconf.CNIVersion, nsManager) + logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", network.name, network.config.CNIVersion, nsManager) var cniInterface *cnicurrent.Interface ips := []*cnicurrent.IPConfig{} errs := []error{} for _, version := range []string{"4", "6"} { - ip, mac, err := getContainerDetails(nsManager, podNetwork.NetNS, ifName, "-"+version) + ip, mac, err := getContainerDetails(nsManager, netns, rt.IfName, "-"+version) if err == nil { if cniInterface == nil { cniInterface = &cnicurrent.Interface{ - Name: ifName, + Name: rt.IfName, Mac: mac.String(), - Sandbox: podNetwork.NetNS, + Sandbox: netns, } } ips = append(ips, &cnicurrent.IPConfig{ @@ -557,25 +770,23 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, } result = &cnicurrent.Result{ - CNIVersion: netconf.CNIVersion, + CNIVersion: network.config.CNIVersion, Interfaces: []*cnicurrent.Interface{cniInterface}, IPs: ips, } - return result, nil -} - -func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) error { - rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig) + // Result must be the same CNIVersion as the CNI config + converted, err := result.GetAsVersion(network.config.CNIVersion) if err != nil { - logrus.Errorf("Error deleting network: %v", err) - return err + return nil, err } - netconf, cninet := network.NetworkConfig, network.CNIConfig - logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) - err = cninet.DelNetworkList(context.Background(), netconf, rt) - if err != nil { + return converted, nil +} + +func (network *cniNetwork) deleteFromNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) error { + logrus.Infof("About to del CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type) + if err := cni.DelNetworkList(context.Background(), network.config, rt); err != nil { logrus.Errorf("Error deleting network: %v", err) return err } @@ -608,6 +819,16 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string, rt.Args = append(rt.Args, [2]string{"IP", ip}) } + // Add the requested static MAC to CNI_ARGS + mac := runtimeConfig.MAC + if mac != "" { + _, err := net.ParseMAC(mac) + if err != nil { + return nil, fmt.Errorf("unable to parse MAC address %q: %v", mac, err) + } + rt.Args = append(rt.Args, [2]string{"MAC", mac}) + } + // Set PortMappings in Capabilities if len(runtimeConfig.PortMappings) != 0 { rt.CapabilityArgs["portMappings"] = runtimeConfig.PortMappings diff --git 
a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go index 8709711e0..717ecda33 100644 --- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go +++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go @@ -44,6 +44,9 @@ type RuntimeConfig struct { // with the hostlocal IP allocator. If left unset, an IP will be // dynamically allocated. IP string + // MAC is a static MAC address to be assigned to the network interface. + // If left unset, a MAC will be dynamically allocated. + MAC string // PortMappings is the port mapping of the sandbox. PortMappings []PortMapping // Bandwidth is the bandwidth limiting of the pod @@ -75,9 +78,10 @@ type PodNetwork struct { // NetNS is the network namespace path of the sandbox. NetNS string - // Networks is a list of CNI network names to attach to the sandbox - // Leave this list empty to attach the default network to the sandbox - Networks []string + // Networks is a list of CNI network names (and optional interface + // names) to attach to the sandbox. Leave this list empty to attach the + // default network to the sandbox + Networks []NetAttachment // NetworkConfig is configuration specific to a single CNI network. // It is optional, and can be omitted for some or all specified networks @@ -85,6 +89,24 @@ type PodNetwork struct { RuntimeConfig map[string]RuntimeConfig } +// NetAttachment describes a container network attachment +type NetAttachment struct { + // NetName contains the name of the CNI network to which the container + // should be or is attached + Name string + // Ifname contains the optional interface name of the attachment + Ifname string +} + +// NetResult contains the result the network attachment operation +type NetResult struct { + // Result is the CNI Result + Result types.Result + // NetAttachment contains the network and interface names of this + // network attachment + NetAttachment +} + // CNIPlugin is the interface that needs to be implemented by a plugin type CNIPlugin interface { // Name returns the plugin's name. This will be used when searching @@ -98,13 +120,13 @@ type CNIPlugin interface { // SetUpPod is the method called after the sandbox container of // the pod has been created but before the other containers of the // pod are launched. 
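The RuntimeConfig.MAC field and the NetAttachment type above let a caller pin both the interface name and a static MAC address for a given network; the MAC is validated with net.ParseMAC and handed to plugins through the runtime arguments (CNI_ARGS). A sketch that assumes a CNI network named "podman"; every literal value is illustrative:

    package example

    import "github.com/cri-o/ocicni/pkg/ocicni"

    func setupWithStaticMAC(plugin ocicni.CNIPlugin, containerID, netnsPath string) ([]ocicni.NetResult, error) {
        pn := ocicni.PodNetwork{
            ID:    containerID,
            NetNS: netnsPath,
            // Attach one explicitly named network on a chosen interface.
            Networks: []ocicni.NetAttachment{
                {Name: "podman", Ifname: "eth0"},
            },
            // Per-network runtime settings, keyed by network name.
            RuntimeConfig: map[string]ocicni.RuntimeConfig{
                "podman": {MAC: "92:d0:c6:0a:29:33"},
            },
        }
        // Each NetResult reports the network name, the interface actually
        // used, and the CNI Result for that attachment.
        return plugin.SetUpPod(pn)
    }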
- SetUpPod(network PodNetwork) ([]types.Result, error) + SetUpPod(network PodNetwork) ([]NetResult, error) // TearDownPod is the method called before a pod's sandbox container will be deleted TearDownPod(network PodNetwork) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox - GetPodNetworkStatus(network PodNetwork) ([]types.Result, error) + GetPodNetworkStatus(network PodNetwork) ([]NetResult, error) // NetworkStatus returns error if the network plugin is in error state Status() error diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go index 95ae54fbf..29b31cf78 100644 --- a/vendor/github.com/json-iterator/go/iter.go +++ b/vendor/github.com/json-iterator/go/iter.go @@ -74,6 +74,7 @@ type Iterator struct { buf []byte head int tail int + depth int captureStartedAt int captured []byte Error error @@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator { buf: nil, head: 0, tail: 0, + depth: 0, } } @@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { buf: make([]byte, bufSize), head: 0, tail: 0, + depth: 0, } } @@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator { buf: input, head: 0, tail: len(input), + depth: 0, } } @@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator { iter.reader = reader iter.head = 0 iter.tail = 0 + iter.depth = 0 return iter } @@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator { iter.buf = input iter.head = 0 iter.tail = len(input) + iter.depth = 0 return iter } @@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} { return nil } } + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go index 6188cb457..204fe0e09 100644 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) { func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c := iter.nextToken() if c == '[' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c != ']' { iter.unreadByte() if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() for c == ',' { if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != ']' { iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } - return true + return iter.decrementDepth() } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index 1c5757671..b65137114 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c 
:= iter.nextToken() var field string if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() @@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { @@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { field = iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 8fcdc3b69..9303de41e 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() { func (iter *Iterator) skipArray() { level := 1 + if !iter.incrementDepth() { + return + } for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() { i = iter.head - 1 // it will be i++ soon case '[': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case ']': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { @@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() { func (iter *Iterator) skipObject() { level := 1 + if !iter.incrementDepth() { + return + } + for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() { i = iter.head - 1 // it will 
be i++ soon case '{': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case '}': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 4459e203f..74974ba74 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx { // ReadVal copy the underlying JSON into go interface, same as json.Unmarshal func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth cacheKey := reflect2.RTypeOf(obj) decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { @@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) { return } decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } } // WriteVal copy the go interface into underlying JSON, same as json.Marshal diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 05e8fbf1f..e27e8d191 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } - tagParts := strings.Split(tag, ",") if tag == "-" { continue } + tagParts := strings.Split(tag, ",") if field.Anonymous() && (tag == "" || tagParts[0] == "") { if field.Type().Kind() == reflect.Struct { structDescriptor := describeStruct(ctx, field.Type()) diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 547b4421e..08e9a3912 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -249,6 +249,10 @@ type mapEncoder struct { } func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } stream.WriteObjectStart() iter := encoder.mapType.UnsafeIterate(ptr) for i := 0; iter.HasNext(); i++ { diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go index fea50719d..3e21f3756 100644 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -3,8 +3,9 @@ package jsoniter import ( "encoding" "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() @@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - bytes, err := json.Marshal(obj) + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } stream.Write(bytes) } } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 932641ac4..5ad5cc561 100644 --- 
a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } var c byte for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) @@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if c != '}' { iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) } + iter.decrementDepth() } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { if iter.readFieldHash() == decoder.fieldHash { decoder.fieldDecoder.Decode(ptr, iter) @@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type twoFieldsStructDecoder struct { @@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type threeFieldsStructDecoder struct { @@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fourFieldsStructDecoder struct { @@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fiveFieldsStructDecoder struct { @@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sixFieldsStructDecoder struct { @@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch 
iter.readFieldHash() { case decoder.fieldHash1: @@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sevenFieldsStructDecoder struct { @@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type eightFieldsStructDecoder struct { @@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type nineFieldsStructDecoder struct { @@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type tenFieldsStructDecoder struct { @@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type structFieldDecoder struct { diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index f67074016..ecbdd2734 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.7.1 + +### Fixes +- Bump go-yaml version to cover fixed ddos heuristic (#362) [95e431e] + ## 1.7.0 ### Features diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod index 65eedf696..177a541c4 100644 --- a/vendor/github.com/onsi/gomega/go.mod +++ b/vendor/github.com/onsi/gomega/go.mod @@ -11,5 +11,6 @@ require ( golang.org/x/text v0.3.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v2 v2.2.1 + gopkg.in/yaml.v2 v2.2.4 ) + diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum index b23f6ef02..bbcc05d3e 100644 --- a/vendor/github.com/onsi/gomega/go.sum +++ b/vendor/github.com/onsi/gomega/go.sum @@ -20,5 +20,5 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 
v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index b145768cf..85505f2ec 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.7.0" +const GOMEGA_VERSION = "1.7.1" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml index 0d7bdd9ab..e81cc8805 100644 --- a/vendor/github.com/uber/jaeger-client-go/.travis.yml +++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml @@ -7,21 +7,22 @@ dist: trusty matrix: include: - - go: 1.12.x + - go: 1.13.x env: - TESTS=true - USE_DEP=true - COVERAGE=true - - go: 1.12.x + - go: 1.13.x env: - USE_DEP=true - CROSSDOCK=true - - go: 1.12.x + - go: 1.13.x env: - TESTS=true - USE_DEP=false - USE_GLIDE=true - - go: 1.11.x + # test with previous version of Go + - go: 1.12.x env: - TESTS=true - USE_DEP=true diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md index 31b22e40c..c4590bf93 100644 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -1,6 +1,45 @@ Changes by Version ================== +2.20.0 (2019-11-06) +------------------- + +## New Features + +- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj + + Sampling state is shared between all spans of the trace that are still in memory. + This allows implementation of delayed sampling decisions (see below). + +- Support delayed sampling decisions (#449) -- Yuri Shkuro + + This is a large structural change to how the samplers work. + It allows some samplers to be executed multiple times on different + span events (like setting a tag) and make a positive sampling decision + later in the span life cycle, or even based on children spans. + See [README](./README.md#delayed-sampling) for more details. + + There is a related minor change in behavior of the adaptive (per-operation) sampler, + which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the + operation used to make the sampling decision is always the one provided at span creation. + +- Add experimental tag matching sampler (#452) -- Yuri Shkuro + + A sampler that can sample a trace based on a certain tag added to the root + span or one of its local (in-process) children. The sampler can be used with + another experimental `PrioritySampler` that allows multiple samplers to try + to make a sampling decision, in a certain priority order. 
+ +- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta +- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy + +## Minor patches + +- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro +- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro +- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi +- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro + 2.19.0 (2019-09-23) ------------------- diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock index 1ed86f4a7..5a42ebf16 100644 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock @@ -2,6 +2,14 @@ [[projects]] + digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761" + name = "github.com/BurntSushi/toml" + packages = ["."] + pruneopts = "UT" + revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" + version = "v0.3.1" + +[[projects]] digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] @@ -138,14 +146,6 @@ version = "v1.4.0" [[projects]] - digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" - name = "github.com/uber-go/atomic" - packages = ["."] - pruneopts = "UT" - revision = "df976f2515e274675050de7b3f42545de80594fd" - version = "v1.4.0" - -[[projects]] digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3" name = "github.com/uber/jaeger-lib" packages = [ @@ -158,23 +158,31 @@ version = "v2.2.0" [[projects]] - digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" + digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600" name = "go.uber.org/atomic" packages = ["."] pruneopts = "UT" - revision = "df976f2515e274675050de7b3f42545de80594fd" - version = "v1.4.0" + revision = "9dc4df04d0d1c39369750a9f6c32c39560672089" + version = "v1.5.0" [[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" + digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902" name = "go.uber.org/multierr" packages = ["."] pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" + revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27" + version = "v1.3.0" + +[[projects]] + branch = "master" + digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4" + name = "go.uber.org/tools" + packages = ["update-license"] + pruneopts = "UT" + revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98" [[projects]] - digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e" + digest = "1:6be13632ab4bd5842a097abb3aabac045a8601e19a10da4239e7d8bd83d4b83c" name = "go.uber.org/zap" packages = [ ".", @@ -185,8 +193,19 @@ "zapcore", ] pruneopts = "UT" - revision = "27376062155ad36be76b0f12cf1572a221d3a48c" - version = "v1.10.0" + revision = "a6015e13fab9b744d96085308ce4e8f11bad1996" + version = "v1.12.0" + +[[projects]] + branch = "master" + digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457" [[projects]] branch = "master" @@ -197,23 +216,81 @@ "context/ctxhttp", ] pruneopts = "UT" - revision = "aa69164e4478b84860dc6769c710c699c67058a3" + revision = 
"0deb6923b6d97481cb43bc1043fe5b72a0143032" [[projects]] branch = "master" - digest = "1:712252802d318c8107d8f2136b99aa10feb17eca715245ed915199fbfc260155" + digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a" name = "golang.org/x/sys" packages = ["windows"] pruneopts = "UT" - revision = "0a153f010e6963173baba2306531d173aa843137" + revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611" + +[[projects]] + branch = "master" + digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f" + name = "golang.org/x/tools" + packages = [ + "go/analysis", + "go/analysis/passes/inspect", + "go/ast/astutil", + "go/ast/inspector", + "go/buildutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/packages", + "go/types/objectpath", + "go/types/typeutil", + "internal/fastwalk", + "internal/gopathwalk", + "internal/semver", + "internal/span", + ] + pruneopts = "UT" + revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6" [[projects]] - digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "UT" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" + revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5" + version = "v2.2.4" + +[[projects]] + digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111" + name = "honnef.co/go/tools" + packages = [ + "arg", + "cmd/staticcheck", + "config", + "deprecated", + "facts", + "functions", + "go/types/typeutil", + "internal/cache", + "internal/passes/buildssa", + "internal/renameio", + "internal/sharedcheck", + "lint", + "lint/lintdsl", + "lint/lintutil", + "lint/lintutil/format", + "loader", + "printf", + "simple", + "ssa", + "ssautil", + "staticcheck", + "staticcheck/vrp", + "stylecheck", + "unused", + "version", + ] + pruneopts = "UT" + revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a" + version = "2019.2.3" [solve-meta] analyzer-name = "dep" @@ -229,10 +306,10 @@ "github.com/stretchr/testify/assert", "github.com/stretchr/testify/require", "github.com/stretchr/testify/suite", - "github.com/uber-go/atomic", "github.com/uber/jaeger-lib/metrics", "github.com/uber/jaeger-lib/metrics/metricstest", "github.com/uber/jaeger-lib/metrics/prometheus", + "go.uber.org/atomic", "go.uber.org/zap", "go.uber.org/zap/zapcore", ] diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml index 3e6ac35ae..1fed7f814 100644 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml @@ -15,7 +15,7 @@ version = "^1.1.3" [[constraint]] - name = "github.com/uber-go/atomic" + name = "go.uber.org/atomic" version = "^1" [[constraint]] diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile index 74e11787a..0cfe6a5f6 100644 --- a/vendor/github.com/uber/jaeger-client-go/Makefile +++ b/vendor/github.com/uber/jaeger-client-go/Makefile @@ -1,5 +1,5 @@ PROJECT_ROOT=github.com/uber/jaeger-client-go -PACKAGES := $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u) +PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u) # all .go files that don't exist in hidden directories ALL_SRC := $(shell find . 
-name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ -e ".*/\..*" \ @@ -125,3 +125,4 @@ ifeq ($(CI_SKIP_LINT),true) else make lint endif + diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md index 604d4b571..a3366114d 100644 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ b/vendor/github.com/uber/jaeger-client-go/README.md @@ -182,6 +182,29 @@ are available: 1. `RateLimitingSampler` can be used to allow only a certain fixed number of traces to be sampled per second. +#### Delayed sampling + +Version 2.20 introduced the ability to delay sampling decisions in the life cycle +of the root span. It involves several features and architectural changes: + * **Shared sampling state**: the sampling state is shared across all local + (i.e. in-process) spans for a given trace. + * **New `SamplerV2` API** allows the sampler to be called at multiple points + in the life cycle of a span: + * on span creation + * on overwriting span operation name + * on setting span tags + * on finishing the span + * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler + to indicate if the negative sampling decision is final or not (positive sampling + decisions are always final). If the decision is not final, the sampler will be + called again on further span life cycle events, like setting tags. + +These new features are used in the experimental `x.TagMatchingSampler`, which +can sample a trace based on a certain tag added to the root +span or one of its local (in-process) children. The sampler can be used with +another experimental `x.PrioritySampler` that allows multiple samplers to try +to make a sampling decision, in a certain priority order. + ### Baggage Injection The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go index 6bce1b3b0..965f7c3ee 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -86,6 +86,9 @@ type SamplerConfig struct { // jaeger-agent for the appropriate sampling strategy. // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` + + // Options can be used to programmatically pass additional options to the Remote sampler. + Options []jaeger.SamplerOption } // ReporterConfig configures the reporter. All fields are optional. @@ -357,6 +360,7 @@ func (sc *SamplerConfig) NewSampler( if sc.SamplingRefreshInterval != 0 { options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval)) } + options = append(options, sc.Options...) 
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil } return nil, fmt.Errorf("Unknown sampler type %v", sc.Type) diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go index 14d69b11d..a729bd8fe 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go @@ -52,7 +52,11 @@ const ( // FromEnv uses environment variables to set the tracer's Configuration func FromEnv() (*Configuration, error) { c := &Configuration{} + return c.FromEnv() +} +// FromEnv uses environment variables and overrides existing tracer's Configuration +func (c *Configuration) FromEnv() (*Configuration, error) { if e := os.Getenv(envServiceName); e != "" { c.ServiceName = e } @@ -77,13 +81,21 @@ func FromEnv() (*Configuration, error) { c.Tags = parseTags(e) } - if s, err := samplerConfigFromEnv(); err == nil { + if c.Sampler == nil { + c.Sampler = &SamplerConfig{} + } + + if s, err := c.Sampler.samplerConfigFromEnv(); err == nil { c.Sampler = s } else { return nil, errors.Wrap(err, "cannot obtain sampler config from env") } - if r, err := reporterConfigFromEnv(); err == nil { + if c.Reporter == nil { + c.Reporter = &ReporterConfig{} + } + + if r, err := c.Reporter.reporterConfigFromEnv(); err == nil { c.Reporter = r } else { return nil, errors.Wrap(err, "cannot obtain reporter config from env") @@ -93,9 +105,7 @@ func FromEnv() (*Configuration, error) { } // samplerConfigFromEnv creates a new SamplerConfig based on the environment variables -func samplerConfigFromEnv() (*SamplerConfig, error) { - sc := &SamplerConfig{} - +func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) { if e := os.Getenv(envSamplerType); e != "" { sc.Type = e } @@ -135,9 +145,7 @@ func samplerConfigFromEnv() (*SamplerConfig, error) { } // reporterConfigFromEnv creates a new ReporterConfig based on the environment variables -func reporterConfigFromEnv() (*ReporterConfig, error) { - rc := &ReporterConfig{} - +func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) { if e := os.Getenv(envReporterMaxQueueSize); e != "" { if value, err := strconv.ParseInt(e, 10, 0); err == nil { rc.QueueSize = int(value) diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go index e95b2ba09..0da47b02f 100644 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ b/vendor/github.com/uber/jaeger-client-go/constants.go @@ -22,7 +22,7 @@ import ( const ( // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.19.0" + JaegerClientVersion = "Go-2.20.0" // JaegerClientVersionTagKey is the name of the tag used to report client version. 
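(Illustrative note, not part of the diff: the config_env.go hunk above turns FromEnv into a method that enriches an existing Configuration instead of always building a fresh one. A hedged sketch of the intended call pattern; the field values are illustrative only. Imports assumed: "io" and "github.com/uber/jaeger-client-go/config".)

func tracerFromEnvAndCode() (io.Closer, error) {
    // Start from values set in code, then let the method form of FromEnv
    // overlay any JAEGER_* environment variables on top of them.
    cfg := &config.Configuration{
        ServiceName: "my-service", // assumed; JAEGER_SERVICE_NAME still overrides it
        Sampler:     &config.SamplerConfig{Type: "const", Param: 1},
    }
    cfg, err := cfg.FromEnv()
    if err != nil {
        return nil, err
    }
    _, closer, err := cfg.NewTracer()
    if err != nil {
        return nil, err
    }
    return closer, nil
}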
JaegerClientVersionTagKey = "jaeger.version" diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go index 6ce1caf87..f0f1afe2f 100644 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go @@ -35,7 +35,7 @@ func BuildJaegerThrift(span *Span) *j.Span { SpanId: int64(span.context.spanID), ParentSpanId: int64(span.context.parentID), OperationName: span.operationName, - Flags: int32(span.context.flags), + Flags: int32(span.context.samplingState.flags()), StartTime: startTime, Duration: duration, Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength), diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go index e56db9b73..50e4e22d6 100644 --- a/vendor/github.com/uber/jaeger-client-go/metrics.go +++ b/vendor/github.com/uber/jaeger-client-go/metrics.go @@ -26,6 +26,9 @@ type Metrics struct { // Number of traces started by this tracer as not sampled TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"` + // Number of traces started by this tracer with delayed sampling + TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"` + // Number of externally started sampled traces this tracer joined TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"` @@ -33,13 +36,22 @@ type Metrics struct { TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"` // Number of sampled spans started by this tracer - SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"` + SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"` + + // Number of not sampled spans started by this tracer + SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"` + + // Number of spans with delayed sampling started by this tracer + SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"` - // Number of unsampled spans started by this tracer - SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"` + // Number of spans finished by this tracer + SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"` + + // Number of spans finished by this tracer + SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"` // Number of spans finished by this tracer - SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"` + SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"` // Number of errors 
decoding tracing context DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"` diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go index 5b50cfb71..42fd64b58 100644 --- a/vendor/github.com/uber/jaeger-client-go/propagation.go +++ b/vendor/github.com/uber/jaeger-client-go/propagation.go @@ -193,7 +193,7 @@ func (p *BinaryPropagator) Inject( if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil { return err } - if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil { + if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil { return err } @@ -222,6 +222,7 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er return emptyContext, opentracing.ErrInvalidCarrier } var ctx SpanContext + ctx.samplingState = &samplingState{} if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted @@ -232,9 +233,12 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } - if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil { + + var flags byte + if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } + ctx.samplingState.setFlags(flags) // Handle the baggage items var numBaggage int32 diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go index 27163ebe4..0b78cec20 100644 --- a/vendor/github.com/uber/jaeger-client-go/reporter.go +++ b/vendor/github.com/uber/jaeger-client-go/reporter.go @@ -28,6 +28,8 @@ import ( // Reporter is called by the tracer when a span is completed to report the span to the tracing collector. type Reporter interface { // Report submits a new span to collectors, possibly asynchronously and/or with buffering. + // If the reporter is processing Span asynchronously then it needs to Retain() the span, + // and then Release() it when no longer needed, to avoid span data corruption. Report(span *Span) // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory. diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go index ea6984e02..6195d59c5 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler.go @@ -17,19 +17,14 @@ package jaeger import ( "fmt" "math" - "net/url" "sync" - "sync/atomic" - "time" - "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift-gen/sampling" "github.com/uber/jaeger-client-go/utils" ) const ( - defaultSamplingRefreshInterval = time.Minute - defaultMaxOperations = 2000 + defaultMaxOperations = 2000 ) // Sampler decides whether a new trace should be sampled or not. @@ -47,9 +42,7 @@ type Sampler interface { // Equal checks if the `other` sampler is functionally equivalent // to this sampler. - // TODO remove this function. This function is used to determine if 2 samplers are equivalent - // which does not bode well with the adaptive sampler which has to create all the composite samplers - // for the comparison to occur. 
This is expensive to do if only one sampler has changed. + // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation. Equal(other Sampler) bool } @@ -57,17 +50,23 @@ type Sampler interface { // ConstSampler is a sampler that always makes the same decision. type ConstSampler struct { + legacySamplerV1Base Decision bool tags []Tag } // NewConstSampler creates a ConstSampler. -func NewConstSampler(sample bool) Sampler { +func NewConstSampler(sample bool) *ConstSampler { tags := []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeConst}, {key: SamplerParamTagKey, value: sample}, } - return &ConstSampler{Decision: sample, tags: tags} + s := &ConstSampler{ + Decision: sample, + tags: tags, + } + s.delegate = s.IsSampled + return s } // IsSampled implements IsSampled() of Sampler. @@ -88,11 +87,17 @@ func (s *ConstSampler) Equal(other Sampler) bool { return false } +// String is used to log sampler details. +func (s *ConstSampler) String() string { + return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision) +} + // ----------------------- // ProbabilisticSampler is a sampler that randomly samples a certain percentage // of traces. type ProbabilisticSampler struct { + legacySamplerV1Base samplingRate float64 samplingBoundary uint64 tags []Tag @@ -114,16 +119,19 @@ func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error } func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler { - samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) - tags := []Tag{ + s := new(ProbabilisticSampler) + s.delegate = s.IsSampled + return s.init(samplingRate) +} + +func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler { + s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) + s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate) + s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic}, - {key: SamplerParamTagKey, value: samplingRate}, - } - return &ProbabilisticSampler{ - samplingRate: samplingRate, - samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate), - tags: tags, + {key: SamplerParamTagKey, value: s.samplingRate}, } + return s } // SamplingRate returns the sampling probability this sampled was constructed with. @@ -149,65 +157,104 @@ func (s *ProbabilisticSampler) Equal(other Sampler) bool { return false } +// Update modifies in-place the sampling rate. Locking must be done externally. +func (s *ProbabilisticSampler) Update(samplingRate float64) error { + if samplingRate < 0.0 || samplingRate > 1.0 { + return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) + } + s.init(samplingRate) + return nil +} + +// String is used to log sampler details. +func (s *ProbabilisticSampler) String() string { + return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate) +} + // ----------------------- -type rateLimitingSampler struct { +// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows +// burstiness of the service, i.e. a service with uniformly distributed requests will have those +// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a +// number of sequential requests can be sampled each second. 
+type RateLimitingSampler struct { + legacySamplerV1Base maxTracesPerSecond float64 - rateLimiter utils.RateLimiter + rateLimiter *utils.ReconfigurableRateLimiter tags []Tag } -// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled -// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those -// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of -// sequential requests can be sampled each second. -func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler { - tags := []Tag{ +// NewRateLimitingSampler creates new RateLimitingSampler. +func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler { + s := new(RateLimitingSampler) + s.delegate = s.IsSampled + return s.init(maxTracesPerSecond) +} + +func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler { + if s.rateLimiter == nil { + s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + } else { + s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + } + s.maxTracesPerSecond = maxTracesPerSecond + s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting}, {key: SamplerParamTagKey, value: maxTracesPerSecond}, } - return &rateLimitingSampler{ - maxTracesPerSecond: maxTracesPerSecond, - rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)), - tags: tags, - } + return s } // IsSampled implements IsSampled() of Sampler. -func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { +func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { return s.rateLimiter.CheckCredit(1.0), s.tags } -func (s *rateLimitingSampler) Close() { +// Update reconfigures the rate limiter, while preserving its accumulated balance. +// Locking must be done externally. +func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) { + if s.maxTracesPerSecond != maxTracesPerSecond { + s.init(maxTracesPerSecond) + } +} + +// Close does nothing. +func (s *RateLimitingSampler) Close() { // nothing to do } -func (s *rateLimitingSampler) Equal(other Sampler) bool { - if o, ok := other.(*rateLimitingSampler); ok { +// Equal compares with another sampler. +func (s *RateLimitingSampler) Equal(other Sampler) bool { + if o, ok := other.(*RateLimitingSampler); ok { return s.maxTracesPerSecond == o.maxTracesPerSecond } return false } +// String is used to log sampler details. +func (s *RateLimitingSampler) String() string { + return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond) +} + // ----------------------- -// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and -// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that +// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and +// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that // every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound // of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes. // -// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both -// samplers return true, the tags for probabilisticSampler will be used. 
+// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both +// samplers return true, the tags for ProbabilisticSampler will be used. type GuaranteedThroughputProbabilisticSampler struct { probabilisticSampler *ProbabilisticSampler - lowerBoundSampler Sampler + lowerBoundSampler *RateLimitingSampler tags []Tag samplingRate float64 lowerBound float64 } // NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both -// probabilisticSampler and rateLimitingSampler. +// ProbabilisticSampler and RateLimitingSampler. func NewGuaranteedThroughputProbabilisticSampler( lowerBound, samplingRate float64, ) (*GuaranteedThroughputProbabilisticSampler, error) { @@ -224,8 +271,14 @@ func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float6 } func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) { - if s.probabilisticSampler == nil || s.samplingRate != samplingRate { + if s.probabilisticSampler == nil { s.probabilisticSampler = newProbabilisticSampler(samplingRate) + } else if s.samplingRate != samplingRate { + s.probabilisticSampler.init(samplingRate) + } + // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval + samplingRate = s.probabilisticSampler.SamplingRate() + if s.samplingRate != samplingRate || s.tags == nil { s.samplingRate = s.probabilisticSampler.SamplingRate() s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeLowerBound}, @@ -252,7 +305,7 @@ func (s *GuaranteedThroughputProbabilisticSampler) Close() { // Equal implements Equal() of Sampler. func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for + // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for // more information. return false } @@ -261,52 +314,116 @@ func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) { s.setProbabilisticSampler(samplingRate) if s.lowerBound != lowerBound { - s.lowerBoundSampler = NewRateLimitingSampler(lowerBound) + s.lowerBoundSampler.Update(lowerBound) s.lowerBound = lowerBound } } // ----------------------- -type adaptiveSampler struct { +// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler +// on a per-operation basis. +type PerOperationSampler struct { sync.RWMutex samplers map[string]*GuaranteedThroughputProbabilisticSampler defaultSampler *ProbabilisticSampler lowerBound float64 maxOperations int + + // see description in PerOperationSamplerParams + operationNameLateBinding bool } -// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and -// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all -// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler. -func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) { - return newAdaptiveSampler(strategies, maxOperations), nil +// NewAdaptiveSampler returns a new PerOperationSampler. +// Deprecated: please use NewPerOperationSampler. 
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) { + return NewPerOperationSampler(PerOperationSamplerParams{ + MaxOperations: maxOperations, + Strategies: strategies, + }), nil } -func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler { +// PerOperationSamplerParams defines parameters when creating PerOperationSampler. +type PerOperationSamplerParams struct { + // Max number of operations that will be tracked. Other operations will be given default strategy. + MaxOperations int + + // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName. + // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving + // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance + // in applications that always provide the correct span name on trace creation. + // + // For backwards compatibility this option is off by default. + OperationNameLateBinding bool + + // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler). + Strategies *sampling.PerOperationSamplingStrategies +} + +// NewPerOperationSampler returns a new PerOperationSampler. +func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler { samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler) - for _, strategy := range strategies.PerOperationStrategies { + for _, strategy := range params.Strategies.PerOperationStrategies { sampler := newGuaranteedThroughputProbabilisticSampler( - strategies.DefaultLowerBoundTracesPerSecond, + params.Strategies.DefaultLowerBoundTracesPerSecond, strategy.ProbabilisticSampling.SamplingRate, ) samplers[strategy.Operation] = sampler } - return &adaptiveSampler{ - samplers: samplers, - defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability), - lowerBound: strategies.DefaultLowerBoundTracesPerSecond, - maxOperations: maxOperations, + return &PerOperationSampler{ + samplers: samplers, + defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability), + lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond, + maxOperations: params.MaxOperations, + operationNameLateBinding: params.OperationNameLateBinding, } } -func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { +// IsSampled is not used and only exists to match Sampler V1 API. +// TODO (breaking change) remove when upgrading everything to SamplerV2 +func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return false, nil +} + +func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) { + samplerV1 := s.getSamplerForOperation(operationName) + var sampled bool + var tags []Tag + if span.context.samplingState.isLocalRootSpan(span.context.spanID) { + sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName) + } + return sampled, tags +} + +// OnCreateSpan implements OnCreateSpan of SamplerV2. +func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision { + sampled, tags := s.trySampling(span, span.OperationName()) + return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags} +} + +// OnSetOperationName implements OnSetOperationName of SamplerV2. 
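(Editorial sketch, not part of the diff: the hunks above rename adaptiveSampler to the exported PerOperationSampler and introduce PerOperationSamplerParams, including the opt-in OperationNameLateBinding flag. Constructing one directly might look like the following; the strategy numbers and the operation limit are assumptions. Imports assumed: "github.com/uber/jaeger-client-go" as jaeger and "github.com/uber/jaeger-client-go/thrift-gen/sampling".)

func newPerOperationSampler() *jaeger.PerOperationSampler {
    strategies := &sampling.PerOperationSamplingStrategies{
        DefaultSamplingProbability:       0.001,
        DefaultLowerBoundTracesPerSecond: 1.0 / 60, // at least once a minute per operation
    }
    return jaeger.NewPerOperationSampler(jaeger.PerOperationSamplerParams{
        MaxOperations:            200,  // assumed limit on tracked operations
        OperationNameLateBinding: true, // keep the decision retryable until SetOperationName
        Strategies:               strategies,
    })
}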
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { + sampled, tags := s.trySampling(span, operationName) + return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags} +} + +// OnSetTag implements OnSetTag of SamplerV2. +func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +// OnFinishSpan implements OnFinishSpan of SamplerV2. +func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler { s.RLock() sampler, ok := s.samplers[operation] if ok { defer s.RUnlock() - return sampler.IsSampled(id, operation) + return sampler } s.RUnlock() s.Lock() @@ -315,18 +432,19 @@ func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) // Check if sampler has already been created sampler, ok = s.samplers[operation] if ok { - return sampler.IsSampled(id, operation) + return sampler } // Store only up to maxOperations of unique ops. if len(s.samplers) >= s.maxOperations { - return s.defaultSampler.IsSampled(id, operation) + return s.defaultSampler } newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate()) s.samplers[operation] = newSampler - return newSampler.IsSampled(id, operation) + return newSampler } -func (s *adaptiveSampler) Close() { +// Close invokes Close on all underlying samplers. +func (s *PerOperationSampler) Close() { s.Lock() defer s.Unlock() for _, sampler := range s.samplers { @@ -335,16 +453,18 @@ func (s *adaptiveSampler) Close() { s.defaultSampler.Close() } -func (s *adaptiveSampler) Equal(other Sampler) bool { - // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple +// Equal is not used. +// TODO (breaking change) remove this in the future +func (s *PerOperationSampler) Equal(other Sampler) bool { + // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple // samplers which all need to be initialized before this function can be called for a comparison. - // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need + // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need // changing. Hence this function always returns false so that the update function can be called. // Once the Equal() function is removed from the Sampler API, this will no longer be needed. return false } -func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) { +func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) { s.Lock() defer s.Unlock() newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{} @@ -369,191 +489,3 @@ func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrate } s.samplers = newSamplers } - -// ----------------------- - -// RemotelyControlledSampler is a delegating sampler that polls a remote server -// for the appropriate sampling strategy, constructs a corresponding sampler and -// delegates to it for sampling decisions. -type RemotelyControlledSampler struct { - // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. 
https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - closed int64 // 0 - not closed, 1 - closed - - sync.RWMutex - samplerOptions - - serviceName string - manager sampling.SamplingManager - doneChan chan *sync.WaitGroup -} - -type httpSamplingManager struct { - serverURL string -} - -func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { - var out sampling.SamplingStrategyResponse - v := url.Values{} - v.Set("service", serviceName) - if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil { - return nil, err - } - return &out, nil -} - -// NewRemotelyControlledSampler creates a sampler that periodically pulls -// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). -func NewRemotelyControlledSampler( - serviceName string, - opts ...SamplerOption, -) *RemotelyControlledSampler { - options := applySamplerOptions(opts...) - sampler := &RemotelyControlledSampler{ - samplerOptions: options, - serviceName: serviceName, - manager: &httpSamplingManager{serverURL: options.samplingServerURL}, - doneChan: make(chan *sync.WaitGroup), - } - go sampler.pollController() - return sampler -} - -func applySamplerOptions(opts ...SamplerOption) samplerOptions { - options := samplerOptions{} - for _, option := range opts { - option(&options) - } - if options.sampler == nil { - options.sampler = newProbabilisticSampler(0.001) - } - if options.logger == nil { - options.logger = log.NullLogger - } - if options.maxOperations <= 0 { - options.maxOperations = defaultMaxOperations - } - if options.samplingServerURL == "" { - options.samplingServerURL = DefaultSamplingServerURL - } - if options.metrics == nil { - options.metrics = NewNullMetrics() - } - if options.samplingRefreshInterval <= 0 { - options.samplingRefreshInterval = defaultSamplingRefreshInterval - } - return options -} - -// IsSampled implements IsSampled() of Sampler. -func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - s.RLock() - defer s.RUnlock() - return s.sampler.IsSampled(id, operation) -} - -// Close implements Close() of Sampler. -func (s *RemotelyControlledSampler) Close() { - if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { - s.logger.Error("Repeated attempt to close the sampler is ignored") - return - } - - var wg sync.WaitGroup - wg.Add(1) - s.doneChan <- &wg - wg.Wait() -} - -// Equal implements Equal() of Sampler. -func (s *RemotelyControlledSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for - // more information. 
- if o, ok := other.(*RemotelyControlledSampler); ok { - s.RLock() - o.RLock() - defer s.RUnlock() - defer o.RUnlock() - return s.sampler.Equal(o.sampler) - } - return false -} - -func (s *RemotelyControlledSampler) pollController() { - ticker := time.NewTicker(s.samplingRefreshInterval) - defer ticker.Stop() - s.pollControllerWithTicker(ticker) -} - -func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { - for { - select { - case <-ticker.C: - s.updateSampler() - case wg := <-s.doneChan: - wg.Done() - return - } - } -} - -func (s *RemotelyControlledSampler) getSampler() Sampler { - s.Lock() - defer s.Unlock() - return s.sampler -} - -func (s *RemotelyControlledSampler) setSampler(sampler Sampler) { - s.Lock() - defer s.Unlock() - s.sampler = sampler -} - -func (s *RemotelyControlledSampler) updateSampler() { - res, err := s.manager.GetSamplingStrategy(s.serviceName) - if err != nil { - s.metrics.SamplerQueryFailure.Inc(1) - s.logger.Infof("Unable to query sampling strategy: %v", err) - return - } - s.Lock() - defer s.Unlock() - - s.metrics.SamplerRetrieved.Inc(1) - if strategies := res.GetOperationSampling(); strategies != nil { - s.updateAdaptiveSampler(strategies) - } else { - err = s.updateRateLimitingOrProbabilisticSampler(res) - } - if err != nil { - s.metrics.SamplerUpdateFailure.Inc(1) - s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err) - return - } - s.metrics.SamplerUpdated.Inc(1) -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) { - if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok { - adaptiveSampler.update(strategies) - } else { - s.sampler = newAdaptiveSampler(strategies, s.maxOperations) - } -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error { - var newSampler Sampler - if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil { - newSampler = newProbabilisticSampler(probabilistic.SamplingRate) - } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil { - newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond)) - } else { - return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType()) - } - if !s.sampler.Equal(newSampler) { - s.sampler = newSampler - } - return nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go new file mode 100644 index 000000000..9bd0c9822 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go @@ -0,0 +1,334 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/uber/jaeger-client-go/thrift-gen/sampling" +) + +const ( + defaultSamplingRefreshInterval = time.Minute +) + +// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server. +type SamplingStrategyFetcher interface { + Fetch(service string) ([]byte, error) +} + +// SamplingStrategyParser is used to parse sampling strategy updates. The output object +// should be of the type that is recognized by the SamplerUpdaters. +type SamplingStrategyParser interface { + Parse(response []byte) (interface{}, error) +} + +// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies, +// retrieved from remote config server, to the current sampler. The updater can modify +// the sampler in-place if sampler supports it, or create a new one. +// +// If the strategy does not contain configuration for the sampler in question, +// updater must return modifiedSampler=nil to give other updaters a chance to inspect +// the sampling strategy response. +// +// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler. +type SamplerUpdater interface { + Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error) +} + +// RemotelyControlledSampler is a delegating sampler that polls a remote server +// for the appropriate sampling strategy, constructs a corresponding sampler and +// delegates to it for sampling decisions. +type RemotelyControlledSampler struct { + // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. + // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq + closed int64 // 0 - not closed, 1 - closed + + sync.RWMutex + samplerOptions + + serviceName string + doneChan chan *sync.WaitGroup +} + +// NewRemotelyControlledSampler creates a sampler that periodically pulls +// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). +func NewRemotelyControlledSampler( + serviceName string, + opts ...SamplerOption, +) *RemotelyControlledSampler { + options := new(samplerOptions).applyOptionsAndDefaults(opts...) + sampler := &RemotelyControlledSampler{ + samplerOptions: *options, + serviceName: serviceName, + doneChan: make(chan *sync.WaitGroup), + } + go sampler.pollController() + return sampler +} + +// IsSampled implements IsSampled() of Sampler. +// TODO (breaking change) remove when Sampler V1 is removed +func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return false, nil +} + +// OnCreateSpan implements OnCreateSpan of SamplerV2. +func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision { + return s.sampler.OnCreateSpan(span) +} + +// OnSetOperationName implements OnSetOperationName of SamplerV2. +func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { + return s.sampler.OnSetOperationName(span, operationName) +} + +// OnSetTag implements OnSetTag of SamplerV2. +func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return s.sampler.OnSetTag(span, key, value) +} + +// OnFinishSpan implements OnFinishSpan of SamplerV2. +func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision { + return s.sampler.OnFinishSpan(span) +} + +// Close implements Close() of Sampler. 
+func (s *RemotelyControlledSampler) Close() { + if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { + s.logger.Error("Repeated attempt to close the sampler is ignored") + return + } + + var wg sync.WaitGroup + wg.Add(1) + s.doneChan <- &wg + wg.Wait() +} + +// Equal implements Equal() of Sampler. +func (s *RemotelyControlledSampler) Equal(other Sampler) bool { + // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for + // more information. + return false +} + +func (s *RemotelyControlledSampler) pollController() { + ticker := time.NewTicker(s.samplingRefreshInterval) + defer ticker.Stop() + s.pollControllerWithTicker(ticker) +} + +func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { + for { + select { + case <-ticker.C: + s.UpdateSampler() + case wg := <-s.doneChan: + wg.Done() + return + } + } +} + +// Sampler returns the currently active sampler. +func (s *RemotelyControlledSampler) Sampler() SamplerV2 { + s.Lock() + defer s.Unlock() + return s.sampler +} + +func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) { + s.Lock() + defer s.Unlock() + s.sampler = sampler +} + +// UpdateSampler forces the sampler to fetch sampling strategy from backend server. +// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests. +func (s *RemotelyControlledSampler) UpdateSampler() { + res, err := s.samplingFetcher.Fetch(s.serviceName) + if err != nil { + s.metrics.SamplerQueryFailure.Inc(1) + s.logger.Infof("failed to fetch sampling strategy: %v", err) + return + } + strategy, err := s.samplingParser.Parse(res) + if err != nil { + s.metrics.SamplerUpdateFailure.Inc(1) + s.logger.Infof("failed to parse sampling strategy response: %v", err) + return + } + + s.Lock() + defer s.Unlock() + + s.metrics.SamplerRetrieved.Inc(1) + if err := s.updateSamplerViaUpdaters(strategy); err != nil { + s.metrics.SamplerUpdateFailure.Inc(1) + s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err) + return + } + s.metrics.SamplerUpdated.Inc(1) +} + +// NB: this function should only be called while holding a Write lock +func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error { + for _, updater := range s.updaters { + sampler, err := updater.Update(s.sampler, strategy) + if err != nil { + return err + } + if sampler != nil { + s.sampler = sampler + return nil + } + } + return fmt.Errorf("unsupported sampling strategy %+v", strategy) +} + +// ----------------------- + +// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. +type ProbabilisticSamplerUpdater struct{} + +// Update implements Update of SamplerUpdater. 
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { + type response interface { + GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy + } + var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check + if resp, ok := strategy.(response); ok { + if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil { + if ps, ok := sampler.(*ProbabilisticSampler); ok { + if err := ps.Update(probabilistic.SamplingRate); err != nil { + return nil, err + } + return sampler, nil + } + return newProbabilisticSampler(probabilistic.SamplingRate), nil + } + } + return nil, nil +} + +// ----------------------- + +// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. +type RateLimitingSamplerUpdater struct{} + +// Update implements Update of SamplerUpdater. +func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { + type response interface { + GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy + } + var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check + if resp, ok := strategy.(response); ok { + if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil { + rateLimit := float64(rateLimiting.MaxTracesPerSecond) + if rl, ok := sampler.(*RateLimitingSampler); ok { + rl.Update(rateLimit) + return rl, nil + } + return NewRateLimitingSampler(rateLimit), nil + } + } + return nil, nil +} + +// ----------------------- + +// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. +type AdaptiveSamplerUpdater struct { + MaxOperations int // required + OperationNameLateBinding bool +} + +// Update implements Update of SamplerUpdater. +func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { + type response interface { + GetOperationSampling() *sampling.PerOperationSamplingStrategies + } + var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check + if p, ok := strategy.(response); ok { + if operations := p.GetOperationSampling(); operations != nil { + if as, ok := sampler.(*PerOperationSampler); ok { + as.update(operations) + return as, nil + } + return NewPerOperationSampler(PerOperationSamplerParams{ + MaxOperations: u.MaxOperations, + OperationNameLateBinding: u.OperationNameLateBinding, + Strategies: operations, + }), nil + } + } + return nil, nil +} + +// ----------------------- + +type httpSamplingStrategyFetcher struct { + serverURL string + logger Logger +} + +func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) { + v := url.Values{} + v.Set("service", serviceName) + uri := f.serverURL + "?" + v.Encode() + + // TODO create and reuse http.Client with proper timeout settings, etc. 
+ resp, err := http.Get(uri) + if err != nil { + return nil, err + } + + defer func() { + if err := resp.Body.Close(); err != nil { + f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err)) + } + }() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) + } + + return body, nil +} + +// ----------------------- + +type samplingStrategyParser struct{} + +func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) { + strategy := new(sampling.SamplingStrategyResponse) + if err := json.Unmarshal(response, strategy); err != nil { + return nil, err + } + return strategy, nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go index 75d28a561..7a292effc 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler_options.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go @@ -16,6 +16,8 @@ package jaeger import ( "time" + + "github.com/uber/jaeger-client-go/log" ) // SamplerOption is a function that sets some option on the sampler @@ -27,10 +29,13 @@ var SamplerOptions samplerOptions type samplerOptions struct { metrics *Metrics maxOperations int - sampler Sampler + sampler SamplerV2 logger Logger samplingServerURL string samplingRefreshInterval time.Duration + samplingFetcher SamplingStrategyFetcher + samplingParser SamplingStrategyParser + updaters []SamplerUpdater } // Metrics creates a SamplerOption that initializes Metrics on the sampler, @@ -53,7 +58,7 @@ func (samplerOptions) MaxOperations(maxOperations int) SamplerOption { // to use before a remote sampler is created and used. func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption { return func(o *samplerOptions) { - o.sampler = sampler + o.sampler = samplerV1toV2(sampler) } } @@ -79,3 +84,65 @@ func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Durat o.samplingRefreshInterval = samplingRefreshInterval } } + +// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher. +func (samplerOptions) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption { + return func(o *samplerOptions) { + o.samplingFetcher = fetcher + } +} + +// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser. +func (samplerOptions) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption { + return func(o *samplerOptions) { + o.samplingParser = parser + } +} + +// Updaters creates a SamplerOption that initializes sampler updaters. 
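(Aside, not part of the diff: the new sampler_remote.go splits strategy retrieval into SamplingStrategyFetcher, SamplingStrategyParser, and SamplerUpdater, and sampler_remote_options.go exposes setters for them. A sketch of plugging in a custom fetcher; the fileFetcher type, file path, and service name are hypothetical and only meant to show the extension point.)

package main

import (
    "io/ioutil"
    "time"

    "github.com/uber/jaeger-client-go"
)

// fileFetcher is a hypothetical SamplingStrategyFetcher that reads the
// strategy JSON from a local file instead of querying jaeger-agent.
type fileFetcher struct{ path string }

func (f fileFetcher) Fetch(service string) ([]byte, error) {
    return ioutil.ReadFile(f.path)
}

func main() {
    sampler := jaeger.NewRemotelyControlledSampler(
        "my-service", // assumed service name
        jaeger.SamplerOptions.SamplingStrategyFetcher(fileFetcher{path: "/etc/jaeger/strategy.json"}),
        jaeger.SamplerOptions.SamplingRefreshInterval(30*time.Second),
    )
    defer sampler.Close()

    // The remote sampler still satisfies the V1 Sampler interface,
    // so it can be passed to the tracer constructor as before.
    tracer, closer := jaeger.NewTracer("my-service", sampler, jaeger.NewNullReporter())
    defer closer.Close()
    _ = tracer
}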
+func (samplerOptions) Updaters(updaters ...SamplerUpdater) SamplerOption { + return func(o *samplerOptions) { + o.updaters = updaters + } +} + +func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions { + for _, option := range opts { + option(o) + } + if o.sampler == nil { + o.sampler = newProbabilisticSampler(0.001) + } + if o.logger == nil { + o.logger = log.NullLogger + } + if o.maxOperations <= 0 { + o.maxOperations = defaultMaxOperations + } + if o.samplingServerURL == "" { + o.samplingServerURL = DefaultSamplingServerURL + } + if o.metrics == nil { + o.metrics = NewNullMetrics() + } + if o.samplingRefreshInterval <= 0 { + o.samplingRefreshInterval = defaultSamplingRefreshInterval + } + if o.samplingFetcher == nil { + o.samplingFetcher = &httpSamplingStrategyFetcher{ + serverURL: o.samplingServerURL, + logger: o.logger, + } + } + if o.samplingParser == nil { + o.samplingParser = new(samplingStrategyParser) + } + if o.updaters == nil { + o.updaters = []SamplerUpdater{ + &AdaptiveSamplerUpdater{MaxOperations: o.maxOperations}, + new(ProbabilisticSamplerUpdater), + new(RateLimitingSamplerUpdater), + } + } + return o +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go new file mode 100644 index 000000000..a50671a23 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go @@ -0,0 +1,93 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +// SamplingDecision is returned by the V2 samplers. +type SamplingDecision struct { + Sample bool + Retryable bool + Tags []Tag +} + +// SamplerV2 is an extension of the V1 samplers that allows sampling decisions +// be made at different points of the span lifecycle. +type SamplerV2 interface { + OnCreateSpan(span *Span) SamplingDecision + OnSetOperationName(span *Span, operationName string) SamplingDecision + OnSetTag(span *Span, key string, value interface{}) SamplingDecision + OnFinishSpan(span *Span) SamplingDecision + + // Close does a clean shutdown of the sampler, stopping any background + // go-routines it may have started. + Close() +} + +// samplerV1toV2 wraps legacy V1 sampler into an adapter that make it look like V2. +func samplerV1toV2(s Sampler) SamplerV2 { + if s2, ok := s.(SamplerV2); ok { + return s2 + } + type legacySamplerV1toV2Adapter struct { + legacySamplerV1Base + } + return &legacySamplerV1toV2Adapter{ + legacySamplerV1Base: legacySamplerV1Base{ + delegate: s.IsSampled, + }, + } +} + +// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods. +// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler +// for backwards compatibility reasons. +// TODO (breaking change) remove this in the next major release +type SamplerV2Base struct{} + +// IsSampled implements IsSampled of Sampler. 
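(Aside, not part of the diff: sampler_v2.go above defines SamplingDecision, the SamplerV2 interface, and SamplerV2Base for backwards compatibility with the V1 Sampler API. A sketch of a custom V2 sampler built on those types; headSampler and its counting logic are invented for illustration and are not part of this change.)

package samplers

import (
    "sync/atomic"

    "github.com/uber/jaeger-client-go"
)

// headSampler makes a positive decision for only the first N spans it is
// asked about, then stops sampling. Embedding SamplerV2Base supplies the
// legacy V1 Sampler methods required by the tracer constructor.
type headSampler struct {
    jaeger.SamplerV2Base
    remaining int64
}

func (s *headSampler) OnCreateSpan(span *jaeger.Span) jaeger.SamplingDecision {
    sampled := atomic.AddInt64(&s.remaining, -1) >= 0
    // Retryable=false finalizes the decision at span creation time.
    return jaeger.SamplingDecision{Sample: sampled, Retryable: false}
}

func (s *headSampler) OnSetOperationName(span *jaeger.Span, name string) jaeger.SamplingDecision {
    return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *headSampler) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
    return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *headSampler) OnFinishSpan(span *jaeger.Span) jaeger.SamplingDecision {
    return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *headSampler) Close() {}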
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) { + return false, nil +} + +// Close implements Close of Sampler. +func (SamplerV2Base) Close() {} + +// Equal implements Equal of Sampler. +func (SamplerV2Base) Equal(other Sampler) bool { return false } + +// legacySamplerV1Base is used as a base for simple samplers that only implement +// the legacy isSampled() function that is not sensitive to its arguments. +type legacySamplerV1Base struct { + delegate func(id TraceID, operation string) (sampled bool, tags []Tag) +} + +func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision { + isSampled, tags := s.delegate(span.context.traceID, span.operationName) + return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} +} + +func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision { + isSampled, tags := s.delegate(span.context.traceID, span.operationName) + return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} +} + +func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *legacySamplerV1Base) Close() {} diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go index 9df8b6017..bbf6fb068 100644 --- a/vendor/github.com/uber/jaeger-client-go/span.go +++ b/vendor/github.com/uber/jaeger-client-go/span.go @@ -34,6 +34,7 @@ type Span struct { tracer *Tracer + // TODO: (breaking change) change to use a pointer context SpanContext // The name of the "operation" this span is an instance of. @@ -65,18 +66,26 @@ type Span struct { } // Tag is a simple key value wrapper. -// TODO deprecate in the next major release, use opentracing.Tag instead. +// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. type Tag struct { key string value interface{} } +// NewTag creates a new Tag. +// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. +func NewTag(key string, value interface{}) Tag { + return Tag{key: key, value: value} +} + // SetOperationName sets or changes the operation name. 
func (s *Span) SetOperationName(operationName string) opentracing.Span { s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.operationName = operationName + s.operationName = operationName + s.Unlock() + if !s.isSamplingFinalized() { + decision := s.tracer.sampler.OnSetOperationName(s, operationName) + s.applySamplingDecision(decision, true) } s.observer.OnSetOperationName(operationName) return s @@ -84,14 +93,24 @@ func (s *Span) SetOperationName(operationName string) opentracing.Span { // SetTag implements SetTag() of opentracing.Span func (s *Span) SetTag(key string, value interface{}) opentracing.Span { + return s.setTagInternal(key, value, true) +} + +func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span { s.observer.OnSetTag(key, value) if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) { return s } - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.setTagNoLocking(key, value) + if !s.isSamplingFinalized() { + decision := s.tracer.sampler.OnSetTag(s, key, value) + s.applySamplingDecision(decision, lock) + } + if s.isWriteable() { + if lock { + s.Lock() + defer s.Unlock() + } + s.appendTagNoLocking(key, value) } return s } @@ -121,14 +140,38 @@ func (s *Span) Duration() time.Duration { func (s *Span) Tags() opentracing.Tags { s.Lock() defer s.Unlock() - var result = make(opentracing.Tags) + var result = make(opentracing.Tags, len(s.tags)) for _, tag := range s.tags { result[tag.key] = tag.value } return result } -func (s *Span) setTagNoLocking(key string, value interface{}) { +// Logs returns micro logs for span +func (s *Span) Logs() []opentracing.LogRecord { + s.Lock() + defer s.Unlock() + + return append([]opentracing.LogRecord(nil), s.logs...) +} + +// References returns references for this span +func (s *Span) References() []opentracing.SpanReference { + s.Lock() + defer s.Unlock() + + if s.references == nil || len(s.references) == 0 { + return nil + } + + result := make([]opentracing.SpanReference, len(s.references)) + for i, r := range s.references { + result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context} + } + return result +} + +func (s *Span) appendTagNoLocking(key string, value interface{}) { s.tags = append(s.tags, Tag{key: key, value: value}) } @@ -148,7 +191,7 @@ func (s *Span) logFieldsNoLocking(fields ...log.Field) { Fields: fields, Timestamp: time.Now(), } - s.appendLog(lr) + s.appendLogNoLocking(lr) } // LogKV implements opentracing.Span API @@ -185,12 +228,12 @@ func (s *Span) Log(ld opentracing.LogData) { if ld.Timestamp.IsZero() { ld.Timestamp = s.tracer.timeNow() } - s.appendLog(ld.ToLogRecord()) + s.appendLogNoLocking(ld.ToLogRecord()) } } // this function should only be called while holding a Write lock -func (s *Span) appendLog(lr opentracing.LogRecord) { +func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) { // TODO add logic to limit number of logs per span (issue #46) s.logs = append(s.logs, lr) } @@ -224,17 +267,25 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { } s.observer.OnFinish(options) s.Lock() + s.duration = options.FinishTime.Sub(s.startTime) + s.Unlock() + if !s.isSamplingFinalized() { + decision := s.tracer.sampler.OnFinishSpan(s) + s.applySamplingDecision(decision, true) + } if s.context.IsSampled() { - s.duration = options.FinishTime.Sub(s.startTime) - // Note: bulk logs are not subject to maxLogsPerSpan limit - if options.LogRecords != nil { - s.logs = append(s.logs, options.LogRecords...) 
- } - for _, ld := range options.BulkLogData { - s.logs = append(s.logs, ld.ToLogRecord()) + if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 { + s.Lock() + // Note: bulk logs are not subject to maxLogsPerSpan limit + if options.LogRecords != nil { + s.logs = append(s.logs, options.LogRecords...) + } + for _, ld := range options.BulkLogData { + s.logs = append(s.logs, ld.ToLogRecord()) + } + s.Unlock() } } - s.Unlock() // call reportSpan even for non-sampled traces, to return span to the pool // and update metrics counter s.tracer.reportSpan(s) @@ -300,23 +351,62 @@ func (s *Span) serviceName() string { return s.tracer.serviceName } +func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) { + if !decision.Retryable { + s.context.samplingState.setFinal() + } + if decision.Sample { + s.context.samplingState.setSampled() + if len(decision.Tags) > 0 { + if lock { + s.Lock() + defer s.Unlock() + } + for _, tag := range decision.Tags { + s.appendTagNoLocking(tag.key, tag.value) + } + } + } +} + +// Span can be written to if it is sampled or the sampling decision has not been finalized. +func (s *Span) isWriteable() bool { + state := s.context.samplingState + return !state.isFinal() || state.isSampled() +} + +func (s *Span) isSamplingFinalized() bool { + return s.context.samplingState.isFinal() +} + // setSamplingPriority returns true if the flag was updated successfully, false otherwise. +// The behavior of setSamplingPriority is surprising +// If noDebugFlagOnForcedSampling is set +// setSamplingPriority(span, 1) always sets only flagSampled +// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes +// setSamplingPriority(span, 1) sets both flagSampled and flagDebug +// However, +// setSamplingPriority(span, 0) always only resets flagSampled +// +// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can +// leave flagDebug set func setSamplingPriority(s *Span, value interface{}) bool { val, ok := value.(uint16) if !ok { return false } - s.Lock() - defer s.Unlock() if val == 0 { - s.context.flags = s.context.flags & (^flagSampled) + s.context.samplingState.unsetSampled() + s.context.samplingState.setFinal() return true } if s.tracer.options.noDebugFlagOnForcedSampling { - s.context.flags = s.context.flags | flagSampled + s.context.samplingState.setSampled() + s.context.samplingState.setFinal() return true } else if s.tracer.isDebugAllowed(s.operationName) { - s.context.flags = s.context.flags | flagDebug | flagSampled + s.context.samplingState.setDebugAndSampled() + s.context.samplingState.setFinal() return true } return false @@ -326,5 +416,5 @@ func setSamplingPriority(s *Span, value interface{}) bool { func EnableFirehose(s *Span) { s.Lock() defer s.Unlock() - s.context.flags |= flagFirehose + s.context.samplingState.setFirehose() } diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go index 43553655a..b7230abfe 100644 --- a/vendor/github.com/uber/jaeger-client-go/context.go +++ b/vendor/github.com/uber/jaeger-client-go/span_context.go @@ -19,12 +19,15 @@ import ( "fmt" "strconv" "strings" + "sync" + + "go.uber.org/atomic" ) const ( - flagSampled = byte(1) - flagDebug = byte(2) - flagFirehose = byte(8) + flagSampled = 1 + flagDebug = 2 + flagFirehose = 8 ) var ( @@ -56,9 +59,6 @@ type SpanContext struct { // Should be 0 if the current span is a root span. 
parentID SpanID - // flags is a bitmap containing such bits as 'sampled' and 'debug'. - flags byte - // Distributed Context baggage. The is a snapshot in time. baggage map[string]string @@ -67,6 +67,102 @@ type SpanContext struct { // // See JaegerDebugHeader in constants.go debugID string + + // samplingState is shared across all spans + samplingState *samplingState + + // remote indicates that span context represents a remote parent + remote bool +} + +type samplingState struct { + // Span context's state flags that are propagated across processes. Only lower 8 bits are used. + // We use an int32 instead of byte to be able to use CAS operations. + stateFlags atomic.Int32 + + // When state is not final, sampling will be retried on other span write operations, + // like SetOperationName / SetTag, and the spans will remain writable. + final atomic.Bool + + // localRootSpan stores the SpanID of the first span created in this process for a given trace. + localRootSpan SpanID + + // extendedState allows samplers to keep intermediate state. + // The keys and values in this map are completely opaque: interface{} -> interface{}. + extendedState sync.Map +} + +func (s *samplingState) isLocalRootSpan(id SpanID) bool { + return id == s.localRootSpan +} + +func (s *samplingState) setFlag(newFlag int32) { + swapped := false + for !swapped { + old := s.stateFlags.Load() + swapped = s.stateFlags.CAS(old, old|newFlag) + } +} + +func (s *samplingState) unsetFlag(newFlag int32) { + swapped := false + for !swapped { + old := s.stateFlags.Load() + swapped = s.stateFlags.CAS(old, old&^newFlag) + } +} + +func (s *samplingState) setSampled() { + s.setFlag(flagSampled) +} + +func (s *samplingState) unsetSampled() { + s.unsetFlag(flagSampled) +} + +func (s *samplingState) setDebugAndSampled() { + s.setFlag(flagDebug | flagSampled) +} + +func (s *samplingState) setFirehose() { + s.setFlag(flagFirehose) +} + +func (s *samplingState) setFlags(flags byte) { + s.stateFlags.Store(int32(flags)) +} + +func (s *samplingState) setFinal() { + s.final.Store(true) +} + +func (s *samplingState) flags() byte { + return byte(s.stateFlags.Load()) +} + +func (s *samplingState) isSampled() bool { + return s.stateFlags.Load()&flagSampled == flagSampled +} + +func (s *samplingState) isDebug() bool { + return s.stateFlags.Load()&flagDebug == flagDebug +} + +func (s *samplingState) isFirehose() bool { + return s.stateFlags.Load()&flagFirehose == flagFirehose +} + +func (s *samplingState) isFinal() bool { + return s.final.Load() +} + +func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} { + if value, ok := s.extendedState.Load(key); ok { + return value + } + value := initValue() + value, _ = s.extendedState.LoadOrStore(key, value) + return value } // ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext @@ -81,17 +177,28 @@ func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { // IsSampled returns whether this trace was chosen for permanent storage // by the sampling mechanism of the tracer. func (c SpanContext) IsSampled() bool { - return (c.flags & flagSampled) == flagSampled + return c.samplingState.isSampled() } // IsDebug indicates whether sampling was explicitly requested by the service. func (c SpanContext) IsDebug() bool { - return (c.flags & flagDebug) == flagDebug + return c.samplingState.isDebug() +} + +// IsSamplingFinalized indicates whether the sampling decision has been finalized. 
+func (c SpanContext) IsSamplingFinalized() bool { + return c.samplingState.isFinal() } // IsFirehose indicates whether the firehose flag was set func (c SpanContext) IsFirehose() bool { - return (c.flags & flagFirehose) == flagFirehose + return c.samplingState.isFirehose() +} + +// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist, +// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler). +func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} { + return c.samplingState.extendedStateForKey(key, initValue) } // IsValid indicates whether this context actually represents a valid trace. @@ -99,11 +206,16 @@ func (c SpanContext) IsValid() bool { return c.traceID.IsValid() && c.spanID != 0 } +// SetFirehose enables firehose mode for this trace. +func (c SpanContext) SetFirehose() { + c.samplingState.setFirehose() +} + func (c SpanContext) String() string { if c.traceID.High == 0 { - return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) + return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) } - return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) + return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) } // ContextFromString reconstructs the Context encoded in a string @@ -130,7 +242,8 @@ func ContextFromString(value string) (SpanContext, error) { if err != nil { return emptyContext, err } - context.flags = byte(flags) + context.samplingState = &samplingState{} + context.samplingState.setFlags(byte(flags)) return context, nil } @@ -149,18 +262,24 @@ func (c SpanContext) ParentID() SpanID { return c.parentID } +// Flags returns the bitmap containing such bits as 'sampled' and 'debug'. +func (c SpanContext) Flags() byte { + return c.samplingState.flags() +} + // NewSpanContext creates a new instance of SpanContext func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { - flags := byte(0) + samplingState := &samplingState{} if sampled { - flags = flagSampled + samplingState.setSampled() } + return SpanContext{ - traceID: traceID, - spanID: spanID, - parentID: parentID, - flags: flags, - baggage: baggage} + traceID: traceID, + spanID: spanID, + parentID: parentID, + samplingState: samplingState, + baggage: baggage} } // CopyFrom copies data from ctx into this context, including span identity and baggage. @@ -169,7 +288,7 @@ func (c *SpanContext) CopyFrom(ctx *SpanContext) { c.traceID = ctx.traceID c.spanID = ctx.spanID c.parentID = ctx.parentID - c.flags = ctx.flags + c.samplingState = ctx.samplingState if l := len(ctx.baggage); l > 0 { c.baggage = make(map[string]string, l) for k, v := range ctx.baggage { @@ -193,7 +312,7 @@ func (c SpanContext) WithBaggageItem(key, value string) SpanContext { newBaggage[key] = value } // Use positional parameters so the compiler will help catch new fields. 
- return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""} + return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} } // isDebugIDContainerOnly returns true when the instance of the context is only diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go index 745a0c38a..f03372dc7 100644 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ b/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -38,7 +38,7 @@ type Tracer struct { serviceName string hostIPv4 uint32 // this is for zipkin endpoint conversion - sampler Sampler + sampler SamplerV2 reporter Reporter metrics Metrics logger log.Logger @@ -74,6 +74,7 @@ type Tracer struct { // NewTracer creates Tracer implementation that reports tracing to Jaeger. // The returned io.Closer can be used in shutdown hooks to ensure that the internal // queue of the Reporter is drained and all buffered spans are submitted to collectors. +// TODO (breaking change) return *Tracer only, without closer. func NewTracer( serviceName string, sampler Sampler, @@ -82,7 +83,7 @@ func NewTracer( ) (opentracing.Tracer, io.Closer) { t := &Tracer{ serviceName: serviceName, - sampler: sampler, + sampler: samplerV1toV2(sampler), reporter: reporter, injectors: make(map[interface{}]Injector), extractors: make(map[interface{}]Extractor), @@ -261,7 +262,7 @@ func (t *Tracer) startSpanWithOptions( rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum)) } - var samplerTags []Tag + var internalTags []Tag newTrace := false if !isSelfRef { if !hasParent || !parent.IsValid() { @@ -272,13 +273,12 @@ func (t *Tracer) startSpanWithOptions( } ctx.spanID = SpanID(ctx.traceID.Low) ctx.parentID = 0 - ctx.flags = byte(0) + ctx.samplingState = &samplingState{ + localRootSpan: ctx.spanID, + } if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) { - ctx.flags |= (flagSampled | flagDebug) - samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}} - } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled { - ctx.flags |= flagSampled - samplerTags = tags + ctx.samplingState.setDebugAndSampled() + internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID}) } } else { ctx.traceID = parent.traceID @@ -290,7 +290,11 @@ func (t *Tracer) startSpanWithOptions( ctx.spanID = SpanID(t.randomID()) ctx.parentID = parent.spanID } - ctx.flags = parent.flags + ctx.samplingState = parent.samplingState + if parent.remote { + ctx.samplingState.setFinal() + ctx.samplingState.localRootSpan = ctx.spanID + } } if hasParent { // copy baggage items @@ -305,17 +309,30 @@ func (t *Tracer) startSpanWithOptions( sp := t.newSpan() sp.context = ctx + sp.tracer = t + sp.operationName = operationName + sp.startTime = options.StartTime + sp.duration = 0 + sp.references = references + sp.firstInProcess = rpcServer || sp.context.parentID == 0 + + if !sp.isSamplingFinalized() { + decision := t.sampler.OnCreateSpan(sp) + sp.applySamplingDecision(decision, false) + } sp.observer = t.observer.OnStartSpan(sp, operationName, options) - return t.startSpanInternal( - sp, - operationName, - options.StartTime, - samplerTags, - options.Tags, - newTrace, - rpcServer, - references, - ) + + if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 { + if sp.tags == nil || cap(sp.tags) < tagsTotalLength { + sp.tags = make([]Tag, 0, tagsTotalLength) + } + sp.tags = 
append(sp.tags, internalTags...) + for k, v := range options.Tags { + sp.setTagInternal(k, v, false) + } + } + t.emitNewSpanMetrics(sp, newTrace) + return sp } // Inject implements Inject() method of opentracing.Tracer @@ -340,6 +357,7 @@ func (t *Tracer) Extract( if err != nil { return nil, err // ensure returned spanCtx is nil } + spanCtx.remote = true return spanCtx, nil } return nil, opentracing.ErrUnsupportedFormat @@ -350,10 +368,10 @@ func (t *Tracer) Close() error { t.reporter.Close() t.sampler.Close() if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok { - mgr.Close() + _ = mgr.Close() } if throttler, ok := t.debugThrottler.(io.Closer); ok { - throttler.Close() + _ = throttler.Close() } return nil } @@ -368,6 +386,7 @@ func (t *Tracer) Tags() []opentracing.Tag { } // getTag returns the value of specific tag, if not exists, return nil. +// TODO only used by tests, move there. func (t *Tracer) getTag(key string) (interface{}, bool) { for _, tag := range t.tags { if tag.key == key { @@ -383,41 +402,21 @@ func (t *Tracer) newSpan() *Span { return t.spanAllocator.Get() } -func (t *Tracer) startSpanInternal( - sp *Span, - operationName string, - startTime time.Time, - internalTags []Tag, - tags opentracing.Tags, - newTrace bool, - rpcServer bool, - references []Reference, -) *Span { - sp.tracer = t - sp.operationName = operationName - sp.startTime = startTime - sp.duration = 0 - sp.references = references - sp.firstInProcess = rpcServer || sp.context.parentID == 0 - if len(tags) > 0 || len(internalTags) > 0 { - sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags)) - copy(sp.tags, internalTags) - for k, v := range tags { - sp.observer.OnSetTag(k, v) - if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) { - continue - } - sp.setTagNoLocking(k, v) +// emitNewSpanMetrics generates metrics on the number of started spans and traces. +// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the +// server-side RPC span has the exact same trace/span/parent IDs as the +// calling client-side span, but obviously the server side span is +// no longer a root span of the trace. +func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) { + if !sp.isSamplingFinalized() { + t.metrics.SpansStartedDelayedSampling.Inc(1) + if newTrace { + t.metrics.TracesStartedDelayedSampling.Inc(1) } - } - // emit metrics - if sp.context.IsSampled() { + // joining a trace is not possible, because sampling decision inherited from upstream is final + } else if sp.context.IsSampled() { t.metrics.SpansStartedSampled.Inc(1) if newTrace { - // We cannot simply check for parentID==0 because in Zipkin model the - // server-side RPC span has the exact same trace/span/parent IDs as the - // calling client-side span, but obviously the server side span is - // no longer a root span of the trace. 
t.metrics.TracesStartedSampled.Inc(1) } else if sp.firstInProcess { t.metrics.TracesJoinedSampled.Inc(1) @@ -430,15 +429,20 @@ func (t *Tracer) startSpanInternal( t.metrics.TracesJoinedNotSampled.Inc(1) } } - return sp } func (t *Tracer) reportSpan(sp *Span) { - t.metrics.SpansFinished.Inc(1) + if !sp.isSamplingFinalized() { + t.metrics.SpansFinishedDelayedSampling.Inc(1) + } else if sp.context.IsSampled() { + t.metrics.SpansFinishedSampled.Inc(1) + } else { + t.metrics.SpansFinishedNotSampled.Inc(1) + } - // Note: if the reporter is processing Span asynchronously need to Retain() it - // otherwise, in the racing condition will be rewritten span data before it will be sent - // * To remove object use method span.Release() + // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span, + // and then Release() it when no longer needed. + // Otherwise, the span may be reused for another trace and its data may be overwritten. if sp.context.IsSampled() { t.reporter.Report(sp) } @@ -466,6 +470,11 @@ func (t *Tracer) isDebugAllowed(operation string) bool { return t.debugThrottler.IsAllowed(operation) } +// Sampler returns the sampler given to the tracer at creation. +func (t *Tracer) Sampler() SamplerV2 { + return t.sampler +} + // SelfRef creates an opentracing compliant SpanReference from a jaeger // SpanContext. This is a factory function in order to encapsulate jaeger specific // types. diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go index 1b8db9758..bf2f13165 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go @@ -20,22 +20,15 @@ import ( ) // RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits. +// +// TODO (breaking change) remove this interface in favor of public struct below +// +// Deprecated, use ReconfigurableRateLimiter. type RateLimiter interface { CheckCredit(itemCost float64) bool } -type rateLimiter struct { - sync.Mutex - - creditsPerSecond float64 - balance float64 - maxBalance float64 - lastTick time.Time - - timeNow func() time.Time -} - -// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a +// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a // credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional // to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost // of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased" @@ -47,31 +40,73 @@ type rateLimiter struct { // // It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput // as bytes/second, and calling CheckCredit() with the actual message size. -func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter { - return &rateLimiter{ +// +// TODO (breaking change) rename to RateLimiter once the interface is removed +type ReconfigurableRateLimiter struct { + lock sync.Mutex + + creditsPerSecond float64 + balance float64 + maxBalance float64 + lastTick time.Time + + timeNow func() time.Time +} + +// NewRateLimiter creates a new ReconfigurableRateLimiter. 
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter { + return &ReconfigurableRateLimiter{ creditsPerSecond: creditsPerSecond, balance: maxBalance, maxBalance: maxBalance, lastTick: time.Now(), - timeNow: time.Now} + timeNow: time.Now, + } } -func (b *rateLimiter) CheckCredit(itemCost float64) bool { - b.Lock() - defer b.Unlock() - // calculate how much time passed since the last tick, and update current tick - currentTime := b.timeNow() - elapsedTime := currentTime.Sub(b.lastTick) - b.lastTick = currentTime - // calculate how much credit have we accumulated since the last tick - b.balance += elapsedTime.Seconds() * b.creditsPerSecond - if b.balance > b.maxBalance { - b.balance = b.maxBalance - } +// CheckCredit tries to reduce the current balance by itemCost provided that the current balance +// is not lest than itemCost. +func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool { + rl.lock.Lock() + defer rl.lock.Unlock() + // if we have enough credits to pay for current item, then reduce balance and allow - if b.balance >= itemCost { - b.balance -= itemCost + if rl.balance >= itemCost { + rl.balance -= itemCost + return true + } + // otherwise check if balance can be increased due to time elapsed, and try again + rl.updateBalance() + if rl.balance >= itemCost { + rl.balance -= itemCost return true } return false } + +// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock. +func (rl *ReconfigurableRateLimiter) updateBalance() { + // calculate how much time passed since the last tick, and update current tick + currentTime := rl.timeNow() + elapsedTime := currentTime.Sub(rl.lastTick) + rl.lastTick = currentTime + // calculate how much credit have we accumulated since the last tick + rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond + if rl.balance > rl.maxBalance { + rl.balance = rl.maxBalance + } +} + +// Update changes the main parameters of the rate limiter in-place, while retaining +// the current accumulated balance (pro-rated to the new maxBalance value). Using this method +// instead of creating a new rate limiter helps to avoid thundering herd when sampling +// strategies are updated. 
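(Editorial aside; the `Update` definition described by the doc comment above continues directly below this note.) The hunk above replaces the unexported `rateLimiter` with the exported `ReconfigurableRateLimiter`, whose parameters can be changed without discarding accumulated credits. A minimal usage sketch, assuming only the `NewRateLimiter`, `CheckCredit` and `Update` signatures shown in this file and the vendored package path:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// Replenish 2 credits per second, with a maximum (burst) balance of 2.
	limiter := utils.NewRateLimiter(2.0, 2.0)

	// Spend one credit per admitted item, e.g. one sampled trace.
	fmt.Println(limiter.CheckCredit(1.0)) // true while the balance lasts

	// When the sampling strategy changes, reconfigure in place: the current
	// balance is pro-rated to the new maxBalance rather than reset, which is
	// how the thundering herd mentioned in the Update comment is avoided.
	limiter.Update(10.0, 10.0)
}
```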
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) { + rl.lock.Lock() + defer rl.lock.Unlock() + + rl.updateBalance() // get up to date balance + rl.balance = rl.balance * maxBalance / rl.maxBalance + rl.creditsPerSecond = creditsPerSecond + rl.maxBalance = maxBalance +} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go index 636952b7f..98cab4b6e 100644 --- a/vendor/github.com/uber/jaeger-client-go/zipkin.go +++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go @@ -55,7 +55,7 @@ func (p *zipkinPropagator) Inject( carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs carrier.SetSpanID(uint64(ctx.SpanID())) carrier.SetParentID(uint64(ctx.ParentID())) - carrier.SetFlags(ctx.flags) + carrier.SetFlags(ctx.samplingState.flags()) return nil } @@ -71,6 +71,7 @@ func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, er ctx.traceID.Low = carrier.TraceID() ctx.spanID = SpanID(carrier.SpanID()) ctx.parentID = SpanID(carrier.ParentID()) - ctx.flags = carrier.Flags() + ctx.samplingState = &samplingState{} + ctx.samplingState.setFlags(carrier.Flags()) return ctx, nil } diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..0a4504f11 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +/vendor +/cover +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 000000000..0f3769e5f --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +go: + - 1.11.x + - 1.12.x + +matrix: + include: + - go: 1.12.x + env: NO_TEST=yes LINT=yes + +cache: + directories: + - vendor + +install: + - make install_ci + +script: + - test -n "$NO_TEST" || make test_ci + - test -n "$NO_TEST" || scripts/test-ubergo.sh + - test -z "$LINT" || make install_lint lint + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 000000000..8765c9fbc --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..1ef263075 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,51 @@ +# Many Go tools take file globs or directories as arguments instead of packages. +PACKAGE_FILES ?= *.go + +# For pre go1.6 +export GO15VENDOREXPERIMENT=1 + + +.PHONY: build +build: + go build -i ./... + + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + + +.PHONY: test +test: + go test -cover -race ./... + + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +.PHONY: install_lint +install_lint: + go get golang.org/x/lint/golint + + +.PHONY: lint +lint: + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @go vet ./... 2>&1 | tee -a lint.log;) + @echo "Checking lint..." + @golint $$(go list ./...) 2>&1 | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @[ ! -s lint.log ] + + +.PHONY: test_ci +test_ci: install_ci build + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..62eb8e576 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,36 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation +`go get -u go.uber.org/atomic` + +## Usage +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status +Stable. + +___ +Released under the [MIT License](LICENSE.txt). 
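(Editorial aside, not part of the vendored README.) This package is pulled in because the jaeger-client-go changes earlier in this diff keep the span's propagation flags in an `atomic.Int32` and flip individual bits with a compare-and-swap retry loop (see `samplingState.setFlag` in `span_context.go`). A standalone sketch of that pattern, using only the `Int32.Load` and `Int32.CAS` methods provided by this package; the `flagSampled` constant mirrors the one in the diff:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

const flagSampled = 1 // lowest bit, as in jaeger's SpanContext flags

// setFlag ORs newFlag into flags lock-free, retrying the CAS until it succeeds.
func setFlag(flags *atomic.Int32, newFlag int32) {
	for {
		old := flags.Load()
		if flags.CAS(old, old|newFlag) {
			return
		}
	}
}

func main() {
	var flags atomic.Int32
	setFlag(&flags, flagSampled)
	fmt.Println(flags.Load()&flagSampled == flagSampled) // true
}
```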
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 000000000..1db6849fc --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,351 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. 
+func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// Uint32 is an atomic wrapper around an uint32. +type Uint32 struct{ v uint32 } + +// NewUint32 creates a Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// Uint64 is an atomic wrapper around a uint64. +type Uint64 struct{ v uint64 } + +// NewUint64 creates a Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. 
+func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. +func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. +func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. 
+func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..0489d19ba --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,55 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper around Value for errors +type Error struct{ v Value } + +// errorHolder is non-nil holder for error object. +// atomic.Value panics on saving nil object, so err object needs to be +// wrapped with valid object first. +type errorHolder struct{ err error } + +// NewError creates new atomic error object +func NewError(err error) *Error { + e := &Error{} + if err != nil { + e.Store(err) + } + return e +} + +// Load atomically loads the wrapped error +func (e *Error) Load() error { + v := e.v.Load() + if v == nil { + return nil + } + + eh := v.(errorHolder) + return eh.err +} + +// Store atomically stores error. +// NOTE: a holder object is allocated on each Store call. 
+func (e *Error) Store(err error) { + e.v.Store(errorHolder{err: err}) +} diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock new file mode 100644 index 000000000..3c72c5997 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.lock @@ -0,0 +1,17 @@ +hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 +updated: 2016-10-27T00:10:51.16960137-07:00 +imports: [] +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: d77da356e56a7428ad25149ca77381849a6a5232 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml new file mode 100644 index 000000000..4cf608ec0 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.yaml @@ -0,0 +1,6 @@ +package: go.uber.org/atomic +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..ede8136fa --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. 
+func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 53239b7cd..138c546c2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -42,13 +42,14 @@ github.com/containerd/containerd/errdefs github.com/containerd/continuity/fs github.com/containerd/continuity/sysx github.com/containerd/continuity/syscallx -# github.com/containernetworking/cni v0.7.1 +# github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 github.com/containernetworking/cni/pkg/types github.com/containernetworking/cni/pkg/types/current github.com/containernetworking/cni/pkg/version github.com/containernetworking/cni/libcni github.com/containernetworking/cni/pkg/invoke github.com/containernetworking/cni/pkg/types/020 +github.com/containernetworking/cni/pkg/utils # github.com/containernetworking/plugins v0.8.2 github.com/containernetworking/plugins/pkg/ns github.com/containernetworking/plugins/pkg/ip @@ -169,7 +170,7 @@ github.com/coreos/go-systemd/sdjournal github.com/coreos/go-systemd/journal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/dlopen -# github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca +# github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b github.com/cri-o/ocicni/pkg/ocicni # github.com/cyphar/filepath-securejoin v0.2.2 github.com/cyphar/filepath-securejoin @@ -284,7 +285,7 @@ github.com/imdario/mergo github.com/inconshreveable/mousetrap # github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 github.com/ishidawataru/sctp -# github.com/json-iterator/go v1.1.7 +# github.com/json-iterator/go v1.1.8 github.com/json-iterator/go # github.com/klauspost/compress v1.8.1 github.com/klauspost/compress/zstd @@ -344,7 +345,7 @@ github.com/onsi/ginkgo/internal/containernode github.com/onsi/ginkgo/internal/leafnodes github.com/onsi/ginkgo/internal/spec github.com/onsi/ginkgo/internal/specrunner -# github.com/onsi/gomega v1.7.0 +# github.com/onsi/gomega v1.7.1 github.com/onsi/gomega github.com/onsi/gomega/gexec github.com/onsi/gomega/format @@ -442,7 +443,7 @@ github.com/stretchr/testify/require github.com/syndtr/gocapability/capability # github.com/tchap/go-patricia v2.3.0+incompatible github.com/tchap/go-patricia/patricia -# github.com/uber/jaeger-client-go v2.19.0+incompatible +# github.com/uber/jaeger-client-go v2.20.0+incompatible github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config github.com/uber/jaeger-client-go/internal/baggage @@ -491,6 +492,8 @@ github.com/xeipuuv/gojsonpointer github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.1.0 github.com/xeipuuv/gojsonschema +# go.uber.org/atomic v1.4.0 +go.uber.org/atomic # golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad golang.org/x/crypto/ssh/terminal golang.org/x/crypto/openpgp |
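(Editorial aside.) The long comment added to `setSamplingPriority` in `span.go` warns that forcing sampling on and then off can leave the debug flag behind. A small end-to-end sketch of that behavior, assuming default tracer options; `NewConstSampler` and `NewNullReporter` are standard jaeger-client-go constructors that do not appear in this diff:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/ext"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer(
		"sampling-priority-demo",
		jaeger.NewConstSampler(false), // nothing is sampled unless forced
		jaeger.NewNullReporter(),
	)
	defer closer.Close()

	span := tracer.StartSpan("op")
	ext.SamplingPriority.Set(span, 1) // forces sampling; with default options this also sets flagDebug
	ext.SamplingPriority.Set(span, 0) // clears only flagSampled and finalizes the decision

	sc := span.Context().(jaeger.SpanContext)
	fmt.Println(sc.IsSampled(), sc.IsDebug()) // false true: the debug flag survives
	span.Finish()
}
```

This is exactly the sequence the new comment calls surprising: after forcing and then revoking the priority, `IsSampled()` is false while `IsDebug()` can still report true.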