101 files changed, 2429 insertions, 639 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index fe7a1c332..145e49457 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -136,7 +136,9 @@ gating_task: # in sync at all times. vendor_task: - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' depends_on: - "gating" @@ -166,7 +168,9 @@ vendor_task: # whether the git tree is clean. varlink_api_task: - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' depends_on: - "gating" @@ -201,8 +205,10 @@ build_each_commit_task: - "vendor" - "varlink_api" - only_if: $CIRRUS_BRANCH != $DEST_BRANCH && - $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_BRANCH != $DEST_BRANCH && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' gce_instance: image_project: "libpod-218412" @@ -232,8 +238,10 @@ build_without_cgo_task: - "vendor" - "varlink_api" - only_if: $CIRRUS_BRANCH != $DEST_BRANCH && - $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_BRANCH != $DEST_BRANCH && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' gce_instance: image_project: "libpod-218412" @@ -326,7 +334,9 @@ testing_task: - "build_without_cgo" # Only test build cache-images, if that's what's requested - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' gce_instance: matrix: @@ -381,7 +391,9 @@ special_testing_rootless_task: - "build_each_commit" - "build_without_cgo" - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' env: ADD_SECOND_PARTITION: true @@ -413,7 +425,9 @@ special_testing_in_podman_task: - "build_each_commit" - "build_without_cgo" - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' env: ADD_SECOND_PARTITION: true @@ -439,7 +453,9 @@ special_testing_cross_task: - "varlink_api" - "vendor" - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' env: matrix: @@ -473,7 +489,9 @@ special_testing_cgroupv2_task: - "varlink_api" - "vendor" - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' gce_instance: image_name: "${SPECIAL_FEDORA_CACHE_IMAGE_NAME}" @@ -503,7 +521,9 @@ special_testing_endpoint_task: - "varlink_api" - "vendor" - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' env: SPECIALMODE: 'endpoint' # See docs @@ -525,7 +545,9 @@ test_building_snap_task: depends_on: - "gating" - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + only_if: >- + $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && + $CIRRUS_CHANGE_MESSAGE !=~ 
'.*CI:DOCS.*' container: image: yakshaveinc/snapcraft:core18 @@ -539,7 +561,7 @@ test_build_cache_images_task: only_if: >- $CIRRUS_BRANCH != $DEST_BRANCH && - $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*' depends_on: - "gating" @@ -571,7 +593,7 @@ verify_test_built_images_task: only_if: >- $CIRRUS_BRANCH != $DEST_BRANCH && - $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*' depends_on: @@ -638,6 +660,26 @@ upload_snap_task: - 'cd contrib/snapcraft && snapcraft && snapcraft push *.snap --release edge' +test_docs_task: + + only_if: >- + $CIRRUS_BRANCH != $DEST_BRANCH && + $CIRRUS_CHANGE_MESSAGE =~ '.*CI:DOCS.*' + depends_on: + - "gating" + - "vendor" + #- "test_docs" + #- "varlink_api" + #- "build_each_commit" + stub_script: '/bin/true' + + +#publish_docs_task: +# +# only_if: $CIRRUS_BRANCH == $DEST_BRANCH && +# $CIRRUS_TAG =~ '^v[0-9]\.[0-9]\.[0-9]$' + + # Post message to IRC if everything passed PR testing success_task: @@ -664,6 +706,7 @@ success_task: - "test_building_snap" - "upload_snap" - "verify_test_built_images" + - "test_docs" env: CIRRUS_WORKING_DIR: "/usr/src/libpod" @@ -376,7 +376,7 @@ podman-remote-%-release: $(MAKE) podman-remote-v$(RELEASE_NUMBER)-$*.zip docker-docs: docs - (cd docs; ./dckrman.sh *.1) + (cd docs; ./dckrman.sh ./build/man/*.1) changelog: ## Generate changelog @echo "Creating changelog from $(CHANGELOG_BASE) to $(CHANGELOG_TARGET)" @@ -425,7 +425,7 @@ install.cni: install.docker: docker-docs install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1 install ${SELINUXOPT} -m 755 docker $(DESTDIR)$(BINDIR)/docker - install ${SELINUXOPT} -m 644 docs/docker*.1 -t $(DESTDIR)$(MANDIR)/man1 + install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1 install.systemd: install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR} ${DESTDIR}${TMPFILESDIR} @@ -5,7 +5,7 @@ Libpod provides a library for applications looking to use the Container Pod concept, popularized by Kubernetes. Libpod also contains the Pod Manager tool `(Podman)`. Podman manages pods, containers, container images, and container volumes. -* [Latest Version: 1.6.2](https://github.com/containers/libpod/releases/latest) +* [Latest Version: 1.6.3](https://github.com/containers/libpod/releases/latest) * [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master) * [GoDoc: ![GoDoc](https://godoc.org/github.com/containers/libpod/libpod?status.svg)](https://godoc.org/github.com/containers/libpod/libpod) * Automated continuous release downloads (including remote-client): @@ -106,7 +106,7 @@ Information on how Podman configures [OCI Hooks][spec-hooks] to run when launchi **[Podman API](API.md)** Documentation on the Podman API using [Varlink](https://www.varlink.org/). -**[Podman Commands](commands.md)** +**[Podman Commands](https://podman.readthedocs.io/en/latest/Commands.html)** A list of the Podman commands with links to their man pages and in many cases videos showing the commands in use. diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 235871273..cefad25f4 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,5 +1,34 @@ # Release Notes +## 1.6.3 +### Features +- Handling of the `libpod.conf` configuration file has seen major changes. 
Most significantly, rootless users will no longer automatically receive a complete configuration file when they first use Podman, and will instead only receive differences from the global configuration. +- Initial support for the CNI DNS plugin, which allows containers to resolve the IPs of other containers via DNS name, has been added +- Podman now supports anonymous named volumes, created by specifying only a destination to the `-v` flag to the `podman create` and `podman run` commands +- Named volumes now support `uid` and `gid` options in `--opt o=...` to set UID and GID of the created volume + +### Bugfixes +- Fixed a bug where the `podman start` command would print container ID, instead of name, when starting containers given their name +- Fixed a bug where named volumes with options did not properly detect issues with mounting the volume, leading to an inconsistent state ([#4303](https://github.com/containers/libpod/issues/4303)) +- Fixed a bug where incorrect Seccomp profiles were used in containers generated by `podman play kube` +- Fixed a bug where processes started by `podman exec` would have the wrong SELinux label in some circumstances ([#4361](https://github.com/containers/libpod/issues/4361)) +- Fixed a bug where error messages from `slirp4netns` would be lost +- Fixed a bug where `podman run --network=$NAME` would not throw an error in rootless Podman, where CNI networks are not supported +- Fixed a bug where `podman network create` would throw confusing errors when trying to create a volume with a name that already exists +- Fixed a bug where Podman would not error if the `systemd` CGroup manager was specified, but systemd could not be contacted over DBus +- Fixed a bug where image volumes were mounted `noexec` ([#4318](https://github.com/containers/libpod/issues/4318)) +- Fixed a bug where the `podman stats` command required the name of a container to be given, instead of showing all containers when no container was specified ([#4274](https://github.com/containers/libpod/issues/4274)) +- Fixed a bug where the `podman volume inspect` command would not show the options that named volumes were created with +- Fixed a bug where custom storage configuration was not written to `storage.conf` at time of first creation for rootless Podman ([#2659](https://github.com/containers/libpod/issues/2659)) +- Fixed a bug where remote Podman did not support shell redirection of container output + +### Misc +- Updated vendored containers/image library to v5.0 +- Initial support for images using manifest lists has been added, though commands for directly interacting with manifests are still missing +- Support for pushing to and pulling from OSTree has been removed due to deprecation in the containers/image library +- Rootless Podman no longer enables linger on systems with systemd as init by default. 
As such, containers will now be killed when the user who ran them logs out, unless linger is explicitly enabled using [loginctl](https://www.freedesktop.org/software/systemd/man/loginctl.html) +- Podman will now check the version of `conmon` that is in use to ensure it is sufficient + ## 1.6.2 ### Features - Added a `--runtime` flag to `podman system migrate` to allow the OCI runtime for all containers to be reset, to ease transition to the `crun` runtime on CGroups V2 systems until `runc` gains full support diff --git a/cmd/podman/attach.go b/cmd/podman/attach.go index b78633ed6..b03673f29 100644 --- a/cmd/podman/attach.go +++ b/cmd/podman/attach.go @@ -2,6 +2,7 @@ package main import ( "github.com/containers/libpod/cmd/podman/cliconfig" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/adapter" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -31,7 +32,7 @@ func init() { attachCommand.SetHelpTemplate(HelpTemplate()) attachCommand.SetUsageTemplate(UsageTemplate()) flags := attachCommand.Flags() - flags.StringVar(&attachCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") + flags.StringVar(&attachCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") flags.BoolVar(&attachCommand.NoStdin, "no-stdin", false, "Do not attach STDIN. The default is false") flags.BoolVar(&attachCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process") flags.BoolVarP(&attachCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of") diff --git a/cmd/podman/build.go b/cmd/podman/build.go index 896d5661a..bbc1d5b5f 100644 --- a/cmd/podman/build.go +++ b/cmd/podman/build.go @@ -238,6 +238,9 @@ func buildCmd(c *cliconfig.BuildValues) error { if contextDir == "" { return errors.Errorf("no context directory specified, and no containerfile specified") } + if !fileIsDir(contextDir) { + return errors.Errorf("context must be a directory: %v", contextDir) + } if len(containerfiles) == 0 { if checkIfFileExists(filepath.Join(contextDir, "Containerfile")) { containerfiles = append(containerfiles, filepath.Join(contextDir, "Containerfile")) diff --git a/cmd/podman/common.go b/cmd/podman/common.go index 33a848553..8afbe2e0b 100644 --- a/cmd/podman/common.go +++ b/cmd/podman/common.go @@ -132,7 +132,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) { "Drop capabilities from the container", ) createFlags.String( - "cgroupns", "host", + "cgroupns", "", "cgroup namespace to use", ) createFlags.String( @@ -188,7 +188,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) { "Run container in background and print container ID", ) createFlags.String( - "detach-keys", "", + "detach-keys", define.DefaultDetachKeys, "Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`", ) createFlags.StringSlice( diff --git a/cmd/podman/exec.go b/cmd/podman/exec.go index 649a7b0db..afa701897 100644 --- a/cmd/podman/exec.go +++ b/cmd/podman/exec.go @@ -2,6 +2,7 @@ package main import ( "github.com/containers/libpod/cmd/podman/cliconfig" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/adapter" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -34,7 +35,7 @@ func init() { execCommand.SetUsageTemplate(UsageTemplate()) flags := execCommand.Flags() flags.SetInterspersed(false) - flags.StringVar(&execCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _") + flags.StringVar(&execCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _") flags.StringArrayVarP(&execCommand.Env, "env", "e", []string{}, "Set environment variables") flags.BoolVarP(&execCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached") flags.BoolVarP(&execCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of") diff --git a/cmd/podman/images.go b/cmd/podman/images.go index 6157fda2a..7d498517c 100644 --- a/cmd/podman/images.go +++ b/cmd/podman/images.go @@ -291,6 +291,10 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma if len(tag) == 71 && strings.HasPrefix(tag, "sha256:") { imageDigest = digest.Digest(tag) tag = "" + } else { + if img.Digest() != "" { + imageDigest = img.Digest() + } } params := imagesTemplateParams{ Repository: repo, diff --git a/cmd/podman/main_local.go b/cmd/podman/main_local.go index 6057eeec3..f630f1210 100644 --- a/cmd/podman/main_local.go +++ b/cmd/podman/main_local.go @@ -16,7 +16,6 @@ import ( "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/libpodruntime" - "github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" @@ -34,9 +33,6 @@ const remote = false func init() { cgroupManager := define.SystemdCgroupsManager - if runtimeConfig, err := config.NewConfig(""); err == nil { - cgroupManager = runtimeConfig.CgroupManager - } cgroupHelp := "Cgroup manager to use (cgroupfs or systemd)" cgroupv2, _ := cgroups.IsCgroup2UnifiedMode() if rootless.IsRootless() && !cgroupv2 { diff --git a/cmd/podman/start.go b/cmd/podman/start.go index 2d2cf74d2..d4b4534bb 100644 --- a/cmd/podman/start.go +++ b/cmd/podman/start.go @@ -35,7 +35,7 @@ func init() { startCommand.SetUsageTemplate(UsageTemplate()) flags := startCommand.Flags() flags.BoolVarP(&startCommand.Attach, "attach", "a", false, "Attach container's STDOUT and STDERR") - flags.StringVar(&startCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") + flags.StringVar(&startCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. 
Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") flags.BoolVarP(&startCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached") flags.BoolVarP(&startCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of") flags.BoolVar(&startCommand.SigProxy, "sig-proxy", false, "Proxy received signals to the process (default true if attaching, false otherwise)") diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go index c19e6391e..21389b43a 100644 --- a/cmd/podman/utils.go +++ b/cmd/podman/utils.go @@ -74,3 +74,13 @@ func checkIfFileExists(name string) bool { } return !file.IsDir() } + +// Check if a file is or is not a directory +func fileIsDir(name string) bool { + file, err := os.Stat(name) + // All errors return file == nil + if err != nil { + return false + } + return file.IsDir() +} diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in index 8e0cb9950..33ecc8eba 100644 --- a/contrib/spec/podman.spec.in +++ b/contrib/spec/podman.spec.in @@ -39,7 +39,7 @@ %global shortcommit_conmon %(c=%{commit_conmon}; echo ${c:0:7}) Name: podman -Version: 1.6.3 +Version: 1.6.4 Release: #COMMITDATE#.git%{shortcommit0}%{?dist} Summary: Manage Pods, Containers and Container Images License: ASL 2.0 diff --git a/docs/containers-mounts.conf.5.md b/docs/source/markdown/containers-mounts.conf.5.md index 130c1c523..130c1c523 100644 --- a/docs/containers-mounts.conf.5.md +++ b/docs/source/markdown/containers-mounts.conf.5.md diff --git a/docs/libpod.conf.5.md b/docs/source/markdown/libpod.conf.5.md index c28c80b56..c28c80b56 100644 --- a/docs/libpod.conf.5.md +++ b/docs/source/markdown/libpod.conf.5.md diff --git a/docs/source/markdown/podman-attach.1.md b/docs/source/markdown/podman-attach.1.md index cef01f0f6..1ac2e49a9 100644 --- a/docs/source/markdown/podman-attach.1.md +++ b/docs/source/markdown/podman-attach.1.md @@ -20,9 +20,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information. ## OPTIONS **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--latest**, **-l** diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md index 6617850fd..c10cf2cfa 100644 --- a/docs/source/markdown/podman-create.1.md +++ b/docs/source/markdown/podman-create.1.md @@ -67,12 +67,14 @@ Drop Linux capabilities **--cgroupns**=*mode* -Set the cgroup namespace mode for the container, by default **host** is used. +Set the cgroup namespace mode for the container. **host**: use the host's cgroup namespace inside the container. **container:<NAME|ID>**: join the namespace of the specified container. **private**: create a new cgroup namespace. **ns:<PATH>**: join the namespace at the specified path. +If the host uses cgroups v1, the default is set to **host**. On cgroups v2 the default is **private**. + **--cgroups**=*mode* Determines whether the container will create CGroups. 
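The new `fileIsDir` helper in `cmd/podman/utils.go` above mirrors the existing `checkIfFileExists` helper and is what lets `podman build` reject a context path that is not a directory. A minimal, self-contained sketch of that validation using only the standard library (the `main` wrapper is illustrative, not code from the diff):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// fileIsDir reports whether name exists and is a directory.
func fileIsDir(name string) bool {
	info, err := os.Stat(name)
	if err != nil {
		return false
	}
	return info.IsDir()
}

// checkIfFileExists reports whether name exists and is not a directory.
func checkIfFileExists(name string) bool {
	info, err := os.Stat(name)
	if err != nil {
		return false
	}
	return !info.IsDir()
}

func main() {
	contextDir := "."
	// Reject a build context that is not a directory, as the patched build command does.
	if !fileIsDir(contextDir) {
		fmt.Fprintf(os.Stderr, "context must be a directory: %v\n", contextDir)
		os.Exit(1)
	}
	// Fall back to a Containerfile found inside the context.
	if checkIfFileExists(filepath.Join(contextDir, "Containerfile")) {
		fmt.Println("using", filepath.Join(contextDir, "Containerfile"))
	}
}
```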
@@ -196,9 +198,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information. **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--device**=*device* diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md index 4c17c056a..9624425dc 100644 --- a/docs/source/markdown/podman-exec.1.md +++ b/docs/source/markdown/podman-exec.1.md @@ -15,7 +15,7 @@ podman\-exec - Execute a command in a running container **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--env**, **-e** diff --git a/docs/podman-remote.conf.5.md b/docs/source/markdown/podman-remote.conf.5.md index e9cc05989..e9cc05989 100644 --- a/docs/podman-remote.conf.5.md +++ b/docs/source/markdown/podman-remote.conf.5.md diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md index d6d8f4c1e..75f1f94ce 100644 --- a/docs/source/markdown/podman-run.1.md +++ b/docs/source/markdown/podman-run.1.md @@ -81,12 +81,14 @@ Drop Linux capabilities **--cgroupns**=*mode* -Set the cgroup namespace mode for the container, by default **host** is used. +Set the cgroup namespace mode for the container. **host**: use the host's cgroup namespace inside the container. **container:<NAME|ID>**: join the namespace of the specified container. **private**: create a new cgroup namespace. **ns:<PATH>**: join the namespace at the specified path. +If the host uses cgroups v1, the default is set to **host**. On cgroups v2 the default is **private**. + **--cgroups**=*mode* Determines whether the container will create CGroups. @@ -202,9 +204,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information. **--detach-keys**=*sequence* -Override the key sequence for detaching a container. Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--device**=*device* diff --git a/docs/source/markdown/podman-start.1.md b/docs/source/markdown/podman-start.1.md index 4c81d17bd..84af9d800 100644 --- a/docs/source/markdown/podman-start.1.md +++ b/docs/source/markdown/podman-start.1.md @@ -23,9 +23,7 @@ starting multiple containers. **--detach-keys**=*sequence* -Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or -a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: -`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`. +Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*. **--interactive**, **-i** @@ -11,7 +11,7 @@ require ( github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect github.com/containernetworking/cni v0.7.1 github.com/containernetworking/plugins v0.8.2 - github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e + github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 github.com/containers/image/v5 v5.0.0 github.com/containers/psgo v1.3.2 github.com/containers/storage v1.13.5 @@ -59,7 +59,7 @@ require ( github.com/stretchr/testify v1.4.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/uber-go/atomic v1.4.0 // indirect - github.com/uber/jaeger-client-go v2.19.0+incompatible + github.com/uber/jaeger-client-go v2.20.0+incompatible github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b github.com/vishvananda/netlink v1.0.0 @@ -71,7 +71,7 @@ require ( google.golang.org/appengine v1.6.1 // indirect google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.2.4 + gopkg.in/yaml.v2 v2.2.5 k8s.io/api v0.0.0-20190813020757-36bff7324fb7 k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 k8s.io/client-go v0.0.0-20190620085101-78d2af792bab @@ -57,6 +57,8 @@ github.com/containernetworking/plugins v0.8.2 h1:5lnwfsAYO+V7yXhysJKy3E1A2Gy9oVu github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr7U8R23PdhwaLp6F3adc= github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e h1:iDavHEx5Yr7o+0l6495Ya6N0YEPplIUZuWC2e14baDM= github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e/go.mod h1:Igrk75FAxLnzDaHUbtpWB8pwL+Bv+cnakWMvqAXW2v8= +github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2sJSbmxwLHZLHc41TrIPrP0GlbhX+WDJBqvs= +github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc= github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4= github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= @@ -283,6 +285,7 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -398,6 +401,8 @@ github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= github.com/uber-go/atomic 
v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.19.0+incompatible h1:pbwbYfHUoaase0oPQOdZ1GcaUjImYGimUXSQ/+8+Z8Q= github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.20.0+incompatible h1:ttG9wKdl2ikV/BGOtu+eb+VPp+R7jMeuM177Ihs5Fdc= +github.com/uber/jaeger-client-go v2.20.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 h1:CwmGyzHTzCqCdZJkWR0A7ucZXgrCY7spRcpvm7ci//s= github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -541,6 +546,8 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= diff --git a/install.md b/install.md index 2583c4f9d..39b639176 100644 --- a/install.md +++ b/install.md @@ -72,6 +72,8 @@ sudo apt-get install -qq -y software-properties-common uidmap sudo add-apt-repository -y ppa:projectatomic/ppa sudo apt-get update -qq sudo apt-get -qq -y install podman +sudo mkdir -p /etc/containers +echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf ``` #### Fedora diff --git a/libpod.conf b/libpod.conf index 47f66ecc1..7e0228c19 100644 --- a/libpod.conf +++ b/libpod.conf @@ -142,8 +142,36 @@ runc = [ ] crun = [ - "/usr/bin/crun", - "/usr/local/bin/crun", + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", +] + +# Kata Containers is an OCI runtime, where containers are run inside lightweight +# Virtual Machines (VMs). Kata provides additional isolation towards the host, +# minimizing the host attack surface and mitigating the consequences of +# containers breakout. +# Please notes that Kata does not support rootless podman yet, but we can leave +# the paths below blank to let them be discovered by the $PATH environment +# variable. + +# Kata Containers with the default configured VMM +kata-runtime = [ + "/usr/bin/kata-runtime", +] + +# Kata Containers with the QEMU VMM +kata-qemu = [ + "/usr/bin/kata-qemu", +] + +# Kata Containers with the Firecracker VMM +kata-fc = [ + "/usr/bin/kata-fc", ] # The [runtimes] table MUST be the last thing in this file. 
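The `crun` and Kata entries added to `libpod.conf` above only list candidate binary paths; per the new comment, the paths can also be left blank so the runtime is discovered via `$PATH`. A rough sketch of that kind of lookup, assuming nothing beyond the standard library (this is illustrative, not libpod's actual resolution code):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// resolveRuntime returns the first existing binary from paths,
// falling back to a $PATH lookup when no explicit path matches.
func resolveRuntime(name string, paths []string) (string, error) {
	for _, p := range paths {
		if st, err := os.Stat(p); err == nil && !st.IsDir() {
			return p, nil
		}
	}
	if found, err := exec.LookPath(name); err == nil {
		return found, nil
	}
	return "", fmt.Errorf("OCI runtime %q not found", name)
}

func main() {
	// Candidate paths as configured for Kata in the snippet above.
	kata := []string{"/usr/bin/kata-runtime"}
	path, err := resolveRuntime("kata-runtime", kata)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("using OCI runtime at", path)
}
```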
diff --git a/libpod/config/default.go b/libpod/config/default.go index 17574c059..5decaeab7 100644 --- a/libpod/config/default.go +++ b/libpod/config/default.go @@ -6,6 +6,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" "github.com/containers/storage" @@ -47,6 +48,12 @@ func defaultConfigFromMemory() (*Config, error) { c.ImageDefaultTransport = _defaultTransport c.StateType = define.BoltDBStateStore c.OCIRuntime = "runc" + + // If we're running on cgroups v2, default to using crun. + if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 { + c.OCIRuntime = "crun" + } + c.OCIRuntimes = map[string][]string{ "runc": { "/usr/bin/runc", @@ -58,7 +65,15 @@ func defaultConfigFromMemory() (*Config, error) { "/usr/lib/cri-o-runc/sbin/runc", "/run/current-system/sw/bin/runc", }, - // TODO - should we add "crun" defaults here as well? + "crun": { + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", + }, } c.ConmonPath = []string{ "/usr/libexec/podman/conmon", diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 94184b6eb..471648bc8 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -1088,7 +1088,7 @@ func (c *Container) makeBindMounts() error { } // Add Secret Mounts - secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless()) + secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) for _, mount := range secretMounts { if _, ok := c.state.BindMounts[mount.Destination]; !ok { c.state.BindMounts[mount.Destination] = mount.Source diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go index 470c76959..9e6fffc29 100644 --- a/libpod/events/journal_linux.go +++ b/libpod/events/journal_linux.go @@ -54,6 +54,7 @@ func (e EventJournalD) Write(ee Event) error { // Read reads events from the journal and sends qualified events to the event channel func (e EventJournalD) Read(options ReadOptions) error { + defer close(options.EventChannel) eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until) if err != nil { return errors.Wrapf(err, "failed to generate event options") @@ -87,7 +88,6 @@ func (e EventJournalD) Read(options ReadOptions) error { if err != nil { return err } - defer close(options.EventChannel) for { if _, err := j.Next(); err != nil { return err diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go index 4b65b0ad0..93e6fa3c9 100644 --- a/libpod/events/logfile.go +++ b/libpod/events/logfile.go @@ -41,6 +41,7 @@ func (e EventLogFile) Write(ee Event) error { // Reads from the log file func (e EventLogFile) Read(options ReadOptions) error { + defer close(options.EventChannel) eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until) if err != nil { return errors.Wrapf(err, "unable to generate event options") @@ -68,7 +69,6 @@ func (e EventLogFile) Read(options ReadOptions) error { options.EventChannel <- event } } - close(options.EventChannel) return nil } diff --git 
a/libpod/image/pull.go b/libpod/image/pull.go index 7f5dc33b9..99c11e3ff 100644 --- a/libpod/image/pull.go +++ b/libpod/image/pull.go @@ -230,7 +230,12 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s sc.BlobInfoCacheDir = filepath.Join(ir.store.GraphRoot(), "cache") srcRef, err := alltransports.ParseImageName(inputName) if err != nil { - // could be trying to pull from registry with short name + // We might be pulling with an unqualified image reference in which case + // we need to make sure that we're not using any other transport. + srcTransport := alltransports.TransportFromImageName(inputName) + if srcTransport != nil && srcTransport.Name() != DockerTransport { + return nil, err + } goal, err = ir.pullGoalFromPossiblyUnqualifiedName(inputName) if err != nil { return nil, errors.Wrap(err, "error getting default registries to try") @@ -347,6 +352,7 @@ func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(inputName string) (*pullG if err != nil { return nil, err } + if decomposedImage.hasRegistry { srcRef, err := docker.ParseReference("//" + inputName) if err != nil { diff --git a/libpod/kube.go b/libpod/kube.go index d0e7baf95..47a77991e 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -487,13 +487,16 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { if err := c.syncContainer(); err != nil { return nil, errors.Wrapf(err, "unable to sync container during YAML generation") } + logrus.Debugf("Looking in container for user: %s", c.User()) - u, err := lookup.GetUser(c.state.Mountpoint, c.User()) + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil) if err != nil { return nil, err } - user := int64(u.Uid) - sc.RunAsUser = &user + uid := int64(execUser.Uid) + gid := int64(execUser.Gid) + sc.RunAsUser = &uid + sc.RunAsGroup = &gid } return &sc, nil } diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index daa0619a2..ac1144fbe 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -295,7 +295,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { defer close(chWait) // wait that API socket file appears before trying to use it. 
- if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout*time.Millisecond); err != nil { + if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil { return errors.Wrapf(err, "waiting for slirp4nets to create the api socket file %s", apiSocket) } diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go index eeaee6d43..46c70e7eb 100644 --- a/libpod/oci_attach_linux.go +++ b/libpod/oci_attach_linux.go @@ -152,7 +152,7 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c func processDetachKeys(keys string) ([]byte, error) { // Check the validity of the provided keys first if len(keys) == 0 { - keys = define.DefaultDetachKeys + return []byte{}, nil } detachKeys, err := term.ToBytes(keys) if err != nil { diff --git a/libpod/stats.go b/libpod/stats.go index 5513abce5..3b5e0958c 100644 --- a/libpod/stats.go +++ b/libpod/stats.go @@ -3,7 +3,6 @@ package libpod import ( - "runtime" "strings" "syscall" "time" @@ -56,8 +55,8 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container } previousCPU := previousStats.CPUNano - previousSystem := previousStats.SystemNano - stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, previousSystem) + now := uint64(time.Now().UnixNano()) + stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano) stats.MemUsage = cgroupStats.Memory.Usage.Usage stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit) stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100 @@ -67,7 +66,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container } stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats) stats.CPUNano = cgroupStats.CPU.Usage.Total - stats.SystemNano = cgroupStats.CPU.Usage.Kernel + stats.SystemNano = now // Handle case where the container is not in a network namespace if netStats != nil { stats.NetInput = netStats.TxBytes @@ -98,20 +97,19 @@ func getMemLimit(cgroupLimit uint64) uint64 { return cgroupLimit } -func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uint64) float64 { +// calculateCPUPercent calculates the cpu usage using the latest measurement in stats. +// previousCPU is the last value of stats.CPU.Usage.Total measured at the time previousSystem. +// (now - previousSystem) is the time delta in nanoseconds, between the measurement in previousCPU +// and the updated value in stats. 
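Before the reworked `calculateCPUPercent` body continues below: the change stores the wall-clock time of each sample in `SystemNano` and divides the container's CPU-time delta by the elapsed wall-clock delta, rather than multiplying by a CPU count. A hedged, self-contained illustration of that formula (the sample values are made up):

```go
package main

import "fmt"

// cpuPercent returns container CPU usage as a percentage of one CPU,
// given two samples of cumulative CPU time and the wall-clock times
// at which they were taken (all values in nanoseconds).
func cpuPercent(prevCPU, curCPU, prevTime, curTime uint64) float64 {
	cpuDelta := float64(curCPU - prevCPU)
	systemDelta := float64(curTime - prevTime)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * 100
	}
	return 0.0
}

func main() {
	// 250ms of CPU time consumed over a 1s sampling window -> 25%.
	fmt.Printf("%.1f%%\n", cpuPercent(0, 250000000, 0, 1000000000))
}
```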
+func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSystem uint64) float64 { var ( cpuPercent = 0.0 cpuDelta = float64(stats.CPU.Usage.Total - previousCPU) - systemDelta = float64(uint64(time.Now().UnixNano()) - previousSystem) + systemDelta = float64(now - previousSystem) ) if systemDelta > 0.0 && cpuDelta > 0.0 { - // gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running - // at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage - nCPUS := len(stats.CPU.Usage.PerCPU) - if nCPUS == 0 { - nCPUS = runtime.NumCPU() - } - cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100 + // gets a ratio of container cpu usage total, and multiplies that by 100 to get a percentage + cpuPercent = (cpuDelta / systemDelta) * 100 } return cpuPercent } diff --git a/libpod/util.go b/libpod/util.go index 7bd834e30..bae2f4eb8 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -187,6 +187,9 @@ func programVersion(mountProgram string) (string, error) { return strings.TrimSuffix(output, "\n"), nil } +// DefaultSeccompPath returns the path to the default seccomp.json file +// if it exists, first it checks OverrideSeccomp and then default. +// If neither exist function returns "" func DefaultSeccompPath() (string, error) { _, err := os.Stat(config.SeccompOverridePath) if err == nil { diff --git a/logo/podman-logo-source.svg b/logo/podman-logo-source.svg index 9e5cc6625..135624a42 100644 --- a/logo/podman-logo-source.svg +++ b/logo/podman-logo-source.svg @@ -251,7 +251,7 @@ ry="0.88143349" rx="0.8483994" cy="-8.3384542" - cx="967.05286" + cx="965.94666" id="ellipse10873" style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.68990111;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /> </g> @@ -386,8 +386,8 @@ <ellipse ry="0.77958798" rx="0.75153452" - cy="8.4197426" - cx="954.63715" + cy="8.5291576" + cx="953.76178" id="ellipse10927" style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.95576686;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /> </g> @@ -512,8 +512,8 @@ <ellipse style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" id="ellipse10977" - cx="974.20233" - cy="6.3892479" + cx="973.4754" + cy="6.4801154" rx="0.66728675" ry="0.69219536" /> </g> diff --git a/logo/podman-logo.png b/logo/podman-logo.png Binary files differindex 70c8b1191..90acf9cbc 100644 --- a/logo/podman-logo.png +++ b/logo/podman-logo.png diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go index 64550f545..a09466243 100644 --- a/pkg/adapter/containers.go +++ b/pkg/adapter/containers.go @@ -400,17 +400,8 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode } } - config, err := r.Runtime.GetConfig() - if err != nil { - return exitCode, err - } - detachKeys := c.String("detach-keys") - if detachKeys == "" { - detachKeys = config.DetachKeys - } - // if the container was created as part of a pod, also start its dependencies, if any. 
- if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, detachKeys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil { + if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil { // We've manually detached from the container // Do not perform cleanup, or wait for container exit code // Just exit immediately diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go index d8d5b884f..6648edc82 100644 --- a/pkg/adapter/pods.go +++ b/pkg/adapter/pods.go @@ -666,6 +666,58 @@ func getPodPorts(containers []v1.Container) []ocicni.PortMapping { return infraPorts } +func setupSecurityContext(containerConfig *createconfig.CreateConfig, containerYAML v1.Container) { + if containerYAML.SecurityContext == nil { + return + } + if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { + containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem + } + if containerYAML.SecurityContext.Privileged != nil { + containerConfig.Privileged = *containerYAML.SecurityContext.Privileged + } + + if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { + containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation + } + + if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil { + if seopt.User != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User)) + } + if seopt.Role != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role)) + } + if seopt.Type != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type)) + } + if seopt.Level != "" { + containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level)) + containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level)) + } + } + if caps := containerYAML.SecurityContext.Capabilities; caps != nil { + for _, capability := range caps.Add { + containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability)) + } + for _, capability := range caps.Drop { + containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability)) + } + } + if containerYAML.SecurityContext.RunAsUser != nil { + containerConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser) + } + if containerYAML.SecurityContext.RunAsGroup != nil { + if containerConfig.User == "" { + containerConfig.User = "0" + } + containerConfig.User = fmt.Sprintf("%s:%d", containerConfig.User, *containerYAML.SecurityContext.RunAsGroup) + } +} + // kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) { var ( @@ -690,29 +742,8 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container containerConfig.User = imageData.Config.User } - if 
containerYAML.SecurityContext != nil { - if containerConfig.SecurityOpts != nil { - if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { - containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem - } - if containerYAML.SecurityContext.Privileged != nil { - containerConfig.Privileged = *containerYAML.SecurityContext.Privileged - } - - if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { - containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation - } + setupSecurityContext(&containerConfig, containerYAML) - } - if caps := containerYAML.SecurityContext.Capabilities; caps != nil { - for _, capability := range caps.Add { - containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability)) - } - for _, capability := range caps.Drop { - containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability)) - } - } - } var err error containerConfig.SeccompProfilePath, err = libpod.DefaultSeccompPath() if err != nil { diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go index 4f70e90f9..81a43853c 100644 --- a/pkg/adapter/runtime.go +++ b/pkg/adapter/runtime.go @@ -338,7 +338,7 @@ func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) e return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress) } -// LoadImage is a wrapper function for libpod PruneVolumes +// LoadImage is a wrapper function for libpod LoadImage func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) { var ( writer io.Writer diff --git a/pkg/cgroups/cpu.go b/pkg/cgroups/cpu.go index 03677f1ef..a43a76b22 100644 --- a/pkg/cgroups/cpu.go +++ b/pkg/cgroups/cpu.go @@ -81,14 +81,14 @@ func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error { return err } if val, found := values["usage_usec"]; found { - usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 0) + usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 0) if err != nil { return err } usage.Kernel *= 1000 } if val, found := values["system_usec"]; found { - usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 0) + usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 0) if err != nil { return err } diff --git a/pkg/namespaces/namespaces.go b/pkg/namespaces/namespaces.go index 9d1033b93..78b55bb2a 100644 --- a/pkg/namespaces/namespaces.go +++ b/pkg/namespaces/namespaces.go @@ -25,6 +25,11 @@ func (n CgroupMode) IsHost() bool { return n == hostType } +// IsDefaultValue indicates whether the cgroup namespace has the default value. +func (n CgroupMode) IsDefaultValue() bool { + return n == "" +} + // IsNS indicates a cgroup namespace passed in by path (ns:<path>) func (n CgroupMode) IsNS() bool { return strings.HasPrefix(string(n), nsType) diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c index 94933ddd0..9604de638 100644 --- a/pkg/rootless/rootless_linux.c +++ b/pkg/rootless/rootless_linux.c @@ -24,12 +24,16 @@ int renameat2 (int olddirfd, const char *oldpath, int newdirfd, const char *newpath, unsigned int flags) { -# ifdef __NR_renameat2 - return (int) syscall (__NR_renameat2, olddirfd, oldpath, newdirfd, newpath, flags); +# ifdef SYS_renameat2 + return (int) syscall (SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, flags); # else - /* no way to implement it atomically. */ - errno = ENOSYS; - return -1; + /* This might be an issue if another process is trying to read the file while it is empty. 
*/ + int fd = open (newpath, O_EXCL|O_CREAT, 0700); + if (fd < 0) + return fd; + close (fd); + /* We are sure we created the file, let's overwrite it. */ + return rename (oldpath, newpath); # endif } #endif diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go index 86d701f7e..33e9ec076 100644 --- a/pkg/spec/spec.go +++ b/pkg/spec/spec.go @@ -631,6 +631,19 @@ func addIpcNS(config *CreateConfig, g *generate.Generator) error { func addCgroupNS(config *CreateConfig, g *generate.Generator) error { cgroupMode := config.CgroupMode + + if cgroupMode.IsDefaultValue() { + // If the value is not specified, default to "private" on cgroups v2 and "host" on cgroups v1. + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return err + } + if unified { + cgroupMode = "private" + } else { + cgroupMode = "host" + } + } if cgroupMode.IsNS() { return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), NS(string(cgroupMode))) } diff --git a/rootless.md b/rootless.md index 8cccb86eb..4fb3c7deb 100644 --- a/rootless.md +++ b/rootless.md @@ -6,6 +6,7 @@ Contributors are more than welcomed to help with this work. If you decide to ca * Podman can not create containers that bind to ports < 1024. * The kernel does not allow processes without CAP_NET_BIND_SERVICE to bind to low ports. + * You can modify the `net.ipv4.ip_unprivileged_port_start` sysctl to change the lowest port. For example `sysctl net.ipv4.ip_unprivileged_port_start=443` allows rootless Podman containers to bind to ports >= 443. * “How To” documentation is patchy at best. * If /etc/subuid and /etc/subgid are not setup for a user, then podman commands can easily fail @@ -13,11 +14,11 @@ can easily fail * We are working to get support for NSSWITCH on the /etc/subuid and /etc/subgid files. * No cgroup V1 Support * cgroup V1 does not safely support cgroup delegation. - * However, cgroup V2 provides cgroup delegation and is available on Fedora starting with version 29 and other Linux distributions. -* Some systemd's unit configuration options do not work in the rootless container - * systemd fails to apply several options and failures are silently ignored (e.g. CPUShares, MemoryLimit). + * As of Fedora 31 defaults to cgroup V2, which has full support of rootless cgroup management. Note this requires the --cgroup-manager within rootless containers to use systemd, which new containers will get by default. +* Some system unit configuration options do not work in the rootless container + * systemd fails to apply several options and failures are silently ignored (e.g. CPUShares, MemoryLimit). Should work on cgroup V2. * Use of certain options will cause service startup failures (e.g. PrivateNetwork). -* Can not share container images with CRI-O or other users +* Can not share container images with CRI-O or other rootfull users * Difficult to use additional stores for sharing content * Does not work on NFS or parallel filesystem homedirs (e.g. [GPFS](https://www.ibm.com/support/knowledgecenter/en/SSFKCN/gpfs_welcome.html)) * NFS and parallel filesystems enforce file creation on different UIDs on the server side and does not understand User Namespace. @@ -35,9 +36,9 @@ can easily fail * Requires new shadow-utils (not found in older (RHEL7/Centos7 distros) Should be fixed in RHEL7.7 release) * A few commands do not work. 
* mount/unmount (on fuse-overlay) - * Only works if you enter the mount namespace with a tool like buildah unshare - * podman stats (Without cgroup V2 support) + * Only works if you enter the mount namespace with podman unshare + * podman stats (Works with cgroup V2 support) * Checkpoint and Restore (CRIU requires root) - * Pause and Unpause (no freezer cgroup) + * Pause and Unpause (Works with cgroup V2 support) * Issues with higher UIDs can cause builds to fail * If a build is attempting to use a UID that is not mapped into the user namespace mapping for a container, then builds will not be able to put the UID in an image. diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index 5d3b1238a..603edbe6b 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -208,6 +208,39 @@ var _ = Describe("Podman generate kube", func() { Expect(psOut).To(ContainSubstring("test2")) }) + It("podman generate with user and reimport kube on pod", func() { + podName := "toppod" + _, rc, _ := podmanTest.CreatePod(podName) + Expect(rc).To(Equal(0)) + + session := podmanTest.Podman([]string{"create", "--pod", podName, "--name", "test1", "--user", "100:200", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Equal(0)) + Expect(inspect.OutputToString()).To(ContainSubstring("100:200")) + + outputFile := filepath.Join(podmanTest.RunRoot, "pod.yaml") + kube := podmanTest.Podman([]string{"generate", "kube", "-f", outputFile, podName}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"pod", "rm", "-af"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"play", "kube", outputFile}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"}) + inspect1.WaitWithDefaultTimeout() + Expect(inspect1.ExitCode()).To(Equal(0)) + Expect(inspect1.OutputToString()).To(ContainSubstring(inspect.OutputToString())) + }) + It("podman generate kube with volume", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) diff --git a/test/e2e/run_cgroup_parent_test.go b/test/e2e/run_cgroup_parent_test.go index 1fb9f6871..14294eeac 100644 --- a/test/e2e/run_cgroup_parent_test.go +++ b/test/e2e/run_cgroup_parent_test.go @@ -40,7 +40,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() { Skip("Must be containerized to run this test.") } cgroup := "/zzz" - run := podmanTest.Podman([]string{"run", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/self/cgroup"}) + run := podmanTest.Podman([]string{"run", "--cgroupns=host", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/self/cgroup"}) run.WaitWithDefaultTimeout() Expect(run.ExitCode()).To(Equal(0)) ok, _ := run.GrepString(cgroup) @@ -52,7 +52,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() { if !Containerized() && podmanTest.CgroupManager != "cgroupfs" { cgroup = "/machine.slice" } - run := podmanTest.Podman([]string{"run", fedoraMinimal, "cat", "/proc/self/cgroup"}) + run := podmanTest.Podman([]string{"run", "--cgroupns=host", fedoraMinimal, "cat", "/proc/self/cgroup"}) run.WaitWithDefaultTimeout() 
Expect(run.ExitCode()).To(Equal(0)) ok, _ := run.GrepString(cgroup) @@ -64,7 +64,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() { Skip("Requires Systemd cgroup manager support") } cgroup := "aaaa.slice" - run := podmanTest.Podman([]string{"run", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/1/cgroup"}) + run := podmanTest.Podman([]string{"run", "--cgroupns=host", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/1/cgroup"}) run.WaitWithDefaultTimeout() Expect(run.ExitCode()).To(Equal(0)) ok, _ := run.GrepString(cgroup) diff --git a/test/e2e/test.yaml b/test/e2e/test.yaml new file mode 100644 index 000000000..98d2c91df --- /dev/null +++ b/test/e2e/test.yaml @@ -0,0 +1,37 @@ +# Save the output of this file and use kubectl create -f to import +# it into Kubernetes. +# +# Created with podman-1.6.2 +apiVersion: v1 +kind: Pod +metadata: + labels: + app: test + name: test +spec: + containers: + - command: + - sleep + - "100" + env: + - name: PATH + value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + - name: TERM + value: xterm + - name: container + value: podman + image: docker.io/library/fedora:latest + name: test + resources: {} + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + allowPrivilegeEscalation: true + capabilities: {} + privileged: false + seLinuxOptions: + level: "s0:c1,c2" + readOnlyRootFilesystem: false + workingDir: / +status: {} diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 1f7815073..66774c226 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -29,7 +29,7 @@ env: #### # Command to prefix every output line with a timestamp # (can't do inline awk script, Cirrus-CI or YAML mangles quoting) - _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk' + _TIMESTAMP: 'awk -f ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk' _DFCMD: 'df -lhTx tmpfs' _RAUDITCMD: 'cat /var/log/audit/audit.log' _UAUDITCMD: 'cat /var/log/kern.log' @@ -75,9 +75,37 @@ testing_task: failure_journal_log_script: '${_JOURNALCMD} || true' +# This task runs `make vendor` followed by ./hack/tree_status.sh to check +# whether the git tree is clean. The reasoning for that is to make sure +# that the vendor.conf, the code and the vendored packages in ./vendor are +# in sync at all times. 
+vendor_task: + + only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + + env: + CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah" + GOPATH: "/go" + GOSRC: "/go/src/github.com/containers/buildah" + + # Runs within Cirrus's "community cluster" + container: + image: docker.io/library/golang:1.13 + cpu: 1 + memory: 1 + + timeout_in: 30m + + vendor_script: + - 'cd ${CIRRUS_WORKING_DIR} && make vendor' + - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh' + # Update metadata on VM images referenced by this repository state meta_task: + depends_on: + - "vendor" + container: image: "quay.io/libpod/imgts:latest" # see contrib/imgts cpu: 1 diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml index 044bc1f1b..dde37ad79 100644 --- a/vendor/github.com/containers/buildah/.golangci.yml +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -2,7 +2,6 @@ run: build-tags: - apparmor - - ostree - seccomp - selinux concurrency: 6 diff --git a/vendor/github.com/containers/buildah/.papr.sh b/vendor/github.com/containers/buildah/.papr.sh index 25ab4c29d..2795e9ec0 100644 --- a/vendor/github.com/containers/buildah/.papr.sh +++ b/vendor/github.com/containers/buildah/.papr.sh @@ -26,7 +26,6 @@ dnf install -y \ libselinux-utils \ make \ openssl \ - ostree-devel \ skopeo-containers \ which diff --git a/vendor/github.com/containers/buildah/.papr.yml b/vendor/github.com/containers/buildah/.papr.yml index 6eaba332c..4be12a18e 100644 --- a/vendor/github.com/containers/buildah/.papr.yml +++ b/vendor/github.com/containers/buildah/.papr.yml @@ -68,7 +68,6 @@ packages: - golang - libassuan-devel - make - - ostree-devel - skopeo-containers required: false diff --git a/vendor/github.com/containers/buildah/.travis.yml b/vendor/github.com/containers/buildah/.travis.yml index fbc0a7862..a74108230 100644 --- a/vendor/github.com/containers/buildah/.travis.yml +++ b/vendor/github.com/containers/buildah/.travis.yml @@ -2,8 +2,8 @@ language: go dist: xenial sudo: required go: - - 1.11.x - 1.12.x + - 1.13.x - tip go_import_path: github.com/containers/buildah @@ -40,9 +40,6 @@ matrix: services: - docker before_install: - - make vendor - - ./hack/tree_status.sh - - sudo apt-get update - sudo apt-get -qq install software-properties-common - sudo add-apt-repository -y ppa:duggan/bats - sudo apt-get update @@ -51,15 +48,6 @@ before_install: - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce - mkdir /home/travis/auth - sudo mkdir -p /var/lib/containers/storage/overlay - - > - OSTREE_VERSION=v2019.2; - git clone https://github.com/ostreedev/ostree && - pushd ostree && - git checkout $OSTREE_VERSION && - ./autogen.sh --prefix=/usr && - sudo make -j4 install && - popd && - sudo rm -rf ostree install: # Let's create a self signed certificate and get it in the right places - hostname @@ -98,7 +86,7 @@ script: # Setting up Docker Registry is complete, let's do Buildah testing! 
- make install.tools -j4 - make install.libseccomp.sudo all runc validate lint SECURITYTAGS="apparmor seccomp" - - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah + - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./selinux_tag.sh`" ./cmd/buildah - tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v --root $tmp/root --runroot $tmp/runroot --storage-driver vfs --signature-policy `pwd`/tests/policy.json --registries-conf `pwd`/tests/registries.conf - cd tests; sudo PATH="$PATH" ./test_runner.sh - cd .. diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index c75a055a6..b41ff8350 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,28 @@ # Changelog +## v1.11.4 (2019-10-28) + buildah: add a "manifest" command + manifests: add the module + pkg/supplemented: add a package for grouping images together + pkg/manifests: add a manifest list build/manipulation API + Update for ErrUnauthorizedForCredentials API change in containers/image + Update for manifest-lists API changes in containers/image + version: also note the version of containers/image + Move to containers/image v5.0.0 + Enable --device directory as src device + Fix git build with branch specified + Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1 + Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0 + Add clarification to the Tutorial for new users + Silence "using cache" to ensure -q is fully quiet + Add OWNERS File to Buildah + Bump github.com/containers/storage from 1.13.4 to 1.13.5 + Move runtime flag to bud from common + Commit: check for storage.ErrImageUnknown using errors.Cause() + Fix crash when invalid COPY --from flag is specified. 
+ Bump back to v1.12.0-dev + ## v1.11.3 (2019-10-04) Update c/image to v4.0.1 Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index cb0dfeb51..9d04177d0 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -2,7 +2,7 @@ export GOPROXY=https://proxy.golang.org SELINUXTAG := $(shell ./selinux_tag.sh) APPARMORTAG := $(shell hack/apparmor_tag.sh) -STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh) +STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) SECURITYTAGS ?= seccomp $(SELINUXTAG) $(APPARMORTAG) TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) BUILDTAGS += $(TAGS) @@ -24,7 +24,7 @@ endif GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) -STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" +STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod) #RUNC_COMMIT := $(shell sed -n 's;\tgithub.com/opencontainers/runc \([^ \n]*\).*$\;\1;p' go.mod) @@ -130,6 +130,9 @@ test-unit: tests/testreport/testreport mkdir -p $$tmp/root $$tmp/runroot; \ $(GO) test -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json -registries-conf $(shell pwd)/tests/registries.conf +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor + .PHONY: vendor vendor: export GO111MODULE=on \ diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 59b62925a..d57eea818 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -27,7 +27,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.11.4" + Version = "1.12.0-dev" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 6bba4a1f8..c8741b781 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -21,7 +21,7 @@ require ( github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 // indirect github.com/mattn/go-shellwords v1.0.6 github.com/morikuni/aec v1.0.0 // indirect - github.com/onsi/ginkgo v1.10.1 + github.com/onsi/ginkgo v1.10.2 github.com/onsi/gomega v1.7.0 github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index e5ce6a290..4a6673b04 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -212,6 +212,8 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94= +github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 858364b45..f533b0fb2 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -139,7 +139,6 @@ Prior to installing Buildah, install the following packages on your Linux distro * glib2-devel * libassuan-devel * libseccomp-devel -* ostree-devel * runc (Requires version 1.0 RC4 or higher.) 
* containers-common @@ -158,7 +157,6 @@ In Fedora, you can use this command: gpgme-devel \ libassuan-devel \ libseccomp-devel \ - ostree-devel \ git \ bzip2 \ go-md2man \ @@ -196,7 +194,6 @@ run this command: gpgme-devel \ libassuan-devel \ libseccomp-devel \ - ostree-devel \ git \ bzip2 \ go-md2man \ @@ -241,7 +238,7 @@ In Ubuntu zesty and xenial, you can use these commands: add-apt-repository -y ppa:gophers/archive apt-add-repository -y ppa:projectatomic/ppa apt-get -y -qq update - apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man + apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man apt-get -y install golang-1.10 ``` Then to install Buildah on Ubuntu follow the steps in this example: @@ -266,7 +263,7 @@ gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list apt update -apt -y install -t stretch-backports libostree-dev golang +apt -y install -t stretch-backports golang apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man ``` @@ -274,7 +271,7 @@ The build steps on Debian are otherwise the same as Ubuntu, above. ## Vendoring - Dependency Management -This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor` to synchronize the code with the go module and repopulate the `./vendor` directory. +This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory. ## Configuration files @@ -381,7 +378,7 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a * Enter into your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set to the directory prior as noted above. * `export GO111MODULE=on` * Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13` - * `make vendor` + * `make vendor-in-container` * `make` * `make install` * Then add any updated or added files with `git add` then do a `git commit` and create a PR. @@ -391,10 +388,10 @@ Buildah uses Go Modules for vendoring purposes. 
If you need to update or add a If you wish to vendor in your personal fork to try changes out (assuming containers/storage in the below example): * `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH` - * `make vendor` + * `make vendor-in-container` To revert * `go mod edit -dropreplace github.com/containers/storage` - * `make vendor` + * `make vendor-in-container` To speed up fetching dependencies, you can use a [Go Module Proxy](https://proxy.golang.org) by setting `GOPROXY=https://proxy.golang.org`. diff --git a/vendor/github.com/containers/buildah/ostree_tag.sh b/vendor/github.com/containers/buildah/ostree_tag.sh deleted file mode 100644 index 537c17ff4..000000000 --- a/vendor/github.com/containers/buildah/ostree_tag.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash -if pkg-config ostree-1 2> /dev/null ; then - echo containers_image_ostree -else - echo containers_image_ostree_stub -fi diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index a9bf94a32..e8979cd7f 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -7,7 +7,6 @@ package cli import ( "fmt" "os" - "path/filepath" "runtime" "strings" @@ -270,9 +269,5 @@ func GetDefaultAuthFile() string { if authfile != "" { return authfile } - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir != "" { - return filepath.Join(runtimeDir, "containers/auth.json") - } return "" } diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go index 9be9bb3b6..80ca05016 100644 --- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go @@ -148,12 +148,12 @@ func getMountsMap(path string) (string, string, error) { } // SecretMounts copies, adds, and mounts the secrets to the container root filesystem -func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless bool) []rspec.Mount { - return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless) +func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless, disableFips bool) []rspec.Mount { + return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless, disableFips) } // SecretMountsWithUIDGID specifies the uid/gid of the owner -func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless bool) []rspec.Mount { +func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless, disableFips bool) []rspec.Mount { var ( secretMounts []rspec.Mount mountFiles []string @@ -180,6 +180,10 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre } } + // Only add FIPS secret mount if disableFips=false + if disableFips { + return secretMounts + } // Add FIPS mode secret if /etc/system-fips exists on the host _, err := os.Stat("/etc/system-fips") if err == nil { diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 624da9dae..4f507d1bc 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -460,7 +460,7 @@ func (b *Builder) setupMounts(mountPoint string, spec 
*specs.Spec, bundlePath st } // Get the list of secrets mounts. - secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless()) + secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless(), false) // Add temporary copies of the contents of volume locations at the // volume locations, unless we already have something there. diff --git a/vendor/github.com/containers/buildah/troubleshooting.md b/vendor/github.com/containers/buildah/troubleshooting.md index 96fa403f0..4ff2f06c4 100644 --- a/vendor/github.com/containers/buildah/troubleshooting.md +++ b/vendor/github.com/containers/buildah/troubleshooting.md @@ -82,9 +82,9 @@ the command with single quotes and use `bash -c`. The previous examples would b changed to: ```console -# buildah run bash -c '$whalecontainer /usr/games/fortune -a | cowsay' -# buildah run bash -c '$newcontainer echo "daemon off;" >> /etc/nginx/nginx.conf' -# buildah run bash -c '$newcontainer echo "nginx on Fedora" > /usr/share/nginx/html/index.html' +# buildah run $whalecontainer bash -c '/usr/games/fortune -a | cowsay' +# buildah run $newcontainer bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf' +# buildah run $newcontainer bash -c 'echo "nginx on Fedora" > /usr/share/nginx/html/index.html' ``` --- diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 44db18b45..b4670e41c 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -248,28 +248,36 @@ func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts * // location into our working container, mapping permissions using the // container's ID maps, possibly overridden using the passed-in chownOpts func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error { - if hasher != nil && b.ContentDigester.Hash() != nil { - hasher = io.MultiWriter(hasher, b.ContentDigester.Hash()) - } - if hasher == nil { - hasher = b.ContentDigester.Hash() - } convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) if dryRun { return func(src, dest string) error { - if hasher == nil { - hasher = ioutil.Discard + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) + } + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() } f, err := os.Open(src) if err != nil { return errors.Wrapf(err, "error opening %q", src) } defer f.Close() - _, err = io.Copy(hasher, f) + _, err = io.Copy(thisHasher, f) return err } } - return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) + return func(src, dest string) error { + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) + } + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() + } + untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap) + return untarPathAndChown(src, dest) + } } // tarPath returns a function which creates an archive of a specified location, diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml index 
0d7bdd9ab..e81cc8805 100644 --- a/vendor/github.com/uber/jaeger-client-go/.travis.yml +++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml @@ -7,21 +7,22 @@ dist: trusty matrix: include: - - go: 1.12.x + - go: 1.13.x env: - TESTS=true - USE_DEP=true - COVERAGE=true - - go: 1.12.x + - go: 1.13.x env: - USE_DEP=true - CROSSDOCK=true - - go: 1.12.x + - go: 1.13.x env: - TESTS=true - USE_DEP=false - USE_GLIDE=true - - go: 1.11.x + # test with previous version of Go + - go: 1.12.x env: - TESTS=true - USE_DEP=true diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md index 31b22e40c..c4590bf93 100644 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -1,6 +1,45 @@ Changes by Version ================== +2.20.0 (2019-11-06) +------------------- + +## New Features + +- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj + + Sampling state is shared between all spans of the trace that are still in memory. + This allows implementation of delayed sampling decisions (see below). + +- Support delayed sampling decisions (#449) -- Yuri Shkuro + + This is a large structural change to how the samplers work. + It allows some samplers to be executed multiple times on different + span events (like setting a tag) and make a positive sampling decision + later in the span life cycle, or even based on children spans. + See [README](./README.md#delayed-sampling) for more details. + + There is a related minor change in behavior of the adaptive (per-operation) sampler, + which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the + operation used to make the sampling decision is always the one provided at span creation. + +- Add experimental tag matching sampler (#452) -- Yuri Shkuro + + A sampler that can sample a trace based on a certain tag added to the root + span or one of its local (in-process) children. The sampler can be used with + another experimental `PrioritySampler` that allows multiple samplers to try + to make a sampling decision, in a certain priority order. 
+ +- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta +- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy + +## Minor patches + +- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro +- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro +- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi +- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro + 2.19.0 (2019-09-23) ------------------- diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock index 1ed86f4a7..5a42ebf16 100644 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock @@ -2,6 +2,14 @@ [[projects]] + digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761" + name = "github.com/BurntSushi/toml" + packages = ["."] + pruneopts = "UT" + revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" + version = "v0.3.1" + +[[projects]] digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] @@ -138,14 +146,6 @@ version = "v1.4.0" [[projects]] - digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" - name = "github.com/uber-go/atomic" - packages = ["."] - pruneopts = "UT" - revision = "df976f2515e274675050de7b3f42545de80594fd" - version = "v1.4.0" - -[[projects]] digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3" name = "github.com/uber/jaeger-lib" packages = [ @@ -158,23 +158,31 @@ version = "v2.2.0" [[projects]] - digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" + digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600" name = "go.uber.org/atomic" packages = ["."] pruneopts = "UT" - revision = "df976f2515e274675050de7b3f42545de80594fd" - version = "v1.4.0" + revision = "9dc4df04d0d1c39369750a9f6c32c39560672089" + version = "v1.5.0" [[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" + digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902" name = "go.uber.org/multierr" packages = ["."] pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" + revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27" + version = "v1.3.0" + +[[projects]] + branch = "master" + digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4" + name = "go.uber.org/tools" + packages = ["update-license"] + pruneopts = "UT" + revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98" [[projects]] - digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e" + digest = "1:6be13632ab4bd5842a097abb3aabac045a8601e19a10da4239e7d8bd83d4b83c" name = "go.uber.org/zap" packages = [ ".", @@ -185,8 +193,19 @@ "zapcore", ] pruneopts = "UT" - revision = "27376062155ad36be76b0f12cf1572a221d3a48c" - version = "v1.10.0" + revision = "a6015e13fab9b744d96085308ce4e8f11bad1996" + version = "v1.12.0" + +[[projects]] + branch = "master" + digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457" [[projects]] branch = "master" @@ -197,23 +216,81 @@ "context/ctxhttp", ] pruneopts = "UT" - revision = "aa69164e4478b84860dc6769c710c699c67058a3" + revision = 
"0deb6923b6d97481cb43bc1043fe5b72a0143032" [[projects]] branch = "master" - digest = "1:712252802d318c8107d8f2136b99aa10feb17eca715245ed915199fbfc260155" + digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a" name = "golang.org/x/sys" packages = ["windows"] pruneopts = "UT" - revision = "0a153f010e6963173baba2306531d173aa843137" + revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611" + +[[projects]] + branch = "master" + digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f" + name = "golang.org/x/tools" + packages = [ + "go/analysis", + "go/analysis/passes/inspect", + "go/ast/astutil", + "go/ast/inspector", + "go/buildutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/packages", + "go/types/objectpath", + "go/types/typeutil", + "internal/fastwalk", + "internal/gopathwalk", + "internal/semver", + "internal/span", + ] + pruneopts = "UT" + revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6" [[projects]] - digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "UT" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" + revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5" + version = "v2.2.4" + +[[projects]] + digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111" + name = "honnef.co/go/tools" + packages = [ + "arg", + "cmd/staticcheck", + "config", + "deprecated", + "facts", + "functions", + "go/types/typeutil", + "internal/cache", + "internal/passes/buildssa", + "internal/renameio", + "internal/sharedcheck", + "lint", + "lint/lintdsl", + "lint/lintutil", + "lint/lintutil/format", + "loader", + "printf", + "simple", + "ssa", + "ssautil", + "staticcheck", + "staticcheck/vrp", + "stylecheck", + "unused", + "version", + ] + pruneopts = "UT" + revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a" + version = "2019.2.3" [solve-meta] analyzer-name = "dep" @@ -229,10 +306,10 @@ "github.com/stretchr/testify/assert", "github.com/stretchr/testify/require", "github.com/stretchr/testify/suite", - "github.com/uber-go/atomic", "github.com/uber/jaeger-lib/metrics", "github.com/uber/jaeger-lib/metrics/metricstest", "github.com/uber/jaeger-lib/metrics/prometheus", + "go.uber.org/atomic", "go.uber.org/zap", "go.uber.org/zap/zapcore", ] diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml index 3e6ac35ae..1fed7f814 100644 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml @@ -15,7 +15,7 @@ version = "^1.1.3" [[constraint]] - name = "github.com/uber-go/atomic" + name = "go.uber.org/atomic" version = "^1" [[constraint]] diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile index 74e11787a..0cfe6a5f6 100644 --- a/vendor/github.com/uber/jaeger-client-go/Makefile +++ b/vendor/github.com/uber/jaeger-client-go/Makefile @@ -1,5 +1,5 @@ PROJECT_ROOT=github.com/uber/jaeger-client-go -PACKAGES := $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u) +PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u) # all .go files that don't exist in hidden directories ALL_SRC := $(shell find . 
-name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ -e ".*/\..*" \ @@ -125,3 +125,4 @@ ifeq ($(CI_SKIP_LINT),true) else make lint endif + diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md index 604d4b571..a3366114d 100644 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ b/vendor/github.com/uber/jaeger-client-go/README.md @@ -182,6 +182,29 @@ are available: 1. `RateLimitingSampler` can be used to allow only a certain fixed number of traces to be sampled per second. +#### Delayed sampling + +Version 2.20 introduced the ability to delay sampling decisions in the life cycle +of the root span. It involves several features and architectural changes: + * **Shared sampling state**: the sampling state is shared across all local + (i.e. in-process) spans for a given trace. + * **New `SamplerV2` API** allows the sampler to be called at multiple points + in the life cycle of a span: + * on span creation + * on overwriting span operation name + * on setting span tags + * on finishing the span + * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler + to indicate if the negative sampling decision is final or not (positive sampling + decisions are always final). If the decision is not final, the sampler will be + called again on further span life cycle events, like setting tags. + +These new features are used in the experimental `x.TagMatchingSampler`, which +can sample a trace based on a certain tag added to the root +span or one of its local (in-process) children. The sampler can be used with +another experimental `x.PrioritySampler` that allows multiple samplers to try +to make a sampling decision, in a certain priority order. + ### Baggage Injection The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go index 6bce1b3b0..965f7c3ee 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -86,6 +86,9 @@ type SamplerConfig struct { // jaeger-agent for the appropriate sampling strategy. // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` + + // Options can be used to programmatically pass additional options to the Remote sampler. + Options []jaeger.SamplerOption } // ReporterConfig configures the reporter. All fields are optional. @@ -357,6 +360,7 @@ func (sc *SamplerConfig) NewSampler( if sc.SamplingRefreshInterval != 0 { options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval)) } + options = append(options, sc.Options...) 
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil } return nil, fmt.Errorf("Unknown sampler type %v", sc.Type) diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go index 14d69b11d..a729bd8fe 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go @@ -52,7 +52,11 @@ const ( // FromEnv uses environment variables to set the tracer's Configuration func FromEnv() (*Configuration, error) { c := &Configuration{} + return c.FromEnv() +} +// FromEnv uses environment variables and overrides existing tracer's Configuration +func (c *Configuration) FromEnv() (*Configuration, error) { if e := os.Getenv(envServiceName); e != "" { c.ServiceName = e } @@ -77,13 +81,21 @@ func FromEnv() (*Configuration, error) { c.Tags = parseTags(e) } - if s, err := samplerConfigFromEnv(); err == nil { + if c.Sampler == nil { + c.Sampler = &SamplerConfig{} + } + + if s, err := c.Sampler.samplerConfigFromEnv(); err == nil { c.Sampler = s } else { return nil, errors.Wrap(err, "cannot obtain sampler config from env") } - if r, err := reporterConfigFromEnv(); err == nil { + if c.Reporter == nil { + c.Reporter = &ReporterConfig{} + } + + if r, err := c.Reporter.reporterConfigFromEnv(); err == nil { c.Reporter = r } else { return nil, errors.Wrap(err, "cannot obtain reporter config from env") @@ -93,9 +105,7 @@ func FromEnv() (*Configuration, error) { } // samplerConfigFromEnv creates a new SamplerConfig based on the environment variables -func samplerConfigFromEnv() (*SamplerConfig, error) { - sc := &SamplerConfig{} - +func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) { if e := os.Getenv(envSamplerType); e != "" { sc.Type = e } @@ -135,9 +145,7 @@ func samplerConfigFromEnv() (*SamplerConfig, error) { } // reporterConfigFromEnv creates a new ReporterConfig based on the environment variables -func reporterConfigFromEnv() (*ReporterConfig, error) { - rc := &ReporterConfig{} - +func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) { if e := os.Getenv(envReporterMaxQueueSize); e != "" { if value, err := strconv.ParseInt(e, 10, 0); err == nil { rc.QueueSize = int(value) diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go index e95b2ba09..0da47b02f 100644 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ b/vendor/github.com/uber/jaeger-client-go/constants.go @@ -22,7 +22,7 @@ import ( const ( // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.19.0" + JaegerClientVersion = "Go-2.20.0" // JaegerClientVersionTagKey is the name of the tag used to report client version. 
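`FromEnv` now also exists as a method on `Configuration`, so the `JAEGER_*` environment variables can enrich a config object that was partially built in code instead of always producing a fresh one. For example (service name is a placeholder):

```go
package main

import (
	"log"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	// Defaults set in code...
	cfg := &config.Configuration{
		ServiceName: "my-service", // placeholder
	}

	// ...enriched/overridden by JAEGER_* environment variables.
	cfg, err := cfg.FromEnv()
	if err != nil {
		log.Fatalf("cannot load config from env: %v", err)
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("cannot init tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer
}
```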
JaegerClientVersionTagKey = "jaeger.version" diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go index 6ce1caf87..f0f1afe2f 100644 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go @@ -35,7 +35,7 @@ func BuildJaegerThrift(span *Span) *j.Span { SpanId: int64(span.context.spanID), ParentSpanId: int64(span.context.parentID), OperationName: span.operationName, - Flags: int32(span.context.flags), + Flags: int32(span.context.samplingState.flags()), StartTime: startTime, Duration: duration, Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength), diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go index e56db9b73..50e4e22d6 100644 --- a/vendor/github.com/uber/jaeger-client-go/metrics.go +++ b/vendor/github.com/uber/jaeger-client-go/metrics.go @@ -26,6 +26,9 @@ type Metrics struct { // Number of traces started by this tracer as not sampled TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"` + // Number of traces started by this tracer with delayed sampling + TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"` + // Number of externally started sampled traces this tracer joined TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"` @@ -33,13 +36,22 @@ type Metrics struct { TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"` // Number of sampled spans started by this tracer - SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"` + SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"` + + // Number of not sampled spans started by this tracer + SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"` + + // Number of spans with delayed sampling started by this tracer + SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"` - // Number of unsampled spans started by this tracer - SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"` + // Number of spans finished by this tracer + SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"` + + // Number of spans finished by this tracer + SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"` // Number of spans finished by this tracer - SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"` + SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"` // Number of errors 
decoding tracing context DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"` diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go index 5b50cfb71..42fd64b58 100644 --- a/vendor/github.com/uber/jaeger-client-go/propagation.go +++ b/vendor/github.com/uber/jaeger-client-go/propagation.go @@ -193,7 +193,7 @@ func (p *BinaryPropagator) Inject( if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil { return err } - if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil { + if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil { return err } @@ -222,6 +222,7 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er return emptyContext, opentracing.ErrInvalidCarrier } var ctx SpanContext + ctx.samplingState = &samplingState{} if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted @@ -232,9 +233,12 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } - if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil { + + var flags byte + if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } + ctx.samplingState.setFlags(flags) // Handle the baggage items var numBaggage int32 diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go index 27163ebe4..0b78cec20 100644 --- a/vendor/github.com/uber/jaeger-client-go/reporter.go +++ b/vendor/github.com/uber/jaeger-client-go/reporter.go @@ -28,6 +28,8 @@ import ( // Reporter is called by the tracer when a span is completed to report the span to the tracing collector. type Reporter interface { // Report submits a new span to collectors, possibly asynchronously and/or with buffering. + // If the reporter is processing Span asynchronously then it needs to Retain() the span, + // and then Release() it when no longer needed, to avoid span data corruption. Report(span *Span) // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory. diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go index ea6984e02..6195d59c5 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler.go @@ -17,19 +17,14 @@ package jaeger import ( "fmt" "math" - "net/url" "sync" - "sync/atomic" - "time" - "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift-gen/sampling" "github.com/uber/jaeger-client-go/utils" ) const ( - defaultSamplingRefreshInterval = time.Minute - defaultMaxOperations = 2000 + defaultMaxOperations = 2000 ) // Sampler decides whether a new trace should be sampled or not. @@ -47,9 +42,7 @@ type Sampler interface { // Equal checks if the `other` sampler is functionally equivalent // to this sampler. - // TODO remove this function. This function is used to determine if 2 samplers are equivalent - // which does not bode well with the adaptive sampler which has to create all the composite samplers - // for the comparison to occur. 
This is expensive to do if only one sampler has changed. + // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation. Equal(other Sampler) bool } @@ -57,17 +50,23 @@ type Sampler interface { // ConstSampler is a sampler that always makes the same decision. type ConstSampler struct { + legacySamplerV1Base Decision bool tags []Tag } // NewConstSampler creates a ConstSampler. -func NewConstSampler(sample bool) Sampler { +func NewConstSampler(sample bool) *ConstSampler { tags := []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeConst}, {key: SamplerParamTagKey, value: sample}, } - return &ConstSampler{Decision: sample, tags: tags} + s := &ConstSampler{ + Decision: sample, + tags: tags, + } + s.delegate = s.IsSampled + return s } // IsSampled implements IsSampled() of Sampler. @@ -88,11 +87,17 @@ func (s *ConstSampler) Equal(other Sampler) bool { return false } +// String is used to log sampler details. +func (s *ConstSampler) String() string { + return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision) +} + // ----------------------- // ProbabilisticSampler is a sampler that randomly samples a certain percentage // of traces. type ProbabilisticSampler struct { + legacySamplerV1Base samplingRate float64 samplingBoundary uint64 tags []Tag @@ -114,16 +119,19 @@ func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error } func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler { - samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) - tags := []Tag{ + s := new(ProbabilisticSampler) + s.delegate = s.IsSampled + return s.init(samplingRate) +} + +func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler { + s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) + s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate) + s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic}, - {key: SamplerParamTagKey, value: samplingRate}, - } - return &ProbabilisticSampler{ - samplingRate: samplingRate, - samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate), - tags: tags, + {key: SamplerParamTagKey, value: s.samplingRate}, } + return s } // SamplingRate returns the sampling probability this sampled was constructed with. @@ -149,65 +157,104 @@ func (s *ProbabilisticSampler) Equal(other Sampler) bool { return false } +// Update modifies in-place the sampling rate. Locking must be done externally. +func (s *ProbabilisticSampler) Update(samplingRate float64) error { + if samplingRate < 0.0 || samplingRate > 1.0 { + return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) + } + s.init(samplingRate) + return nil +} + +// String is used to log sampler details. +func (s *ProbabilisticSampler) String() string { + return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate) +} + // ----------------------- -type rateLimitingSampler struct { +// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows +// burstiness of the service, i.e. a service with uniformly distributed requests will have those +// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a +// number of sequential requests can be sampled each second. 
+type RateLimitingSampler struct { + legacySamplerV1Base maxTracesPerSecond float64 - rateLimiter utils.RateLimiter + rateLimiter *utils.ReconfigurableRateLimiter tags []Tag } -// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled -// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those -// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of -// sequential requests can be sampled each second. -func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler { - tags := []Tag{ +// NewRateLimitingSampler creates new RateLimitingSampler. +func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler { + s := new(RateLimitingSampler) + s.delegate = s.IsSampled + return s.init(maxTracesPerSecond) +} + +func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler { + if s.rateLimiter == nil { + s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + } else { + s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + } + s.maxTracesPerSecond = maxTracesPerSecond + s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting}, {key: SamplerParamTagKey, value: maxTracesPerSecond}, } - return &rateLimitingSampler{ - maxTracesPerSecond: maxTracesPerSecond, - rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)), - tags: tags, - } + return s } // IsSampled implements IsSampled() of Sampler. -func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { +func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { return s.rateLimiter.CheckCredit(1.0), s.tags } -func (s *rateLimitingSampler) Close() { +// Update reconfigures the rate limiter, while preserving its accumulated balance. +// Locking must be done externally. +func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) { + if s.maxTracesPerSecond != maxTracesPerSecond { + s.init(maxTracesPerSecond) + } +} + +// Close does nothing. +func (s *RateLimitingSampler) Close() { // nothing to do } -func (s *rateLimitingSampler) Equal(other Sampler) bool { - if o, ok := other.(*rateLimitingSampler); ok { +// Equal compares with another sampler. +func (s *RateLimitingSampler) Equal(other Sampler) bool { + if o, ok := other.(*RateLimitingSampler); ok { return s.maxTracesPerSecond == o.maxTracesPerSecond } return false } +// String is used to log sampler details. +func (s *RateLimitingSampler) String() string { + return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond) +} + // ----------------------- -// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and -// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that +// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and +// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that // every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound // of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes. // -// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both -// samplers return true, the tags for probabilisticSampler will be used. 
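With the rename from `rateLimitingSampler` to the exported `RateLimitingSampler`, callers can construct the sampler directly and later call `Update`, which reconfigures the rate while preserving the limiter's accumulated credit. A short usage sketch:

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// Sample at most ~10 traces per second.
	sampler := jaeger.NewRateLimitingSampler(10)
	defer sampler.Close()

	sampled, tags := sampler.IsSampled(jaeger.TraceID{Low: 1}, "GET /users")
	fmt.Println(sampled, tags)

	// Raise the limit in place; the accumulated balance is kept rather
	// than reset, unlike creating a brand-new sampler.
	sampler.Update(100)
}
```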
+// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both +// samplers return true, the tags for ProbabilisticSampler will be used. type GuaranteedThroughputProbabilisticSampler struct { probabilisticSampler *ProbabilisticSampler - lowerBoundSampler Sampler + lowerBoundSampler *RateLimitingSampler tags []Tag samplingRate float64 lowerBound float64 } // NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both -// probabilisticSampler and rateLimitingSampler. +// ProbabilisticSampler and RateLimitingSampler. func NewGuaranteedThroughputProbabilisticSampler( lowerBound, samplingRate float64, ) (*GuaranteedThroughputProbabilisticSampler, error) { @@ -224,8 +271,14 @@ func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float6 } func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) { - if s.probabilisticSampler == nil || s.samplingRate != samplingRate { + if s.probabilisticSampler == nil { s.probabilisticSampler = newProbabilisticSampler(samplingRate) + } else if s.samplingRate != samplingRate { + s.probabilisticSampler.init(samplingRate) + } + // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval + samplingRate = s.probabilisticSampler.SamplingRate() + if s.samplingRate != samplingRate || s.tags == nil { s.samplingRate = s.probabilisticSampler.SamplingRate() s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeLowerBound}, @@ -252,7 +305,7 @@ func (s *GuaranteedThroughputProbabilisticSampler) Close() { // Equal implements Equal() of Sampler. func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for + // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for // more information. return false } @@ -261,52 +314,116 @@ func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) { s.setProbabilisticSampler(samplingRate) if s.lowerBound != lowerBound { - s.lowerBoundSampler = NewRateLimitingSampler(lowerBound) + s.lowerBoundSampler.Update(lowerBound) s.lowerBound = lowerBound } } // ----------------------- -type adaptiveSampler struct { +// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler +// on a per-operation basis. +type PerOperationSampler struct { sync.RWMutex samplers map[string]*GuaranteedThroughputProbabilisticSampler defaultSampler *ProbabilisticSampler lowerBound float64 maxOperations int + + // see description in PerOperationSamplerParams + operationNameLateBinding bool } -// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and -// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all -// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler. -func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) { - return newAdaptiveSampler(strategies, maxOperations), nil +// NewAdaptiveSampler returns a new PerOperationSampler. +// Deprecated: please use NewPerOperationSampler. 
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) { + return NewPerOperationSampler(PerOperationSamplerParams{ + MaxOperations: maxOperations, + Strategies: strategies, + }), nil } -func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler { +// PerOperationSamplerParams defines parameters when creating PerOperationSampler. +type PerOperationSamplerParams struct { + // Max number of operations that will be tracked. Other operations will be given default strategy. + MaxOperations int + + // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName. + // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving + // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance + // in applications that always provide the correct span name on trace creation. + // + // For backwards compatibility this option is off by default. + OperationNameLateBinding bool + + // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler). + Strategies *sampling.PerOperationSamplingStrategies +} + +// NewPerOperationSampler returns a new PerOperationSampler. +func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler { samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler) - for _, strategy := range strategies.PerOperationStrategies { + for _, strategy := range params.Strategies.PerOperationStrategies { sampler := newGuaranteedThroughputProbabilisticSampler( - strategies.DefaultLowerBoundTracesPerSecond, + params.Strategies.DefaultLowerBoundTracesPerSecond, strategy.ProbabilisticSampling.SamplingRate, ) samplers[strategy.Operation] = sampler } - return &adaptiveSampler{ - samplers: samplers, - defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability), - lowerBound: strategies.DefaultLowerBoundTracesPerSecond, - maxOperations: maxOperations, + return &PerOperationSampler{ + samplers: samplers, + defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability), + lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond, + maxOperations: params.MaxOperations, + operationNameLateBinding: params.OperationNameLateBinding, } } -func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { +// IsSampled is not used and only exists to match Sampler V1 API. +// TODO (breaking change) remove when upgrading everything to SamplerV2 +func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return false, nil +} + +func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) { + samplerV1 := s.getSamplerForOperation(operationName) + var sampled bool + var tags []Tag + if span.context.samplingState.isLocalRootSpan(span.context.spanID) { + sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName) + } + return sampled, tags +} + +// OnCreateSpan implements OnCreateSpan of SamplerV2. +func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision { + sampled, tags := s.trySampling(span, span.OperationName()) + return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags} +} + +// OnSetOperationName implements OnSetOperationName of SamplerV2. 
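`NewAdaptiveSampler` is kept only as a deprecated wrapper; new code is expected to go through `NewPerOperationSampler` and its params struct. A sketch with illustrative numbers:

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	sampler := jaeger.NewPerOperationSampler(jaeger.PerOperationSamplerParams{
		// Track at most this many distinct operation names; the rest fall
		// back to the default probabilistic strategy.
		MaxOperations: 2000,
		// Keep the decision retryable until SetOperationName is called.
		OperationNameLateBinding: true,
		Strategies: &sampling.PerOperationSamplingStrategies{
			DefaultSamplingProbability:       0.001,
			DefaultLowerBoundTracesPerSecond: 1.0 / 60,
		},
	})
	defer sampler.Close()
	fmt.Printf("%T\n", sampler)
}
```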
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { + sampled, tags := s.trySampling(span, operationName) + return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags} +} + +// OnSetTag implements OnSetTag of SamplerV2. +func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +// OnFinishSpan implements OnFinishSpan of SamplerV2. +func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler { s.RLock() sampler, ok := s.samplers[operation] if ok { defer s.RUnlock() - return sampler.IsSampled(id, operation) + return sampler } s.RUnlock() s.Lock() @@ -315,18 +432,19 @@ func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) // Check if sampler has already been created sampler, ok = s.samplers[operation] if ok { - return sampler.IsSampled(id, operation) + return sampler } // Store only up to maxOperations of unique ops. if len(s.samplers) >= s.maxOperations { - return s.defaultSampler.IsSampled(id, operation) + return s.defaultSampler } newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate()) s.samplers[operation] = newSampler - return newSampler.IsSampled(id, operation) + return newSampler } -func (s *adaptiveSampler) Close() { +// Close invokes Close on all underlying samplers. +func (s *PerOperationSampler) Close() { s.Lock() defer s.Unlock() for _, sampler := range s.samplers { @@ -335,16 +453,18 @@ func (s *adaptiveSampler) Close() { s.defaultSampler.Close() } -func (s *adaptiveSampler) Equal(other Sampler) bool { - // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple +// Equal is not used. +// TODO (breaking change) remove this in the future +func (s *PerOperationSampler) Equal(other Sampler) bool { + // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple // samplers which all need to be initialized before this function can be called for a comparison. - // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need + // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need // changing. Hence this function always returns false so that the update function can be called. // Once the Equal() function is removed from the Sampler API, this will no longer be needed. return false } -func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) { +func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) { s.Lock() defer s.Unlock() newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{} @@ -369,191 +489,3 @@ func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrate } s.samplers = newSamplers } - -// ----------------------- - -// RemotelyControlledSampler is a delegating sampler that polls a remote server -// for the appropriate sampling strategy, constructs a corresponding sampler and -// delegates to it for sampling decisions. -type RemotelyControlledSampler struct { - // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. 
https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - closed int64 // 0 - not closed, 1 - closed - - sync.RWMutex - samplerOptions - - serviceName string - manager sampling.SamplingManager - doneChan chan *sync.WaitGroup -} - -type httpSamplingManager struct { - serverURL string -} - -func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { - var out sampling.SamplingStrategyResponse - v := url.Values{} - v.Set("service", serviceName) - if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil { - return nil, err - } - return &out, nil -} - -// NewRemotelyControlledSampler creates a sampler that periodically pulls -// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). -func NewRemotelyControlledSampler( - serviceName string, - opts ...SamplerOption, -) *RemotelyControlledSampler { - options := applySamplerOptions(opts...) - sampler := &RemotelyControlledSampler{ - samplerOptions: options, - serviceName: serviceName, - manager: &httpSamplingManager{serverURL: options.samplingServerURL}, - doneChan: make(chan *sync.WaitGroup), - } - go sampler.pollController() - return sampler -} - -func applySamplerOptions(opts ...SamplerOption) samplerOptions { - options := samplerOptions{} - for _, option := range opts { - option(&options) - } - if options.sampler == nil { - options.sampler = newProbabilisticSampler(0.001) - } - if options.logger == nil { - options.logger = log.NullLogger - } - if options.maxOperations <= 0 { - options.maxOperations = defaultMaxOperations - } - if options.samplingServerURL == "" { - options.samplingServerURL = DefaultSamplingServerURL - } - if options.metrics == nil { - options.metrics = NewNullMetrics() - } - if options.samplingRefreshInterval <= 0 { - options.samplingRefreshInterval = defaultSamplingRefreshInterval - } - return options -} - -// IsSampled implements IsSampled() of Sampler. -func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - s.RLock() - defer s.RUnlock() - return s.sampler.IsSampled(id, operation) -} - -// Close implements Close() of Sampler. -func (s *RemotelyControlledSampler) Close() { - if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { - s.logger.Error("Repeated attempt to close the sampler is ignored") - return - } - - var wg sync.WaitGroup - wg.Add(1) - s.doneChan <- &wg - wg.Wait() -} - -// Equal implements Equal() of Sampler. -func (s *RemotelyControlledSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for - // more information. 
- if o, ok := other.(*RemotelyControlledSampler); ok { - s.RLock() - o.RLock() - defer s.RUnlock() - defer o.RUnlock() - return s.sampler.Equal(o.sampler) - } - return false -} - -func (s *RemotelyControlledSampler) pollController() { - ticker := time.NewTicker(s.samplingRefreshInterval) - defer ticker.Stop() - s.pollControllerWithTicker(ticker) -} - -func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { - for { - select { - case <-ticker.C: - s.updateSampler() - case wg := <-s.doneChan: - wg.Done() - return - } - } -} - -func (s *RemotelyControlledSampler) getSampler() Sampler { - s.Lock() - defer s.Unlock() - return s.sampler -} - -func (s *RemotelyControlledSampler) setSampler(sampler Sampler) { - s.Lock() - defer s.Unlock() - s.sampler = sampler -} - -func (s *RemotelyControlledSampler) updateSampler() { - res, err := s.manager.GetSamplingStrategy(s.serviceName) - if err != nil { - s.metrics.SamplerQueryFailure.Inc(1) - s.logger.Infof("Unable to query sampling strategy: %v", err) - return - } - s.Lock() - defer s.Unlock() - - s.metrics.SamplerRetrieved.Inc(1) - if strategies := res.GetOperationSampling(); strategies != nil { - s.updateAdaptiveSampler(strategies) - } else { - err = s.updateRateLimitingOrProbabilisticSampler(res) - } - if err != nil { - s.metrics.SamplerUpdateFailure.Inc(1) - s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err) - return - } - s.metrics.SamplerUpdated.Inc(1) -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) { - if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok { - adaptiveSampler.update(strategies) - } else { - s.sampler = newAdaptiveSampler(strategies, s.maxOperations) - } -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error { - var newSampler Sampler - if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil { - newSampler = newProbabilisticSampler(probabilistic.SamplingRate) - } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil { - newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond)) - } else { - return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType()) - } - if !s.sampler.Equal(newSampler) { - s.sampler = newSampler - } - return nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go new file mode 100644 index 000000000..9bd0c9822 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go @@ -0,0 +1,334 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package jaeger
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
+)
+
+const (
+	defaultSamplingRefreshInterval = time.Minute
+)
+
+// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
+type SamplingStrategyFetcher interface {
+	Fetch(service string) ([]byte, error)
+}
+
+// SamplingStrategyParser is used to parse sampling strategy updates. The output object
+// should be of the type that is recognized by the SamplerUpdaters.
+type SamplingStrategyParser interface {
+	Parse(response []byte) (interface{}, error)
+}
+
+// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
+// retrieved from remote config server, to the current sampler. The updater can modify
+// the sampler in-place if sampler supports it, or create a new one.
+//
+// If the strategy does not contain configuration for the sampler in question,
+// updater must return modifiedSampler=nil to give other updaters a chance to inspect
+// the sampling strategy response.
+//
+// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
+type SamplerUpdater interface {
+	Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
+}
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+	// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+	// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+	closed int64 // 0 - not closed, 1 - closed
+
+	sync.RWMutex
+	samplerOptions
+
+	serviceName string
+	doneChan    chan *sync.WaitGroup
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+	serviceName string,
+	opts ...SamplerOption,
+) *RemotelyControlledSampler {
+	options := new(samplerOptions).applyOptionsAndDefaults(opts...)
+	sampler := &RemotelyControlledSampler{
+		samplerOptions: *options,
+		serviceName:    serviceName,
+		doneChan:       make(chan *sync.WaitGroup),
+	}
+	go sampler.pollController()
+	return sampler
+}
+
+// IsSampled implements IsSampled() of Sampler.
+// TODO (breaking change) remove when Sampler V1 is removed
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return false, nil
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+	return s.sampler.OnCreateSpan(span)
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	return s.sampler.OnSetOperationName(span, operationName)
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	return s.sampler.OnSetTag(span, key, value)
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+	return s.sampler.OnFinishSpan(span)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+	if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+		s.logger.Error("Repeated attempt to close the sampler is ignored")
+		return
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	s.doneChan <- &wg
+	wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+	// more information.
+	return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+	ticker := time.NewTicker(s.samplingRefreshInterval)
+	defer ticker.Stop()
+	s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+	for {
+		select {
+		case <-ticker.C:
+			s.UpdateSampler()
+		case wg := <-s.doneChan:
+			wg.Done()
+			return
+		}
+	}
+}
+
+// Sampler returns the currently active sampler.
+func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
+	s.Lock()
+	defer s.Unlock()
+	return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
+	s.Lock()
+	defer s.Unlock()
+	s.sampler = sampler
+}
+
+// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
+// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
+func (s *RemotelyControlledSampler) UpdateSampler() {
+	res, err := s.samplingFetcher.Fetch(s.serviceName)
+	if err != nil {
+		s.metrics.SamplerQueryFailure.Inc(1)
+		s.logger.Infof("failed to fetch sampling strategy: %v", err)
+		return
+	}
+	strategy, err := s.samplingParser.Parse(res)
+	if err != nil {
+		s.metrics.SamplerUpdateFailure.Inc(1)
+		s.logger.Infof("failed to parse sampling strategy response: %v", err)
+		return
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	s.metrics.SamplerRetrieved.Inc(1)
+	if err := s.updateSamplerViaUpdaters(strategy); err != nil {
+		s.metrics.SamplerUpdateFailure.Inc(1)
+		s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
+		return
+	}
+	s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
+	for _, updater := range s.updaters {
+		sampler, err := updater.Update(s.sampler, strategy)
+		if err != nil {
+			return err
+		}
+		if sampler != nil {
+			s.sampler = sampler
+			return nil
+		}
+	}
+	return fmt.Errorf("unsupported sampling strategy %+v", strategy)
+}
+
+// -----------------------
+
+// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type ProbabilisticSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if resp, ok := strategy.(response); ok {
+		if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
+			if ps, ok := sampler.(*ProbabilisticSampler); ok {
+				if err := ps.Update(probabilistic.SamplingRate); err != nil {
+					return nil, err
+				}
+				return sampler, nil
+			}
+			return newProbabilisticSampler(probabilistic.SamplingRate), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type RateLimitingSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if resp, ok := strategy.(response); ok {
+		if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
+			rateLimit := float64(rateLimiting.MaxTracesPerSecond)
+			if rl, ok := sampler.(*RateLimitingSampler); ok {
+				rl.Update(rateLimit)
+				return rl, nil
+			}
+			return NewRateLimitingSampler(rateLimit), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type AdaptiveSamplerUpdater struct {
+	MaxOperations            int // required
+	OperationNameLateBinding bool
+}
+
+// Update implements Update of SamplerUpdater.
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetOperationSampling() *sampling.PerOperationSamplingStrategies
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if p, ok := strategy.(response); ok {
+		if operations := p.GetOperationSampling(); operations != nil {
+			if as, ok := sampler.(*PerOperationSampler); ok {
+				as.update(operations)
+				return as, nil
+			}
+			return NewPerOperationSampler(PerOperationSamplerParams{
+				MaxOperations:            u.MaxOperations,
+				OperationNameLateBinding: u.OperationNameLateBinding,
+				Strategies:               operations,
+			}), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+type httpSamplingStrategyFetcher struct {
+	serverURL string
+	logger    Logger
+}
+
+func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
+	v := url.Values{}
+	v.Set("service", serviceName)
+	uri := f.serverURL + "?" + v.Encode()
+
+	// TODO create and reuse http.Client with proper timeout settings, etc.
+ resp, err := http.Get(uri) + if err != nil { + return nil, err + } + + defer func() { + if err := resp.Body.Close(); err != nil { + f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err)) + } + }() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) + } + + return body, nil +} + +// ----------------------- + +type samplingStrategyParser struct{} + +func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) { + strategy := new(sampling.SamplingStrategyResponse) + if err := json.Unmarshal(response, strategy); err != nil { + return nil, err + } + return strategy, nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go index 75d28a561..7a292effc 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler_options.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go @@ -16,6 +16,8 @@ package jaeger import ( "time" + + "github.com/uber/jaeger-client-go/log" ) // SamplerOption is a function that sets some option on the sampler @@ -27,10 +29,13 @@ var SamplerOptions samplerOptions type samplerOptions struct { metrics *Metrics maxOperations int - sampler Sampler + sampler SamplerV2 logger Logger samplingServerURL string samplingRefreshInterval time.Duration + samplingFetcher SamplingStrategyFetcher + samplingParser SamplingStrategyParser + updaters []SamplerUpdater } // Metrics creates a SamplerOption that initializes Metrics on the sampler, @@ -53,7 +58,7 @@ func (samplerOptions) MaxOperations(maxOperations int) SamplerOption { // to use before a remote sampler is created and used. func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption { return func(o *samplerOptions) { - o.sampler = sampler + o.sampler = samplerV1toV2(sampler) } } @@ -79,3 +84,65 @@ func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Durat o.samplingRefreshInterval = samplingRefreshInterval } } + +// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher. +func (samplerOptions) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption { + return func(o *samplerOptions) { + o.samplingFetcher = fetcher + } +} + +// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser. +func (samplerOptions) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption { + return func(o *samplerOptions) { + o.samplingParser = parser + } +} + +// Updaters creates a SamplerOption that initializes sampler updaters. 
+func (samplerOptions) Updaters(updaters ...SamplerUpdater) SamplerOption { + return func(o *samplerOptions) { + o.updaters = updaters + } +} + +func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions { + for _, option := range opts { + option(o) + } + if o.sampler == nil { + o.sampler = newProbabilisticSampler(0.001) + } + if o.logger == nil { + o.logger = log.NullLogger + } + if o.maxOperations <= 0 { + o.maxOperations = defaultMaxOperations + } + if o.samplingServerURL == "" { + o.samplingServerURL = DefaultSamplingServerURL + } + if o.metrics == nil { + o.metrics = NewNullMetrics() + } + if o.samplingRefreshInterval <= 0 { + o.samplingRefreshInterval = defaultSamplingRefreshInterval + } + if o.samplingFetcher == nil { + o.samplingFetcher = &httpSamplingStrategyFetcher{ + serverURL: o.samplingServerURL, + logger: o.logger, + } + } + if o.samplingParser == nil { + o.samplingParser = new(samplingStrategyParser) + } + if o.updaters == nil { + o.updaters = []SamplerUpdater{ + &AdaptiveSamplerUpdater{MaxOperations: o.maxOperations}, + new(ProbabilisticSamplerUpdater), + new(RateLimitingSamplerUpdater), + } + } + return o +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go new file mode 100644 index 000000000..a50671a23 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go @@ -0,0 +1,93 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +// SamplingDecision is returned by the V2 samplers. +type SamplingDecision struct { + Sample bool + Retryable bool + Tags []Tag +} + +// SamplerV2 is an extension of the V1 samplers that allows sampling decisions +// be made at different points of the span lifecycle. +type SamplerV2 interface { + OnCreateSpan(span *Span) SamplingDecision + OnSetOperationName(span *Span, operationName string) SamplingDecision + OnSetTag(span *Span, key string, value interface{}) SamplingDecision + OnFinishSpan(span *Span) SamplingDecision + + // Close does a clean shutdown of the sampler, stopping any background + // go-routines it may have started. + Close() +} + +// samplerV1toV2 wraps legacy V1 sampler into an adapter that make it look like V2. +func samplerV1toV2(s Sampler) SamplerV2 { + if s2, ok := s.(SamplerV2); ok { + return s2 + } + type legacySamplerV1toV2Adapter struct { + legacySamplerV1Base + } + return &legacySamplerV1toV2Adapter{ + legacySamplerV1Base: legacySamplerV1Base{ + delegate: s.IsSampled, + }, + } +} + +// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods. +// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler +// for backwards compatibility reasons. +// TODO (breaking change) remove this in the next major release +type SamplerV2Base struct{} + +// IsSampled implements IsSampled of Sampler. 
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) { + return false, nil +} + +// Close implements Close of Sampler. +func (SamplerV2Base) Close() {} + +// Equal implements Equal of Sampler. +func (SamplerV2Base) Equal(other Sampler) bool { return false } + +// legacySamplerV1Base is used as a base for simple samplers that only implement +// the legacy isSampled() function that is not sensitive to its arguments. +type legacySamplerV1Base struct { + delegate func(id TraceID, operation string) (sampled bool, tags []Tag) +} + +func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision { + isSampled, tags := s.delegate(span.context.traceID, span.operationName) + return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} +} + +func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision { + isSampled, tags := s.delegate(span.context.traceID, span.operationName) + return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} +} + +func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *legacySamplerV1Base) Close() {} diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go index 9df8b6017..bbf6fb068 100644 --- a/vendor/github.com/uber/jaeger-client-go/span.go +++ b/vendor/github.com/uber/jaeger-client-go/span.go @@ -34,6 +34,7 @@ type Span struct { tracer *Tracer + // TODO: (breaking change) change to use a pointer context SpanContext // The name of the "operation" this span is an instance of. @@ -65,18 +66,26 @@ type Span struct { } // Tag is a simple key value wrapper. -// TODO deprecate in the next major release, use opentracing.Tag instead. +// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. type Tag struct { key string value interface{} } +// NewTag creates a new Tag. +// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. +func NewTag(key string, value interface{}) Tag { + return Tag{key: key, value: value} +} + // SetOperationName sets or changes the operation name. 
func (s *Span) SetOperationName(operationName string) opentracing.Span { s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.operationName = operationName + s.operationName = operationName + s.Unlock() + if !s.isSamplingFinalized() { + decision := s.tracer.sampler.OnSetOperationName(s, operationName) + s.applySamplingDecision(decision, true) } s.observer.OnSetOperationName(operationName) return s @@ -84,14 +93,24 @@ func (s *Span) SetOperationName(operationName string) opentracing.Span { // SetTag implements SetTag() of opentracing.Span func (s *Span) SetTag(key string, value interface{}) opentracing.Span { + return s.setTagInternal(key, value, true) +} + +func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span { s.observer.OnSetTag(key, value) if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) { return s } - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.setTagNoLocking(key, value) + if !s.isSamplingFinalized() { + decision := s.tracer.sampler.OnSetTag(s, key, value) + s.applySamplingDecision(decision, lock) + } + if s.isWriteable() { + if lock { + s.Lock() + defer s.Unlock() + } + s.appendTagNoLocking(key, value) } return s } @@ -121,14 +140,38 @@ func (s *Span) Duration() time.Duration { func (s *Span) Tags() opentracing.Tags { s.Lock() defer s.Unlock() - var result = make(opentracing.Tags) + var result = make(opentracing.Tags, len(s.tags)) for _, tag := range s.tags { result[tag.key] = tag.value } return result } -func (s *Span) setTagNoLocking(key string, value interface{}) { +// Logs returns micro logs for span +func (s *Span) Logs() []opentracing.LogRecord { + s.Lock() + defer s.Unlock() + + return append([]opentracing.LogRecord(nil), s.logs...) +} + +// References returns references for this span +func (s *Span) References() []opentracing.SpanReference { + s.Lock() + defer s.Unlock() + + if s.references == nil || len(s.references) == 0 { + return nil + } + + result := make([]opentracing.SpanReference, len(s.references)) + for i, r := range s.references { + result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context} + } + return result +} + +func (s *Span) appendTagNoLocking(key string, value interface{}) { s.tags = append(s.tags, Tag{key: key, value: value}) } @@ -148,7 +191,7 @@ func (s *Span) logFieldsNoLocking(fields ...log.Field) { Fields: fields, Timestamp: time.Now(), } - s.appendLog(lr) + s.appendLogNoLocking(lr) } // LogKV implements opentracing.Span API @@ -185,12 +228,12 @@ func (s *Span) Log(ld opentracing.LogData) { if ld.Timestamp.IsZero() { ld.Timestamp = s.tracer.timeNow() } - s.appendLog(ld.ToLogRecord()) + s.appendLogNoLocking(ld.ToLogRecord()) } } // this function should only be called while holding a Write lock -func (s *Span) appendLog(lr opentracing.LogRecord) { +func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) { // TODO add logic to limit number of logs per span (issue #46) s.logs = append(s.logs, lr) } @@ -224,17 +267,25 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { } s.observer.OnFinish(options) s.Lock() + s.duration = options.FinishTime.Sub(s.startTime) + s.Unlock() + if !s.isSamplingFinalized() { + decision := s.tracer.sampler.OnFinishSpan(s) + s.applySamplingDecision(decision, true) + } if s.context.IsSampled() { - s.duration = options.FinishTime.Sub(s.startTime) - // Note: bulk logs are not subject to maxLogsPerSpan limit - if options.LogRecords != nil { - s.logs = append(s.logs, options.LogRecords...) 
- } - for _, ld := range options.BulkLogData { - s.logs = append(s.logs, ld.ToLogRecord()) + if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 { + s.Lock() + // Note: bulk logs are not subject to maxLogsPerSpan limit + if options.LogRecords != nil { + s.logs = append(s.logs, options.LogRecords...) + } + for _, ld := range options.BulkLogData { + s.logs = append(s.logs, ld.ToLogRecord()) + } + s.Unlock() } } - s.Unlock() // call reportSpan even for non-sampled traces, to return span to the pool // and update metrics counter s.tracer.reportSpan(s) @@ -300,23 +351,62 @@ func (s *Span) serviceName() string { return s.tracer.serviceName } +func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) { + if !decision.Retryable { + s.context.samplingState.setFinal() + } + if decision.Sample { + s.context.samplingState.setSampled() + if len(decision.Tags) > 0 { + if lock { + s.Lock() + defer s.Unlock() + } + for _, tag := range decision.Tags { + s.appendTagNoLocking(tag.key, tag.value) + } + } + } +} + +// Span can be written to if it is sampled or the sampling decision has not been finalized. +func (s *Span) isWriteable() bool { + state := s.context.samplingState + return !state.isFinal() || state.isSampled() +} + +func (s *Span) isSamplingFinalized() bool { + return s.context.samplingState.isFinal() +} + // setSamplingPriority returns true if the flag was updated successfully, false otherwise. +// The behavior of setSamplingPriority is surprising +// If noDebugFlagOnForcedSampling is set +// setSamplingPriority(span, 1) always sets only flagSampled +// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes +// setSamplingPriority(span, 1) sets both flagSampled and flagDebug +// However, +// setSamplingPriority(span, 0) always only resets flagSampled +// +// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can +// leave flagDebug set func setSamplingPriority(s *Span, value interface{}) bool { val, ok := value.(uint16) if !ok { return false } - s.Lock() - defer s.Unlock() if val == 0 { - s.context.flags = s.context.flags & (^flagSampled) + s.context.samplingState.unsetSampled() + s.context.samplingState.setFinal() return true } if s.tracer.options.noDebugFlagOnForcedSampling { - s.context.flags = s.context.flags | flagSampled + s.context.samplingState.setSampled() + s.context.samplingState.setFinal() return true } else if s.tracer.isDebugAllowed(s.operationName) { - s.context.flags = s.context.flags | flagDebug | flagSampled + s.context.samplingState.setDebugAndSampled() + s.context.samplingState.setFinal() return true } return false @@ -326,5 +416,5 @@ func setSamplingPriority(s *Span, value interface{}) bool { func EnableFirehose(s *Span) { s.Lock() defer s.Unlock() - s.context.flags |= flagFirehose + s.context.samplingState.setFirehose() } diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go index 43553655a..b7230abfe 100644 --- a/vendor/github.com/uber/jaeger-client-go/context.go +++ b/vendor/github.com/uber/jaeger-client-go/span_context.go @@ -19,12 +19,15 @@ import ( "fmt" "strconv" "strings" + "sync" + + "go.uber.org/atomic" ) const ( - flagSampled = byte(1) - flagDebug = byte(2) - flagFirehose = byte(8) + flagSampled = 1 + flagDebug = 2 + flagFirehose = 8 ) var ( @@ -56,9 +59,6 @@ type SpanContext struct { // Should be 0 if the current span is a root span. 
parentID SpanID - // flags is a bitmap containing such bits as 'sampled' and 'debug'. - flags byte - // Distributed Context baggage. The is a snapshot in time. baggage map[string]string @@ -67,6 +67,102 @@ type SpanContext struct { // // See JaegerDebugHeader in constants.go debugID string + + // samplingState is shared across all spans + samplingState *samplingState + + // remote indicates that span context represents a remote parent + remote bool +} + +type samplingState struct { + // Span context's state flags that are propagated across processes. Only lower 8 bits are used. + // We use an int32 instead of byte to be able to use CAS operations. + stateFlags atomic.Int32 + + // When state is not final, sampling will be retried on other span write operations, + // like SetOperationName / SetTag, and the spans will remain writable. + final atomic.Bool + + // localRootSpan stores the SpanID of the first span created in this process for a given trace. + localRootSpan SpanID + + // extendedState allows samplers to keep intermediate state. + // The keys and values in this map are completely opaque: interface{} -> interface{}. + extendedState sync.Map +} + +func (s *samplingState) isLocalRootSpan(id SpanID) bool { + return id == s.localRootSpan +} + +func (s *samplingState) setFlag(newFlag int32) { + swapped := false + for !swapped { + old := s.stateFlags.Load() + swapped = s.stateFlags.CAS(old, old|newFlag) + } +} + +func (s *samplingState) unsetFlag(newFlag int32) { + swapped := false + for !swapped { + old := s.stateFlags.Load() + swapped = s.stateFlags.CAS(old, old&^newFlag) + } +} + +func (s *samplingState) setSampled() { + s.setFlag(flagSampled) +} + +func (s *samplingState) unsetSampled() { + s.unsetFlag(flagSampled) +} + +func (s *samplingState) setDebugAndSampled() { + s.setFlag(flagDebug | flagSampled) +} + +func (s *samplingState) setFirehose() { + s.setFlag(flagFirehose) +} + +func (s *samplingState) setFlags(flags byte) { + s.stateFlags.Store(int32(flags)) +} + +func (s *samplingState) setFinal() { + s.final.Store(true) +} + +func (s *samplingState) flags() byte { + return byte(s.stateFlags.Load()) +} + +func (s *samplingState) isSampled() bool { + return s.stateFlags.Load()&flagSampled == flagSampled +} + +func (s *samplingState) isDebug() bool { + return s.stateFlags.Load()&flagDebug == flagDebug +} + +func (s *samplingState) isFirehose() bool { + return s.stateFlags.Load()&flagFirehose == flagFirehose +} + +func (s *samplingState) isFinal() bool { + return s.final.Load() +} + +func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} { + if value, ok := s.extendedState.Load(key); ok { + return value + } + value := initValue() + value, _ = s.extendedState.LoadOrStore(key, value) + return value } // ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext @@ -81,17 +177,28 @@ func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { // IsSampled returns whether this trace was chosen for permanent storage // by the sampling mechanism of the tracer. func (c SpanContext) IsSampled() bool { - return (c.flags & flagSampled) == flagSampled + return c.samplingState.isSampled() } // IsDebug indicates whether sampling was explicitly requested by the service. func (c SpanContext) IsDebug() bool { - return (c.flags & flagDebug) == flagDebug + return c.samplingState.isDebug() +} + +// IsSamplingFinalized indicates whether the sampling decision has been finalized. 
+func (c SpanContext) IsSamplingFinalized() bool { + return c.samplingState.isFinal() } // IsFirehose indicates whether the firehose flag was set func (c SpanContext) IsFirehose() bool { - return (c.flags & flagFirehose) == flagFirehose + return c.samplingState.isFirehose() +} + +// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist, +// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler). +func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} { + return c.samplingState.extendedStateForKey(key, initValue) } // IsValid indicates whether this context actually represents a valid trace. @@ -99,11 +206,16 @@ func (c SpanContext) IsValid() bool { return c.traceID.IsValid() && c.spanID != 0 } +// SetFirehose enables firehose mode for this trace. +func (c SpanContext) SetFirehose() { + c.samplingState.setFirehose() +} + func (c SpanContext) String() string { if c.traceID.High == 0 { - return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) + return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) } - return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) + return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) } // ContextFromString reconstructs the Context encoded in a string @@ -130,7 +242,8 @@ func ContextFromString(value string) (SpanContext, error) { if err != nil { return emptyContext, err } - context.flags = byte(flags) + context.samplingState = &samplingState{} + context.samplingState.setFlags(byte(flags)) return context, nil } @@ -149,18 +262,24 @@ func (c SpanContext) ParentID() SpanID { return c.parentID } +// Flags returns the bitmap containing such bits as 'sampled' and 'debug'. +func (c SpanContext) Flags() byte { + return c.samplingState.flags() +} + // NewSpanContext creates a new instance of SpanContext func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { - flags := byte(0) + samplingState := &samplingState{} if sampled { - flags = flagSampled + samplingState.setSampled() } + return SpanContext{ - traceID: traceID, - spanID: spanID, - parentID: parentID, - flags: flags, - baggage: baggage} + traceID: traceID, + spanID: spanID, + parentID: parentID, + samplingState: samplingState, + baggage: baggage} } // CopyFrom copies data from ctx into this context, including span identity and baggage. @@ -169,7 +288,7 @@ func (c *SpanContext) CopyFrom(ctx *SpanContext) { c.traceID = ctx.traceID c.spanID = ctx.spanID c.parentID = ctx.parentID - c.flags = ctx.flags + c.samplingState = ctx.samplingState if l := len(ctx.baggage); l > 0 { c.baggage = make(map[string]string, l) for k, v := range ctx.baggage { @@ -193,7 +312,7 @@ func (c SpanContext) WithBaggageItem(key, value string) SpanContext { newBaggage[key] = value } // Use positional parameters so the compiler will help catch new fields. 
- return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""} + return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} } // isDebugIDContainerOnly returns true when the instance of the context is only diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go index 745a0c38a..f03372dc7 100644 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ b/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -38,7 +38,7 @@ type Tracer struct { serviceName string hostIPv4 uint32 // this is for zipkin endpoint conversion - sampler Sampler + sampler SamplerV2 reporter Reporter metrics Metrics logger log.Logger @@ -74,6 +74,7 @@ type Tracer struct { // NewTracer creates Tracer implementation that reports tracing to Jaeger. // The returned io.Closer can be used in shutdown hooks to ensure that the internal // queue of the Reporter is drained and all buffered spans are submitted to collectors. +// TODO (breaking change) return *Tracer only, without closer. func NewTracer( serviceName string, sampler Sampler, @@ -82,7 +83,7 @@ func NewTracer( ) (opentracing.Tracer, io.Closer) { t := &Tracer{ serviceName: serviceName, - sampler: sampler, + sampler: samplerV1toV2(sampler), reporter: reporter, injectors: make(map[interface{}]Injector), extractors: make(map[interface{}]Extractor), @@ -261,7 +262,7 @@ func (t *Tracer) startSpanWithOptions( rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum)) } - var samplerTags []Tag + var internalTags []Tag newTrace := false if !isSelfRef { if !hasParent || !parent.IsValid() { @@ -272,13 +273,12 @@ func (t *Tracer) startSpanWithOptions( } ctx.spanID = SpanID(ctx.traceID.Low) ctx.parentID = 0 - ctx.flags = byte(0) + ctx.samplingState = &samplingState{ + localRootSpan: ctx.spanID, + } if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) { - ctx.flags |= (flagSampled | flagDebug) - samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}} - } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled { - ctx.flags |= flagSampled - samplerTags = tags + ctx.samplingState.setDebugAndSampled() + internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID}) } } else { ctx.traceID = parent.traceID @@ -290,7 +290,11 @@ func (t *Tracer) startSpanWithOptions( ctx.spanID = SpanID(t.randomID()) ctx.parentID = parent.spanID } - ctx.flags = parent.flags + ctx.samplingState = parent.samplingState + if parent.remote { + ctx.samplingState.setFinal() + ctx.samplingState.localRootSpan = ctx.spanID + } } if hasParent { // copy baggage items @@ -305,17 +309,30 @@ func (t *Tracer) startSpanWithOptions( sp := t.newSpan() sp.context = ctx + sp.tracer = t + sp.operationName = operationName + sp.startTime = options.StartTime + sp.duration = 0 + sp.references = references + sp.firstInProcess = rpcServer || sp.context.parentID == 0 + + if !sp.isSamplingFinalized() { + decision := t.sampler.OnCreateSpan(sp) + sp.applySamplingDecision(decision, false) + } sp.observer = t.observer.OnStartSpan(sp, operationName, options) - return t.startSpanInternal( - sp, - operationName, - options.StartTime, - samplerTags, - options.Tags, - newTrace, - rpcServer, - references, - ) + + if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 { + if sp.tags == nil || cap(sp.tags) < tagsTotalLength { + sp.tags = make([]Tag, 0, tagsTotalLength) + } + sp.tags = 
append(sp.tags, internalTags...) + for k, v := range options.Tags { + sp.setTagInternal(k, v, false) + } + } + t.emitNewSpanMetrics(sp, newTrace) + return sp } // Inject implements Inject() method of opentracing.Tracer @@ -340,6 +357,7 @@ func (t *Tracer) Extract( if err != nil { return nil, err // ensure returned spanCtx is nil } + spanCtx.remote = true return spanCtx, nil } return nil, opentracing.ErrUnsupportedFormat @@ -350,10 +368,10 @@ func (t *Tracer) Close() error { t.reporter.Close() t.sampler.Close() if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok { - mgr.Close() + _ = mgr.Close() } if throttler, ok := t.debugThrottler.(io.Closer); ok { - throttler.Close() + _ = throttler.Close() } return nil } @@ -368,6 +386,7 @@ func (t *Tracer) Tags() []opentracing.Tag { } // getTag returns the value of specific tag, if not exists, return nil. +// TODO only used by tests, move there. func (t *Tracer) getTag(key string) (interface{}, bool) { for _, tag := range t.tags { if tag.key == key { @@ -383,41 +402,21 @@ func (t *Tracer) newSpan() *Span { return t.spanAllocator.Get() } -func (t *Tracer) startSpanInternal( - sp *Span, - operationName string, - startTime time.Time, - internalTags []Tag, - tags opentracing.Tags, - newTrace bool, - rpcServer bool, - references []Reference, -) *Span { - sp.tracer = t - sp.operationName = operationName - sp.startTime = startTime - sp.duration = 0 - sp.references = references - sp.firstInProcess = rpcServer || sp.context.parentID == 0 - if len(tags) > 0 || len(internalTags) > 0 { - sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags)) - copy(sp.tags, internalTags) - for k, v := range tags { - sp.observer.OnSetTag(k, v) - if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) { - continue - } - sp.setTagNoLocking(k, v) +// emitNewSpanMetrics generates metrics on the number of started spans and traces. +// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the +// server-side RPC span has the exact same trace/span/parent IDs as the +// calling client-side span, but obviously the server side span is +// no longer a root span of the trace. +func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) { + if !sp.isSamplingFinalized() { + t.metrics.SpansStartedDelayedSampling.Inc(1) + if newTrace { + t.metrics.TracesStartedDelayedSampling.Inc(1) } - } - // emit metrics - if sp.context.IsSampled() { + // joining a trace is not possible, because sampling decision inherited from upstream is final + } else if sp.context.IsSampled() { t.metrics.SpansStartedSampled.Inc(1) if newTrace { - // We cannot simply check for parentID==0 because in Zipkin model the - // server-side RPC span has the exact same trace/span/parent IDs as the - // calling client-side span, but obviously the server side span is - // no longer a root span of the trace. 
t.metrics.TracesStartedSampled.Inc(1) } else if sp.firstInProcess { t.metrics.TracesJoinedSampled.Inc(1) @@ -430,15 +429,20 @@ func (t *Tracer) startSpanInternal( t.metrics.TracesJoinedNotSampled.Inc(1) } } - return sp } func (t *Tracer) reportSpan(sp *Span) { - t.metrics.SpansFinished.Inc(1) + if !sp.isSamplingFinalized() { + t.metrics.SpansFinishedDelayedSampling.Inc(1) + } else if sp.context.IsSampled() { + t.metrics.SpansFinishedSampled.Inc(1) + } else { + t.metrics.SpansFinishedNotSampled.Inc(1) + } - // Note: if the reporter is processing Span asynchronously need to Retain() it - // otherwise, in the racing condition will be rewritten span data before it will be sent - // * To remove object use method span.Release() + // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span, + // and then Release() it when no longer needed. + // Otherwise, the span may be reused for another trace and its data may be overwritten. if sp.context.IsSampled() { t.reporter.Report(sp) } @@ -466,6 +470,11 @@ func (t *Tracer) isDebugAllowed(operation string) bool { return t.debugThrottler.IsAllowed(operation) } +// Sampler returns the sampler given to the tracer at creation. +func (t *Tracer) Sampler() SamplerV2 { + return t.sampler +} + // SelfRef creates an opentracing compliant SpanReference from a jaeger // SpanContext. This is a factory function in order to encapsulate jaeger specific // types. diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go index 1b8db9758..bf2f13165 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go @@ -20,22 +20,15 @@ import ( ) // RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits. +// +// TODO (breaking change) remove this interface in favor of public struct below +// +// Deprecated, use ReconfigurableRateLimiter. type RateLimiter interface { CheckCredit(itemCost float64) bool } -type rateLimiter struct { - sync.Mutex - - creditsPerSecond float64 - balance float64 - maxBalance float64 - lastTick time.Time - - timeNow func() time.Time -} - -// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a +// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a // credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional // to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost // of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased" @@ -47,31 +40,73 @@ type rateLimiter struct { // // It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput // as bytes/second, and calling CheckCredit() with the actual message size. -func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter { - return &rateLimiter{ +// +// TODO (breaking change) rename to RateLimiter once the interface is removed +type ReconfigurableRateLimiter struct { + lock sync.Mutex + + creditsPerSecond float64 + balance float64 + maxBalance float64 + lastTick time.Time + + timeNow func() time.Time +} + +// NewRateLimiter creates a new ReconfigurableRateLimiter. 
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+	return &ReconfigurableRateLimiter{
 		creditsPerSecond: creditsPerSecond,
 		balance:          maxBalance,
 		maxBalance:       maxBalance,
 		lastTick:         time.Now(),
-		timeNow:          time.Now}
+		timeNow:          time.Now,
+	}
 }
 
-func (b *rateLimiter) CheckCredit(itemCost float64) bool {
-	b.Lock()
-	defer b.Unlock()
-	// calculate how much time passed since the last tick, and update current tick
-	currentTime := b.timeNow()
-	elapsedTime := currentTime.Sub(b.lastTick)
-	b.lastTick = currentTime
-	// calculate how much credit have we accumulated since the last tick
-	b.balance += elapsedTime.Seconds() * b.creditsPerSecond
-	if b.balance > b.maxBalance {
-		b.balance = b.maxBalance
-	}
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not lest than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
 	// if we have enough credits to pay for current item, then reduce balance and allow
-	if b.balance >= itemCost {
-		b.balance -= itemCost
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
+		return true
+	}
+	// otherwise check if balance can be increased due to time elapsed, and try again
+	rl.updateBalance()
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
 		return true
 	}
 	return false
 }
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+	// calculate how much time passed since the last tick, and update current tick
+	currentTime := rl.timeNow()
+	elapsedTime := currentTime.Sub(rl.lastTick)
+	rl.lastTick = currentTime
+	// calculate how much credit have we accumulated since the last tick
+	rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+	if rl.balance > rl.maxBalance {
+		rl.balance = rl.maxBalance
+	}
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps to avoid thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) { + rl.lock.Lock() + defer rl.lock.Unlock() + + rl.updateBalance() // get up to date balance + rl.balance = rl.balance * maxBalance / rl.maxBalance + rl.creditsPerSecond = creditsPerSecond + rl.maxBalance = maxBalance +} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go index 636952b7f..98cab4b6e 100644 --- a/vendor/github.com/uber/jaeger-client-go/zipkin.go +++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go @@ -55,7 +55,7 @@ func (p *zipkinPropagator) Inject( carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs carrier.SetSpanID(uint64(ctx.SpanID())) carrier.SetParentID(uint64(ctx.ParentID())) - carrier.SetFlags(ctx.flags) + carrier.SetFlags(ctx.samplingState.flags()) return nil } @@ -71,6 +71,7 @@ func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, er ctx.traceID.Low = carrier.TraceID() ctx.spanID = SpanID(carrier.SpanID()) ctx.parentID = SpanID(carrier.ParentID()) - ctx.flags = carrier.Flags() + ctx.samplingState = &samplingState{} + ctx.samplingState.setFlags(carrier.Flags()) return ctx, nil } diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..0a4504f11 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +/vendor +/cover +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 000000000..0f3769e5f --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +go: + - 1.11.x + - 1.12.x + +matrix: + include: + - go: 1.12.x + env: NO_TEST=yes LINT=yes + +cache: + directories: + - vendor + +install: + - make install_ci + +script: + - test -n "$NO_TEST" || make test_ci + - test -n "$NO_TEST" || scripts/test-ubergo.sh + - test -z "$LINT" || make install_lint lint + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 000000000..8765c9fbc --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..1ef263075 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,51 @@ +# Many Go tools take file globs or directories as arguments instead of packages. +PACKAGE_FILES ?= *.go + +# For pre go1.6 +export GO15VENDOREXPERIMENT=1 + + +.PHONY: build +build: + go build -i ./... + + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + + +.PHONY: test +test: + go test -cover -race ./... + + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +.PHONY: install_lint +install_lint: + go get golang.org/x/lint/golint + + +.PHONY: lint +lint: + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @go vet ./... 2>&1 | tee -a lint.log;) + @echo "Checking lint..." + @golint $$(go list ./...) 2>&1 | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @[ ! -s lint.log ] + + +.PHONY: test_ci +test_ci: install_ci build + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..62eb8e576 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,36 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation +`go get -u go.uber.org/atomic` + +## Usage +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status +Stable. + +___ +Released under the [MIT License](LICENSE.txt). 
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 000000000..1db6849fc --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,351 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. 
+func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// Uint32 is an atomic wrapper around an uint32. +type Uint32 struct{ v uint32 } + +// NewUint32 creates a Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// Uint64 is an atomic wrapper around a uint64. +type Uint64 struct{ v uint64 } + +// NewUint64 creates a Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. 
+func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. +func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. +func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. 
+func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..0489d19ba --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,55 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper around Value for errors +type Error struct{ v Value } + +// errorHolder is non-nil holder for error object. +// atomic.Value panics on saving nil object, so err object needs to be +// wrapped with valid object first. +type errorHolder struct{ err error } + +// NewError creates new atomic error object +func NewError(err error) *Error { + e := &Error{} + if err != nil { + e.Store(err) + } + return e +} + +// Load atomically loads the wrapped error +func (e *Error) Load() error { + v := e.v.Load() + if v == nil { + return nil + } + + eh := v.(errorHolder) + return eh.err +} + +// Store atomically stores error. +// NOTE: a holder object is allocated on each Store call. 
+func (e *Error) Store(err error) { + e.v.Store(errorHolder{err: err}) +} diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock new file mode 100644 index 000000000..3c72c5997 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.lock @@ -0,0 +1,17 @@ +hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 +updated: 2016-10-27T00:10:51.16960137-07:00 +imports: [] +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: d77da356e56a7428ad25149ca77381849a6a5232 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml new file mode 100644 index 000000000..4cf608ec0 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.yaml @@ -0,0 +1,6 @@ +package: go.uber.org/atomic +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..ede8136fa --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. 
+func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index 531087655..129bc2a97 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -319,10 +319,14 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm } const ( - // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion alias_ratio_range_low = 400000 - // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion alias_ratio_range_high = 4000000 + // alias_ratio_range is the range over which we scale allowed alias ratios alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) ) @@ -784,8 +788,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { case mappingNode: d.unmarshal(n, out) case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { + if n.alias != nil && n.alias.kind != mappingNode { failWantMap() } d.unmarshal(n, out) @@ -794,8 +797,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { for i := len(n.children) - 1; i >= 0; i-- { ni := n.children[i] if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { + if ni.alias != nil && ni.alias.kind != mappingNode { failWantMap() } } else if ni.kind != mappingNode { diff --git a/vendor/modules.txt b/vendor/modules.txt index 3e858dcac..c7c71ce88 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -55,7 +55,7 @@ github.com/containernetworking/plugins/pkg/ip github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator github.com/containernetworking/plugins/pkg/utils/hwaddr github.com/containernetworking/plugins/plugins/ipam/host-local/backend -# github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e +# github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 github.com/containers/buildah github.com/containers/buildah/imagebuildah github.com/containers/buildah/pkg/chrootuser @@ -442,7 +442,7 @@ github.com/stretchr/testify/require github.com/syndtr/gocapability/capability # github.com/tchap/go-patricia v2.3.0+incompatible github.com/tchap/go-patricia/patricia -# github.com/uber/jaeger-client-go v2.19.0+incompatible +# github.com/uber/jaeger-client-go v2.20.0+incompatible github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config github.com/uber/jaeger-client-go/internal/baggage @@ -491,6 +491,8 @@ github.com/xeipuuv/gojsonpointer github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.1.0 github.com/xeipuuv/gojsonschema +# go.uber.org/atomic v1.4.0 +go.uber.org/atomic # golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad golang.org/x/crypto/ssh/terminal golang.org/x/crypto/openpgp @@ -566,7 +568,7 @@ gopkg.in/fsnotify.v1 gopkg.in/inf.v0 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/tomb.v1 -# gopkg.in/yaml.v2 v2.2.4 +# gopkg.in/yaml.v2 v2.2.5 gopkg.in/yaml.v2 # k8s.io/api v0.0.0-20190813020757-36bff7324fb7 k8s.io/api/core/v1 diff --git a/version/version.go b/version/version.go index c0dbeadfe..129a2cd4b 100644 --- 
a/version/version.go +++ b/version/version.go @@ -4,7 +4,7 @@ package version // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. -const Version = "1.6.3-dev" +const Version = "1.6.4-dev" // RemoteAPIVersion is the version for the remote // client API. It is used to determine compatibility |
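An editorial aside on the gopkg.in/yaml.v2 bump recorded above: the `decode.go` change now follows a node's resolved alias pointer directly when handling merge keys, still rejecting aliases that do not resolve to mappings via `failWantMap`. A small, hypothetical example of the merge-key path that code guards (the document contents and key names here are invented for illustration):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "<<: *base" merges the aliased mapping into "job"; merge() in decode.go
	// only accepts aliases that resolve to mappings.
	doc := `
base: &base
  retries: 3
  timeout: 30
job:
  <<: *base
  timeout: 60
`
	var out map[string]map[string]int
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	fmt.Println(out["job"]["retries"], out["job"]["timeout"]) // 3 60
}
```

In this example the explicitly spelled-out `timeout` key wins over the value pulled in through the alias, while `retries` is inherited from the anchored mapping.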