author     Sebastian Jug <sejug@redhat.com>    2018-10-16 16:30:53 -0400
committer  Sebastian Jug <seb@stianj.ug>       2019-02-18 09:57:08 -0500
commit     7141f972700ed454438d8539dd0bec79c0b61cf4 (patch)
tree       bfa2e524b71757a514a02ef68661b46dca9a3dfe /vendor
parent     e738ef16225395f5f5e4b93ba1a43ae9449ae11b (diff)
OpenTracing support added to start, stop, run, create, pull, and ps
Drop context.Context field from cli.Context

Signed-off-by: Sebastian Jug <sejug@redhat.com>
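This commit only vendors the tracing libraries; the CLI wiring for start, stop, run, create, pull, and ps lives elsewhere in the tree. As a rough sketch of the pattern the vendored packages enable (the names here, such as runCommand and the "podman.ps" operation name, are hypothetical), a command handler can open a span around its work while the Jaeger tracer is built from JAEGER_* environment variables:

```go
package main

import (
	"context"
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// runCommand is a hypothetical stand-in for a CLI handler such as "run" or
// "ps"; the real podman wiring is not part of this vendor-only diff.
func runCommand(ctx context.Context, name string) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, name)
	defer span.Finish()

	_ = ctx // pass ctx on to callees so their spans nest under this one
	return nil
}

func main() {
	// Build a Jaeger tracer from JAEGER_* environment variables.
	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		panic(err)
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		panic(err)
	}
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)

	if err := runCommand(context.Background(), "podman.ps"); err != nil {
		fmt.Println(err)
	}
}
```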
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/LICENSE | 201
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/README.md | 171
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/ext/tags.go | 210
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/globaltracer.go | 42
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/gocontext.go | 54
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/log/field.go | 269
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/log/util.go | 54
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/noop.go | 64
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/propagation.go | 176
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/span.go | 189
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/tracer.go | 305
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/LICENSE | 201
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/README.md | 270
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/baggage_setter.go | 77
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config.go | 395
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config_env.go | 221
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/options.go | 148
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/constants.go | 88
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/context.go | 258
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/contrib_observer.go | 56
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/doc.go | 24
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/header.go | 64
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go | 101
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go | 157
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go | 71
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go | 81
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go | 99
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go | 216
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go | 32
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/interop.go | 55
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/jaeger_tag.go | 84
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go | 179
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/log/logger.go | 90
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/logger.go | 53
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/metrics.go | 107
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/observer.go | 88
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/process.go | 29
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/propagation.go | 300
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/reference.go | 23
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/reporter.go | 289
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/reporter_options.go | 69
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md | 5
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go | 16
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go | 63
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go | 124
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go | 101
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go | 171
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler.go | 557
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_options.go | 81
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/span.go | 249
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go | 411
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go | 23
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go | 21
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go | 435
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go | 18
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go | 154
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go | 242
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go | 18
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go | 1838
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go | 18
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go | 410
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go | 873
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go | 32
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go | 1247
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go | 446
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/README.md | 7
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go | 142
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go | 514
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go | 815
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/exception.go | 44
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go | 79
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go | 31
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/numeric.go | 164
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/processor.go | 30
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/protocol.go | 175
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go | 78
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go | 25
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go | 69
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/serializer.go | 75
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go | 1337
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/transport.go | 68
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go | 90
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go | 39
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/thrift/type.go | 69
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/tracer.go | 441
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/tracer_options.go | 159
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/transport.go | 38
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/transport/doc.go | 23
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/transport/http.go | 163
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/transport_udp.go | 131
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/http_json.go | 54
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/localip.go | 84
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/rand.go | 46
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go | 77
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/udp_client.go | 98
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/utils.go | 87
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/zipkin.go | 76
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go | 322
-rw-r--r--  vendor/github.com/uber/jaeger-lib/LICENSE | 201
-rw-r--r--  vendor/github.com/uber/jaeger-lib/README.md | 27
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/counter.go | 28
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/factory.go | 78
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/gauge.go | 28
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/histogram.go | 28
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/keys.go | 35
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/metrics.go | 137
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go | 43
-rw-r--r--  vendor/github.com/uber/jaeger-lib/metrics/timer.go | 33
108 files changed, 19501 insertions, 0 deletions
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 000000000..f0027349e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The OpenTracing Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 000000000..6ef1d7c9d
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+In order to understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and
+[terminology](https://opentracing.io/specification/) more specifically.
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a couple of key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./default_tracer.go`. As early as possible, call
+
+```go
+ import "github.com/opentracing/opentracing-go"
+ import ".../some_tracing_impl"
+
+ func main() {
+ opentracing.SetGlobalTracer(
+ // tracing impl specific:
+ some_tracing_impl.New(...),
+ )
+ ...
+ }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+ func xyz(ctx context.Context, ...) {
+ ...
+ span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+ defer span.Finish()
+ span.LogFields(
+ log.String("event", "soft error"),
+ log.String("type", "cache timeout"),
+ log.Int("waited.millis", 1500))
+ ...
+ }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+ func xyz() {
+ ...
+ sp := opentracing.StartSpan("operation_name")
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+ func xyz(parentSpan opentracing.Span, ...) {
+ ...
+ sp := opentracing.StartSpan(
+ "operation_name",
+ opentracing.ChildOf(parentSpan.Context()))
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Serializing to the wire
+
+```go
+ func makeSomeRequest(ctx context.Context) ... {
+ if span := opentracing.SpanFromContext(ctx); span != nil {
+ httpClient := &http.Client{}
+ httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+ // Transmit the span's TraceContext as HTTP headers on our
+ // outbound request.
+ opentracing.GlobalTracer().Inject(
+ span.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+ resp, err := httpClient.Do(httpReq)
+ ...
+ }
+ ...
+ }
+```
+
+#### Deserializing from the wire
+
+```go
+ http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ var serverSpan opentracing.Span
+ appSpecificOperationName := ...
+ wireContext, err := opentracing.GlobalTracer().Extract(
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+ if err != nil {
+ // Optionally record something about err here
+ }
+
+ // Create the span referring to the RPC client if available.
+ // If wireContext == nil, a root span will be created.
+ serverSpan = opentracing.StartSpan(
+ appSpecificOperationName,
+ ext.RPCServerOption(wireContext))
+
+ defer serverSpan.Finish()
+
+ ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+ ...
+ }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field. For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+ func Customer(order *Order) log.Field {
+ if os.Getenv("ENVIRONMENT") == "dev" {
+ return log.String("customer", order.Customer.ID)
+ }
+ return log.Noop()
+ }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
+
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
+
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 000000000..52e889582
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,210 @@
+package ext
+
+import "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+// span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+// TagName.Set(span, value)
+//
+var (
+ //////////////////////////////////////////////////////////////////////
+ // SpanKind (client/server or producer/consumer)
+ //////////////////////////////////////////////////////////////////////
+
+ // SpanKind hints at relationship between spans, e.g. client/server
+ SpanKind = spanKindTagName("span.kind")
+
+ // SpanKindRPCClient marks a span representing the client-side of an RPC
+ // or other remote call
+ SpanKindRPCClientEnum = SpanKindEnum("client")
+ SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+ // SpanKindRPCServer marks a span representing the server-side of an RPC
+ // or other remote call
+ SpanKindRPCServerEnum = SpanKindEnum("server")
+ SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+ // SpanKindProducer marks a span representing the producer-side of a
+ // message bus
+ SpanKindProducerEnum = SpanKindEnum("producer")
+ SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+ // SpanKindConsumer marks a span representing the consumer-side of a
+ // message bus
+ SpanKindConsumerEnum = SpanKindEnum("consumer")
+ SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+ //////////////////////////////////////////////////////////////////////
+ // Component name
+ //////////////////////////////////////////////////////////////////////
+
+ // Component is a low-cardinality identifier of the module, library,
+ // or package that is generating a span.
+ Component = stringTagName("component")
+
+ //////////////////////////////////////////////////////////////////////
+ // Sampling hint
+ //////////////////////////////////////////////////////////////////////
+
+ // SamplingPriority determines the priority of sampling this Span.
+ SamplingPriority = uint16TagName("sampling.priority")
+
+ //////////////////////////////////////////////////////////////////////
+ // Peer tags. These tags can be emitted by either client-side of
+ // server-side to describe the other side/service in a peer-to-peer
+ // communications, like an RPC call.
+ //////////////////////////////////////////////////////////////////////
+
+ // PeerService records the service name of the peer.
+ PeerService = stringTagName("peer.service")
+
+ // PeerAddress records the address name of the peer. This may be a "ip:port",
+ // a bare "hostname", a FQDN or even a database DSN substring
+ // like "mysql://username@127.0.0.1:3306/dbname"
+ PeerAddress = stringTagName("peer.address")
+
+ // PeerHostname records the host name of the peer
+ PeerHostname = stringTagName("peer.hostname")
+
+ // PeerHostIPv4 records IP v4 host address of the peer
+ PeerHostIPv4 = ipv4Tag("peer.ipv4")
+
+ // PeerHostIPv6 records IP v6 host address of the peer
+ PeerHostIPv6 = stringTagName("peer.ipv6")
+
+ // PeerPort records port number of the peer
+ PeerPort = uint16TagName("peer.port")
+
+ //////////////////////////////////////////////////////////////////////
+ // HTTP Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // HTTPUrl should be the URL of the request being handled in this segment
+ // of the trace, in standard URI format. The protocol is optional.
+ HTTPUrl = stringTagName("http.url")
+
+ // HTTPMethod is the HTTP method of the request, and is case-insensitive.
+ HTTPMethod = stringTagName("http.method")
+
+ // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+ // HTTP response.
+ HTTPStatusCode = uint16TagName("http.status_code")
+
+ //////////////////////////////////////////////////////////////////////
+ // DB Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // DBInstance is database instance name.
+ DBInstance = stringTagName("db.instance")
+
+ // DBStatement is a database statement for the given database type.
+ // It can be a query or a prepared statement (i.e., before substitution).
+ DBStatement = stringTagName("db.statement")
+
+ // DBType is a database type. For any SQL database, "sql".
+ // For others, the lower-case database category, e.g. "redis"
+ DBType = stringTagName("db.type")
+
+ // DBUser is a username for accessing database.
+ DBUser = stringTagName("db.user")
+
+ //////////////////////////////////////////////////////////////////////
+ // Message Bus Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // MessageBusDestination is an address at which messages can be exchanged
+ MessageBusDestination = stringTagName("message_bus.destination")
+
+ //////////////////////////////////////////////////////////////////////
+ // Error Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // Error indicates that operation represented by the span resulted in an error.
+ Error = boolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+ span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+ clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+ if r.clientContext != nil {
+ opentracing.ChildOf(r.clientContext).Apply(o)
+ }
+ SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+ return rpcServerOption{client}
+}
+
+// ---
+
+type stringTagName string
+
+// Set adds a string tag to the `span`
+func (tag stringTagName) Set(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+type boolTagName string
+
+// Add adds a bool tag to the `span`
+func (tag boolTagName) Set(span opentracing.Span, value bool) {
+ span.SetTag(string(tag), value)
+}
+
+type ipv4Tag string
+
+// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility
+func (tag ipv4Tag) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1"
+func (tag ipv4Tag) SetString(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
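The ext/tags.go file above defines typed tag names whose Set helpers validate the value type at compile time, as its package comment describes. A minimal sketch of using them on a client-side span (the tagOutboundRequest helper is illustrative, not part of the vendored package):

```go
package main

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

// tagOutboundRequest exercises the typed Set helpers from ext/tags.go on a
// span that represents an outbound HTTP call.
func tagOutboundRequest(span opentracing.Span, req *http.Request, status int) {
	ext.SpanKindRPCClient.Set(span) // opentracing.Tag applied as a whole
	ext.Component.Set(span, "example-client")
	ext.HTTPMethod.Set(span, req.Method)
	ext.HTTPUrl.Set(span, req.URL.String())
	ext.HTTPStatusCode.Set(span, uint16(status))
	if status >= 500 {
		ext.Error.Set(span, true)
	}
}

func main() {
	span := opentracing.StartSpan("example") // no-op until a real tracer is registered
	defer span.Finish()

	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	tagOutboundRequest(span, req, 503)
}
```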
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 000000000..4f7066a92
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+ tracer Tracer
+ isRegistered bool
+}
+
+var (
+ globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+ globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+ return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+ SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+ return globalTracer.isRegistered
+}
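globaltracer.go keeps a process-wide tracer that defaults to NoopTracer until SetGlobalTracer is called. A small sketch of that behavior, using NoopTracer itself as a stand-in for a real tracer implementation:

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	// Before SetGlobalTracer, the package-level StartSpan is backed by NoopTracer.
	fmt.Println(opentracing.IsGlobalTracerRegistered()) // false

	opentracing.SetGlobalTracer(opentracing.NoopTracer{}) // stand-in for a real tracer
	fmt.Println(opentracing.IsGlobalTracerRegistered())   // true

	span := opentracing.StartSpan("demo")
	defer span.Finish()
}
```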
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 000000000..05a62e70b
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,54 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// `span`'s SpanContext.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+ return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
+func SpanFromContext(ctx context.Context) Span {
+ val := ctx.Value(activeSpanKey)
+ if sp, ok := val.(Span); ok {
+ return sp
+ }
+ return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+// SomeFunction(ctx context.Context, ...) {
+// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+// defer sp.Finish()
+// ...
+// }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// startSpanFromContextWithTracer is factored out for testing purposes.
+func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+ opts = append(opts, ChildOf(parentSpan.Context()))
+ }
+ span := tracer.StartSpan(operationName, opts...)
+ return span, ContextWithSpan(ctx, span)
+}
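gocontext.go ties spans to Go's context.Context. A short sketch of the parent/child pattern described in the StartSpanFromContext comment (the function names are illustrative):

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

func parent(ctx context.Context) {
	// The returned ctx now carries this span, so callees can pick it up.
	span, ctx := opentracing.StartSpanFromContext(ctx, "parent")
	defer span.Finish()
	child(ctx)
}

func child(ctx context.Context) {
	// ctx carries the parent span, so this new span is created as its ChildOf.
	span, _ := opentracing.StartSpanFromContext(ctx, "child")
	defer span.Finish()

	span.LogKV("event", "doing child work")
}

func main() {
	parent(context.Background())
}
```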
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 000000000..50feea341
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,269 @@
+package log
+
+import (
+ "fmt"
+ "math"
+)
+
+type fieldType int
+
+const (
+ stringType fieldType = iota
+ boolType
+ intType
+ int32Type
+ uint32Type
+ int64Type
+ uint64Type
+ float32Type
+ float64Type
+ errorType
+ objectType
+ lazyLoggerType
+ noopType
+)
+
+// Field instances are constructed via LogBool, LogString, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+ key string
+ fieldType fieldType
+ numericVal int64
+ stringVal string
+ interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+ return Field{
+ key: key,
+ fieldType: stringType,
+ stringVal: val,
+ }
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+ var numericVal int64
+ if val {
+ numericVal = 1
+ }
+ return Field{
+ key: key,
+ fieldType: boolType,
+ numericVal: numericVal,
+ }
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+ return Field{
+ key: key,
+ fieldType: intType,
+ numericVal: int64(val),
+ }
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+ return Field{
+ key: key,
+ fieldType: int32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+ return Field{
+ key: key,
+ fieldType: int64Type,
+ numericVal: val,
+ }
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+ return Field{
+ key: key,
+ fieldType: uint32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+ return Field{
+ key: key,
+ fieldType: uint64Type,
+ numericVal: int64(val),
+ }
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+ return Field{
+ key: key,
+ fieldType: float32Type,
+ numericVal: int64(math.Float32bits(val)),
+ }
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+ return Field{
+ key: key,
+ fieldType: float64Type,
+ numericVal: int64(math.Float64bits(val)),
+ }
+}
+
+// Error adds an error with the key "error" to a Span.LogFields() record
+func Error(err error) Field {
+ return Field{
+ key: "error",
+ fieldType: errorType,
+ interfaceVal: err,
+ }
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record
+func Object(key string, obj interface{}) Field {
+ return Field{
+ key: key,
+ fieldType: objectType,
+ interfaceVal: obj,
+ }
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
+func Lazy(ll LazyLogger) Field {
+ return Field{
+ fieldType: lazyLoggerType,
+ interfaceVal: ll,
+ }
+}
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environment:
+//
+// func customerField(order *Order) log.Field {
+// if os.Getenv("ENVIRONMENT") == "dev" {
+// return log.String("customer", order.Customer.ID)
+// }
+// return log.Noop()
+// }
+//
+// span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+ return Field{
+ fieldType: noopType,
+ }
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+ EmitString(key, value string)
+ EmitBool(key string, value bool)
+ EmitInt(key string, value int)
+ EmitInt32(key string, value int32)
+ EmitInt64(key string, value int64)
+ EmitUint32(key string, value uint32)
+ EmitUint64(key string, value uint64)
+ EmitFloat32(key string, value float32)
+ EmitFloat64(key string, value float64)
+ EmitObject(key string, value interface{})
+ EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+ switch lf.fieldType {
+ case stringType:
+ visitor.EmitString(lf.key, lf.stringVal)
+ case boolType:
+ visitor.EmitBool(lf.key, lf.numericVal != 0)
+ case intType:
+ visitor.EmitInt(lf.key, int(lf.numericVal))
+ case int32Type:
+ visitor.EmitInt32(lf.key, int32(lf.numericVal))
+ case int64Type:
+ visitor.EmitInt64(lf.key, int64(lf.numericVal))
+ case uint32Type:
+ visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+ case uint64Type:
+ visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+ case float32Type:
+ visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+ case float64Type:
+ visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+ case errorType:
+ if err, ok := lf.interfaceVal.(error); ok {
+ visitor.EmitString(lf.key, err.Error())
+ } else {
+ visitor.EmitString(lf.key, "<nil>")
+ }
+ case objectType:
+ visitor.EmitObject(lf.key, lf.interfaceVal)
+ case lazyLoggerType:
+ visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+ case noopType:
+ // intentionally left blank
+ }
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+ return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+ switch lf.fieldType {
+ case stringType:
+ return lf.stringVal
+ case boolType:
+ return lf.numericVal != 0
+ case intType:
+ return int(lf.numericVal)
+ case int32Type:
+ return int32(lf.numericVal)
+ case int64Type:
+ return int64(lf.numericVal)
+ case uint32Type:
+ return uint32(lf.numericVal)
+ case uint64Type:
+ return uint64(lf.numericVal)
+ case float32Type:
+ return math.Float32frombits(uint32(lf.numericVal))
+ case float64Type:
+ return math.Float64frombits(uint64(lf.numericVal))
+ case errorType, objectType, lazyLoggerType:
+ return lf.interfaceVal
+ case noopType:
+ return nil
+ default:
+ return nil
+ }
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+ return fmt.Sprint(lf.key, ":", lf.Value())
+}
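log/field.go provides typed field constructors; tracer implementations later consume the fields through Field.Marshal and an Encoder. A brief sketch that builds a few fields and reads them back via Key and Value:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func main() {
	// Build typed fields with the constructors above; Key()/Value() show how
	// a tracer (or a test) can read them back without an Encoder.
	fields := []log.Field{
		log.String("event", "cache miss"),
		log.Int("attempt", 3),
		log.Float64("elapsed_ms", 12.7),
		log.Error(fmt.Errorf("upstream timeout")),
	}
	for _, f := range fields {
		fmt.Printf("%s = %v\n", f.Key(), f.Value())
	}
}
```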
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 000000000..3832feb5c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,54 @@
+package log
+
+import "fmt"
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+ if len(keyValues)%2 != 0 {
+ return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+ }
+ fields := make([]Field, len(keyValues)/2)
+ for i := 0; i*2 < len(keyValues); i++ {
+ key, ok := keyValues[i*2].(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "non-string key (pair #%d): %T",
+ i, keyValues[i*2])
+ }
+ switch typedVal := keyValues[i*2+1].(type) {
+ case bool:
+ fields[i] = Bool(key, typedVal)
+ case string:
+ fields[i] = String(key, typedVal)
+ case int:
+ fields[i] = Int(key, typedVal)
+ case int8:
+ fields[i] = Int32(key, int32(typedVal))
+ case int16:
+ fields[i] = Int32(key, int32(typedVal))
+ case int32:
+ fields[i] = Int32(key, typedVal)
+ case int64:
+ fields[i] = Int64(key, typedVal)
+ case uint:
+ fields[i] = Uint64(key, uint64(typedVal))
+ case uint64:
+ fields[i] = Uint64(key, typedVal)
+ case uint8:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint16:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint32:
+ fields[i] = Uint32(key, typedVal)
+ case float32:
+ fields[i] = Float32(key, typedVal)
+ case float64:
+ fields[i] = Float64(key, typedVal)
+ default:
+ // When in doubt, coerce to a string
+ fields[i] = String(key, fmt.Sprint(typedVal))
+ }
+ }
+ return fields, nil
+}
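InterleavedKVToFields is the helper a tracer can use to implement Span.LogKV on top of LogFields. A short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func main() {
	// Convert LogKV-style alternating key/value arguments into typed Fields.
	fields, err := log.InterleavedKVToFields(
		"event", "soft error",
		"waited.millis", 1500,
		"retryable", true,
	)
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Println(f.String())
	}
}
```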
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 000000000..0d32f692c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
+//
+// WARNING: NoopTracer does not support baggage propagation.
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+ defaultNoopSpanContext = noopSpanContext{}
+ defaultNoopSpan = noopSpan{}
+ defaultNoopTracer = NoopTracer{}
+)
+
+const (
+ emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
+func (n noopSpan) BaggageItem(key string) string { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
+func (n noopSpan) LogFields(fields ...log.Field) {}
+func (n noopSpan) LogKV(keyVals ...interface{}) {}
+func (n noopSpan) Finish() {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
+func (n noopSpan) SetOperationName(operationName string) Span { return n }
+func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string) {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData) {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+ return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+ return nil, ErrSpanContextNotFound
+}
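As the NoopTracer comment above notes, libraries can default to it instead of checking for a nil tracer everywhere. A hedged sketch of that pattern (the Client type is hypothetical):

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
)

// Client is a hypothetical library type that makes tracing optional by
// falling back to NoopTracer rather than guarding every call with a nil check.
type Client struct {
	tracer opentracing.Tracer
}

func NewClient(tracer opentracing.Tracer) *Client {
	if tracer == nil {
		tracer = opentracing.NoopTracer{}
	}
	return &Client{tracer: tracer}
}

func (c *Client) Do(name string) {
	span := c.tracer.StartSpan(name) // always safe to call
	defer span.Finish()
	// ... real work ...
}

func main() {
	NewClient(nil).Do("demo")
}
```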
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 000000000..b0c275eb0
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+ "errors"
+ "net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+ // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+ // Tracer.Extract() is not recognized by the Tracer implementation.
+ ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+ // ErrSpanContextNotFound occurs when the `carrier` passed to
+ // Tracer.Extract() is valid and uncorrupted but has insufficient
+ // information to extract a SpanContext.
+ ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+ // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+ // operate on a SpanContext which it is not prepared to handle (for
+ // example, since it was created by a different tracer implementation).
+ ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+ // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+ // implementations expect a different type of `carrier` than they are
+ // given.
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+ // ErrSpanContextCorrupted occurs when the `carrier` passed to
+ // Tracer.Extract() is of the expected type but is corrupted.
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+ // Binary represents SpanContexts as opaque binary data.
+ //
+ // For Tracer.Inject(): the carrier must be an `io.Writer`.
+ //
+ // For Tracer.Extract(): the carrier must be an `io.Reader`.
+ Binary BuiltinFormat = iota
+
+ // TextMap represents SpanContexts as key:value string pairs.
+ //
+ // Unlike HTTPHeaders, the TextMap format does not restrict the key or
+ // value character sets in any way.
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ TextMap
+
+ // HTTPHeaders represents SpanContexts as HTTP header string pairs.
+ //
+ // Unlike TextMap, the HTTPHeaders format requires that the keys and values
+ // be valid as HTTP headers as-is (i.e., character casing may be unstable
+ // and special characters are disallowed in keys, values should be
+ // URL-escaped, etc).
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ //
+ // See HTTPHeadersCarrier for an implementation of both TextMapWriter
+ // and TextMapReader that defers to an http.Header instance for storage.
+ // For example, Inject():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := span.Tracer().Inject(
+ // span.Context(), opentracing.HTTPHeaders, carrier)
+ //
+ // Or Extract():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(
+ // opentracing.HTTPHeaders, carrier)
+ //
+ HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+ // Set a key:value pair to the carrier. Multiple calls to Set() for the
+ // same key leads to undefined behavior.
+ //
+ // NOTE: The backing store for the TextMapWriter may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+ // ForeachKey returns TextMap contents via repeated calls to the `handler`
+ // function. If any call to `handler` returns a non-nil error, ForeachKey
+ // terminates and returns that error.
+ //
+ // NOTE: The backing store for the TextMapReader may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ //
+ // The "foreach" callback pattern reduces unnecessary copying in some cases
+ // and also allows implementations to hold locks while the map is read.
+ ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range c {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+ c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// err := tracer.Inject(
+// span.Context(),
+// opentracing.HTTPHeaders,
+// carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+ h := http.Header(c)
+ h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vals := range c {
+ for _, v := range vals {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
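propagation.go defines the carrier interfaces used by Tracer.Inject and Tracer.Extract. A minimal round-trip sketch with TextMapCarrier; note that with the default no-op tracer Extract reports ErrSpanContextNotFound, so a real tracer such as Jaeger is assumed for the extracted context to be usable:

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	tracer := opentracing.GlobalTracer() // NoopTracer unless a real one was registered

	span := tracer.StartSpan("client-operation")
	defer span.Finish()

	// Inject the SpanContext into a plain map using the TextMap format.
	carrier := opentracing.TextMapCarrier{}
	if err := tracer.Inject(span.Context(), opentracing.TextMap, carrier); err != nil {
		fmt.Println("inject failed:", err)
	}

	// On the receiving side, extract it back out of the same map. With the
	// no-op tracer this returns ErrSpanContextNotFound; a real tracer would
	// return a usable SpanContext here.
	wireCtx, err := tracer.Extract(opentracing.TextMap, carrier)
	if err != nil {
		fmt.Println("extract:", err)
		return
	}
	serverSpan := tracer.StartSpan("server-operation", opentracing.ChildOf(wireCtx))
	defer serverSpan.Finish()
}
```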
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 000000000..0d3fb5341
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+ // ForeachBaggageItem grants access to all baggage items stored in the
+ // SpanContext.
+ // The handler function will be called for each baggage key/value pair.
+ // The ordering of items is not guaranteed.
+ //
+ // The bool return value indicates if the handler wants to continue iterating
+ // through the rest of the baggage items; for example if the handler is trying to
+ // find some baggage item by pattern matching the name, it can return false
+ // as soon as the item is found to stop further iterations.
+ ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+ // Sets the end timestamp and finalizes Span state.
+ //
+ // With the exception of calls to Context() (which are always allowed),
+ // Finish() must be the last call made to any span instance, and to do
+ // otherwise leads to undefined behavior.
+ Finish()
+ // FinishWithOptions is like Finish() but with explicit control over
+ // timestamps and log data.
+ FinishWithOptions(opts FinishOptions)
+
+ // Context() yields the SpanContext for this Span. Note that the return
+ // value of Context() is still valid after a call to Span.Finish(), as is
+ // a call to Span.Context() after a call to Span.Finish().
+ Context() SpanContext
+
+ // Sets or changes the operation name.
+ //
+ // Returns a reference to this Span for chaining.
+ SetOperationName(operationName string) Span
+
+ // Adds a tag to the span.
+ //
+ // If there is a pre-existing tag set for `key`, it is overwritten.
+ //
+ // Tag values can be numeric types, strings, or bools. The behavior of
+ // other tag value types is undefined at the OpenTracing level. If a
+ // tracing system does not know how to handle a particular value type, it
+ // may ignore the tag, but shall not panic.
+ //
+ // Returns a reference to this Span for chaining.
+ SetTag(key string, value interface{}) Span
+
+ // LogFields is an efficient and type-checked way to record key:value
+ // logging data about a Span, though the programming interface is a little
+ // more verbose than LogKV(). Here's an example:
+ //
+ // span.LogFields(
+ // log.String("event", "soft error"),
+ // log.String("type", "cache timeout"),
+ // log.Int("waited.millis", 1500))
+ //
+ // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+ LogFields(fields ...log.Field)
+
+ // LogKV is a concise, readable way to record key:value logging data about
+ // a Span, though unfortunately this also makes it less efficient and less
+ // type-safe than LogFields(). Here's an example:
+ //
+ // span.LogKV(
+ // "event", "soft error",
+ // "type", "cache timeout",
+ // "waited.millis", 1500)
+ //
+ // For LogKV (as opposed to LogFields()), the parameters must appear as
+ // key-value pairs, like
+ //
+ // span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+ //
+ // The keys must all be strings. The values may be strings, numeric types,
+ // bools, Go error instances, or arbitrary structs.
+ //
+ // (Note to implementors: consider the log.InterleavedKVToFields() helper)
+ LogKV(alternatingKeyValues ...interface{})
+
+ // SetBaggageItem sets a key:value pair on this Span and its SpanContext
+ // that also propagates to descendants of this Span.
+ //
+ // SetBaggageItem() enables powerful functionality given a full-stack
+ // opentracing integration (e.g., arbitrary application data from a mobile
+ // app can make it, transparently, all the way into the depths of a storage
+ // system), and with it some powerful costs: use this feature with care.
+ //
+ // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+ // *future* causal descendants of the associated Span.
+ //
+ // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+ // value is copied into every local *and remote* child of the associated
+ // Span, and that can add up to a lot of network and cpu overhead.
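+	//
+	// An illustrative sketch (the key name is hypothetical, not part of the API):
+	//
+	//     span.SetBaggageItem("request.id", requestID)
+	//     // ... later, on the same span or a causal descendant:
+	//     id := span.BaggageItem("request.id")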
+ //
+ // Returns a reference to this Span for chaining.
+ SetBaggageItem(restrictedKey, value string) Span
+
+ // Gets the value for a baggage item given its key. Returns the empty string
+ // if the value isn't found in this Span.
+ BaggageItem(restrictedKey string) string
+
+ // Provides access to the Tracer that created this Span.
+ Tracer() Tracer
+
+ // Deprecated: use LogFields or LogKV
+ LogEvent(event string)
+ // Deprecated: use LogFields or LogKV
+ LogEventWithPayload(event string, payload interface{})
+ // Deprecated: use LogFields or LogKV
+ Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+ Timestamp time.Time
+ Fields []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
+type FinishOptions struct {
+ // FinishTime overrides the Span's finish time, or implicitly becomes
+ // time.Now() if FinishTime.IsZero().
+ //
+ // FinishTime must resolve to a timestamp that's >= the Span's StartTime
+ // (per StartSpanOptions).
+ FinishTime time.Time
+
+ // LogRecords allows the caller to specify the contents of many LogFields()
+ // calls with a single slice. May be nil.
+ //
+ // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+ // be set explicitly). Also, they must be >= the Span's start timestamp and
+ // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+ // behavior of FinishWithOptions() is undefined.
+ //
+ // If specified, the caller hands off ownership of LogRecords at
+ // FinishWithOptions() invocation time.
+ //
+ // If specified, the (deprecated) BulkLogData must be nil or empty.
+ LogRecords []LogRecord
+
+ // BulkLogData is DEPRECATED.
+ BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+ Timestamp time.Time
+ Event string
+ Payload interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+ var literalTimestamp time.Time
+ if ld.Timestamp.IsZero() {
+ literalTimestamp = time.Now()
+ } else {
+ literalTimestamp = ld.Timestamp
+ }
+ rval := LogRecord{
+ Timestamp: literalTimestamp,
+ }
+ if ld.Payload == nil {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ }
+ } else {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ log.Object("payload", ld.Payload),
+ }
+ }
+ return rval
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 000000000..7bca1f736
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,305 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+ // Create, start, and return a new Span with the given `operationName` and
+ // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+ // from the "functional options" pattern, per
+ // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+ //
+ // A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+ // opentracing.FollowsFrom()) becomes the root of its own trace.
+ //
+ // Examples:
+ //
+ // var tracer opentracing.Tracer = ...
+ //
+ // // The root-span case:
+ // sp := tracer.StartSpan("GetFeed")
+ //
+ // // The vanilla child span case:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()))
+ //
+ // // All the bells and whistles:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()),
+ // opentracing.Tag{"user_agent", loggedReq.UserAgent},
+ // opentracing.StartTime(loggedReq.Timestamp),
+ // )
+ //
+ StartSpan(operationName string, opts ...StartSpanOption) Span
+
+ // Inject() takes the `sm` SpanContext instance and injects it for
+ // propagation within `carrier`. The actual type of `carrier` depends on
+ // the value of `format`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (sans error handling):
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := tracer.Inject(
+ // span.Context(),
+ // opentracing.HTTPHeaders,
+ // carrier)
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Implementations may return opentracing.ErrUnsupportedFormat if `format`
+ // is not supported by (or not known by) the implementation.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if the format is supported but injection
+ // fails anyway.
+ //
+ // See Tracer.Extract().
+ Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+ // Extract() returns a SpanContext instance given `format` and `carrier`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (with StartSpan):
+ //
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+ //
+ // // ... assuming the ultimate goal here is to resume the trace with a
+ // // server-side Span:
+ // var serverSpan opentracing.Span
+ // if err == nil {
+ // span = tracer.StartSpan(
+ // rpcMethodName, ext.RPCServerOption(clientContext))
+ // } else {
+ // span = tracer.StartSpan(rpcMethodName)
+ // }
+ //
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Return values:
+ // - A successful Extract returns a SpanContext instance and a nil error
+ // - If there was simply no SpanContext to extract in `carrier`, Extract()
+ // returns (nil, opentracing.ErrSpanContextNotFound)
+ // - If `format` is unsupported or unrecognized, Extract() returns (nil,
+ // opentracing.ErrUnsupportedFormat)
+ // - If there are more fundamental problems with the `carrier` object,
+ // Extract() may return opentracing.ErrInvalidCarrier,
+ // opentracing.ErrSpanContextCorrupted, or implementation-specific
+ // errors.
+ //
+ // See Tracer.Inject().
+ Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+// sso := opentracing.StartSpanOptions{}
+// for _, o := range opts {
+// o.Apply(&sso)
+// }
+// ...
+// }
+//
+type StartSpanOptions struct {
+ // Zero or more causal references to other Spans (via their SpanContext).
+ // If empty, start a "root" Span (i.e., start a new trace).
+ References []SpanReference
+
+ // StartTime overrides the Span's start time, or implicitly becomes
+ // time.Now() if StartTime.IsZero().
+ StartTime time.Time
+
+ // Tags may have zero or more entries; the restrictions on map values are
+ // identical to those for Span.SetTag(). May be nil.
+ //
+ // If specified, the caller hands off ownership of Tags at
+ // StartSpan() invocation time.
+ Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+ Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+ // ChildOfRef refers to a parent Span that caused *and* somehow depends
+ // upon the new child Span. Often (but not always), the parent Span cannot
+ // finish until the child Span does.
+ //
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+ //
+ // [-Parent Span---------]
+ // [-Child Span----]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.ChildOf()
+ ChildOfRef SpanReferenceType = iota
+
+ // FollowsFromRef refers to a parent Span that does not depend in any way
+ // on the result of the new child Span. For instance, one might use
+ // FollowsFromRefs to describe pipeline stages separated by queues,
+ // or a fire-and-forget cache insert at the tail end of a web request.
+ //
+ // A FollowsFromRef Span is part of the same logical trace as the new Span:
+ // i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+ //
+ // All of the following could be valid timing diagrams for children that
+ // "FollowFrom" a parent.
+ //
+ // [-Parent Span-] [-Child Span-]
+ //
+ //
+ // [-Parent Span--]
+ // [-Child Span-]
+ //
+ //
+ // [-Parent Span-]
+ // [-Child Span-]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.FollowsFrom()
+ FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+// sc, _ := tracer.Extract(someFormat, someCarrier)
+// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will simply
+// not add the parent span reference to the options.
+type SpanReference struct {
+ Type SpanReferenceType
+ ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+ if r.ReferencedContext != nil {
+ o.References = append(o.References, r)
+ }
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: ChildOfRef,
+ ReferencedContext: sc,
+ }
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: FollowsFromRef,
+ ReferencedContext: sc,
+ }
+}
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+ o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ for k, v := range t {
+ o.Tags[k] = v
+ }
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+// tracer.StartSpan("opName", Tag{"Key", value})
+//
+// or
+//
+// Tag{"key", value}.Set(span)
+type Tag struct {
+ Key string
+ Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+ s.SetTag(t.Key, t.Value)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
new file mode 100644
index 000000000..6d9546e5b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -0,0 +1,270 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
+
+# Jaeger Bindings for Go OpenTracing API
+
+Instrumentation library that implements an
+[OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io).
+
+**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
+ * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
+ * :x: `import "github.com/jaegertracing/jaeger-client-go"`
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## Installation
+
+We recommend using a dependency manager like [glide](https://github.com/Masterminds/glide)
+and [semantic versioning](http://semver.org/) when including this library into an application.
+For example, the Jaeger backend imports this library like this:
+
+```yaml
+- package: github.com/uber/jaeger-client-go
+ version: ^2.7.0
+```
+
+If you instead want to use the latest version in `master`, you can pull it via `go get`.
+Note that during `go get` you may see build errors due to incompatible dependencies, which is why
+we recommend using semantic versions for dependencies. Such errors can usually be fixed by running
+`make install` (it will install `glide` if you don't have it):
+
+```shell
+go get -u github.com/uber/jaeger-client-go/
+cd $GOPATH/src/github.com/uber/jaeger-client-go/
+git submodule update --init --recursive
+make install
+```
+
+## Initialization
+
+See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+and [config/example_test.go](./config/example_test.go).
+
+### Environment variables
+
+The tracer can be initialized with values coming from environment variables. None of the env vars are required
+and all of them can be overridden by setting the corresponding property directly on the configuration object.
+
+Property| Description
+--- | ---
+JAEGER_SERVICE_NAME | The service name
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, e.g. http://jaeger-collector:14268/api/traces
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits])
+JAEGER_SAMPLER_TYPE | The sampler type
+JAEGER_SAMPLER_PARAM | The sampler parameter (number)
+JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, e.g. http://jaeger-agent:5778/sampling
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits])
+JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found
+JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used.
+JAEGER_RPC_METRICS | Whether to store RPC metrics
+
+By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
+`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
+to the endpoint via `HTTP`, in which case `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` are ignored. If `JAEGER_ENDPOINT` is
+secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
+variables.
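+
+For example, a minimal initialization sketch driven entirely by the environment
+variables above (the service name comes from `JAEGER_SERVICE_NAME`; error
+handling is abbreviated):
+
+```go
+import (
+    "log"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+cfg, err := config.FromEnv() // reads the JAEGER_* variables listed above
+if err != nil {
+    log.Fatal(err)
+}
+tracer, closer, err := cfg.NewTracer() // fails if JAEGER_SERVICE_NAME is unset
+if err != nil {
+    log.Fatal(err)
+}
+defer closer.Close()
+opentracing.SetGlobalTracer(tracer)
+```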
+
+### Closing the tracer via `io.Closer`
+
+The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
+It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
+before exiting, e.g.
+
+```go
+tracer, closer, err := cfg.NewTracer(...)
+defer closer.Close()
+```
+
+This is especially useful for command-line tools that enable tracing, as well as
+for long-running apps that support graceful shutdown. For example, if your deployment
+system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
+exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
+
+### Metrics & Monitoring
+
+The tracer emits a number of different metrics, defined in
+[metrics.go](metrics.go). The monitoring backend is expected to support
+tag-based metric names, e.g. instead of `statsd`-style string names
+like `counters.my-service.jaeger.spans.started.sampled`, the metrics
+are defined by a short name and a collection of key/value tags, for
+example: `name:jaeger.traces, state:started, sampled:y`. See the [metrics.go](./metrics.go)
+file for the full list and descriptions of emitted metrics.
+
+The monitoring backend is represented by the `metrics.Factory` interface from package
+[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
+of that interface can be passed as an option to either the Configuration object or the Tracer
+constructor, for example:
+
+```go
+import (
+ "github.com/uber/jaeger-client-go/config"
+ "github.com/uber/jaeger-lib/metrics/prometheus"
+)
+
+ metricsFactory := prometheus.New()
+ tracer, closer, err := config.Configuration{
+ ServiceName: "your-service-name",
+ }.NewTracer(
+ config.Metrics(metricsFactory),
+ )
+```
+
+By default, a no-op `metrics.NullFactory` is used.
+
+### Logging
+
+The tracer can be configured with an optional logger, which will be
+used to log communication errors, or log spans if a logging reporter
+option is specified in the configuration. The logging API is abstracted
+by the [Logger](logger.go) interface. A logger instance implementing
+this interface can be set on the `Config` object before calling the
+`New` method.
+
+Besides the [zap](https://github.com/uber-go/zap) implementation
+bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
+one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
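+
+A sketch of wiring a logger in through the `config` package (the `config.Logger`
+option and the `log.StdLogger` helper are assumed to be available in this
+version of the client):
+
+```go
+import (
+    "github.com/uber/jaeger-client-go/config"
+    jaegerlog "github.com/uber/jaeger-client-go/log"
+)
+
+cfg := config.Configuration{ServiceName: "your-service-name"}
+tracer, closer, err := cfg.NewTracer(
+    config.Logger(jaegerlog.StdLogger), // log communication errors via the standard library
+)
+```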
+
+## Instrumentation for Tracing
+
+Since this tracer is fully compliant with OpenTracing API 1.0,
+all code instrumentation should only use the API itself, as described
+in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
+
+## Features
+
+### Reporters
+
+A "reporter" is a component that receives the finished spans and reports
+them to somewhere. Under normal circumstances, the Tracer
+should use the default `RemoteReporter`, which sends the spans out of
+process via configurable "transport". For testing purposes, one can
+use an `InMemoryReporter` that accumulates spans in a buffer and
+allows to retrieve them for later verification. Also available are
+`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
+which logs all finished spans using their `String()` method, and a
+`CompositeReporter` that can be used to combine more than one reporter
+into one, e.g. to attach a logging reporter to the main remote reporter.
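+
+A sketch of the testing flow described above (the `NewInMemoryReporter`
+constructor and its `GetSpans()` accessor are assumed from the reporter
+implementation, not spelled out in this README):
+
+```go
+import "github.com/uber/jaeger-client-go"
+
+reporter := jaeger.NewInMemoryReporter()
+tracer, closer := jaeger.NewTracer(
+    "test-service",
+    jaeger.NewConstSampler(true), // sample everything so spans are reported
+    reporter,
+)
+defer closer.Close()
+
+tracer.StartSpan("unit-under-test").Finish()
+spans := reporter.GetSpans() // inspect the finished spans in the test
+```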
+
+### Span Reporting Transports
+
+The remote reporter uses "transports" to actually send the spans out
+of process. Currently the supported transports include:
+ * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
+ * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
+
+### Sampling
+
+The tracer does not record all spans, but only those that have the
+sampling bit set in the `flags`. When a new trace is started and a new
+unique ID is generated, a sampling decision is made whether this trace
+should be sampled. The sampling decision is propagated to all downstream
+calls via the `flags` field of the trace context. The following samplers
+are available:
+ 1. `RemotelyControlledSampler` uses one of the other simpler samplers
+ and periodically updates it by polling an external server. This
+ allows dynamic control of the sampling strategies.
+ 1. `ConstSampler` always makes the same sampling decision for all
+    trace IDs. It can be configured to either sample all traces, or
+ to sample none.
+ 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
+ for a given trace to be sampled. The actual decision is made by
+ comparing the trace ID with a random number multiplied by the
+ sampling rate.
+ 1. `RateLimitingSampler` can be used to allow only a certain fixed
+ number of traces to be sampled per second.
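+
+For example, a configuration sketch selecting a fixed-rate probabilistic sampler
+via the `SamplerConfig` from the `config` package (values are placeholders):
+
+```go
+import "github.com/uber/jaeger-client-go/config"
+
+cfg := config.Configuration{
+    ServiceName: "your-service-name",
+    Sampler: &config.SamplerConfig{
+        Type:  "probabilistic", // or "const", "rateLimiting", "remote"
+        Param: 0.01,            // sample roughly 1% of new traces
+    },
+}
+tracer, closer, err := cfg.NewTracer()
+```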
+
+### Baggage Injection
+
+The OpenTracing spec allows for [baggage][baggage]: key-value pairs that are added
+to the span context and propagated throughout the trace. An external process can inject baggage
+by setting the special HTTP Header `jaeger-baggage` on a request:
+
+```sh
+curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
+```
+
+Baggage can also be set programmatically inside your service:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+ span.SetBaggageItem("key", "value")
+}
+```
+
+Another service downstream can retrieve the baggage in a similar way:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+ val := span.BaggageItem("key")
+ println(val)
+}
+```
+
+### Debug Traces (Forced Sampling)
+
+#### Programmatically
+
+The OpenTracing API defines a `sampling.priority` standard tag that
+can be used to affect the sampling of a span and its children:
+
+```go
+import (
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+)
+
+span := opentracing.SpanFromContext(ctx)
+ext.SamplingPriority.Set(span, 1)
+```
+
+#### Via HTTP Headers
+
+Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
+which can be set in the incoming request, e.g.
+
+```sh
+curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
+```
+
+When Jaeger sees this header in a request that otherwise has no
+tracing context, it ensures that the new trace started for this
+request will be sampled in the "debug" mode (meaning it should survive
+all downsampling that might happen in the collection pipeline), and the
+root span will have a tag as if this statement was executed:
+
+```go
+span.SetTag("jaeger-debug-id", "some-correlation-id")
+```
+
+This allows using Jaeger UI to find the trace by this tag.
+
+### Zipkin HTTP B3 compatible header propagation
+
+Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
+by many Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
+
+However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
+
+## License
+
+[Apache 2.0 License](LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
+[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
+[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
+[ot-url]: http://opentracing.io
+[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
+[timeunits]: https://golang.org/pkg/time/#ParseDuration
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
new file mode 100644
index 000000000..1037ca0e8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go/log"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+)
+
+// baggageSetter is an actor that can set a baggage value on a Span given certain
+// restrictions (e.g. maxValueLength).
+type baggageSetter struct {
+ restrictionManager baggage.RestrictionManager
+ metrics *Metrics
+}
+
+func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
+ return &baggageSetter{
+ restrictionManager: restrictionManager,
+ metrics: metrics,
+ }
+}
+
+// (NB) span should hold the lock before making this call
+func (s *baggageSetter) setBaggage(span *Span, key, value string) {
+ var truncated bool
+ var prevItem string
+ restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
+ if !restriction.KeyAllowed() {
+ s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+ s.metrics.BaggageUpdateFailure.Inc(1)
+ return
+ }
+ if len(value) > restriction.MaxValueLength() {
+ truncated = true
+ value = value[:restriction.MaxValueLength()]
+ s.metrics.BaggageTruncate.Inc(1)
+ }
+ prevItem = span.context.baggage[key]
+ s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+ span.context = span.context.WithBaggageItem(key, value)
+ s.metrics.BaggageUpdateSuccess.Inc(1)
+}
+
+func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
+ if !span.context.IsSampled() {
+ return
+ }
+ fields := []log.Field{
+ log.String("event", "baggage"),
+ log.String("key", key),
+ log.String("value", value),
+ }
+ if prevItem != "" {
+ fields = append(fields, log.String("override", "true"))
+ }
+ if truncated {
+ fields = append(fields, log.String("truncated", "true"))
+ }
+ if !valid {
+ fields = append(fields, log.String("invalid", "true"))
+ }
+ span.logFieldsNoLocking(fields...)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
new file mode 100644
index 000000000..821333ddb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -0,0 +1,395 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go"
+ "github.com/uber/jaeger-client-go/internal/baggage/remote"
+ throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
+ "github.com/uber/jaeger-client-go/rpcmetrics"
+ "github.com/uber/jaeger-client-go/transport"
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+const defaultSamplingProbability = 0.001
+
+// Configuration configures and creates Jaeger Tracer
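+//
+// An illustrative YAML sketch matching the yaml tags of the fields below
+// (service name and values are placeholders):
+//
+//     serviceName: my-service
+//     sampler:
+//       type: const
+//       param: 1
+//     reporter:
+//       logSpans: true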
+type Configuration struct {
+ // ServiceName specifies the service name to use on the tracer.
+ // Can be provided via environment variable named JAEGER_SERVICE_NAME
+ ServiceName string `yaml:"serviceName"`
+
+ // Disabled can be provided via environment variable named JAEGER_DISABLED
+ Disabled bool `yaml:"disabled"`
+
+ // RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS
+ RPCMetrics bool `yaml:"rpc_metrics"`
+
+ // Tags can be provided via environment variable named JAEGER_TAGS
+ Tags []opentracing.Tag `yaml:"tags"`
+
+ Sampler *SamplerConfig `yaml:"sampler"`
+ Reporter *ReporterConfig `yaml:"reporter"`
+ Headers *jaeger.HeadersConfig `yaml:"headers"`
+ BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
+ Throttler *ThrottlerConfig `yaml:"throttler"`
+}
+
+// SamplerConfig allows initializing a non-default sampler. All fields are optional.
+type SamplerConfig struct {
+ // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE
+ Type string `yaml:"type"`
+
+ // Param is a value passed to the sampler.
+ // Valid values for Param field are:
+ // - for "const" sampler, 0 or 1 for always false/true respectively
+ // - for "probabilistic" sampler, a probability between 0 and 1
+ // - for "rateLimiting" sampler, the number of spans per second
+ // - for "remote" sampler, param is the same as for "probabilistic"
+ // and indicates the initial sampling rate before the actual one
+ // is received from the mothership.
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM
+ Param float64 `yaml:"param"`
+
+ // SamplingServerURL is the address of jaeger-agent's HTTP sampling server
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
+ SamplingServerURL string `yaml:"samplingServerURL"`
+
+ // MaxOperations is the maximum number of operations that the sampler
+ // will keep track of. If an operation is not tracked, a default probabilistic
+ // sampler will be used rather than the per operation specific sampler.
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS
+ MaxOperations int `yaml:"maxOperations"`
+
+ // SamplingRefreshInterval controls how often the remotely controlled sampler will poll
+ // jaeger-agent for the appropriate sampling strategy.
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
+ SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
+}
+
+// ReporterConfig configures the reporter. All fields are optional.
+type ReporterConfig struct {
+ // QueueSize controls how many spans the reporter can keep in memory before it starts dropping
+ // new spans. The queue is continuously drained by a background go-routine, as fast as spans
+ // can be sent out of process.
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
+ QueueSize int `yaml:"queueSize"`
+
+ // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
+ // It is generally not useful, as it only matters for very low traffic services.
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
+ BufferFlushInterval time.Duration
+
+ // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
+ // and logs all submitted spans. Main Configuration.Logger must be initialized in the code
+ // for this option to have any effect.
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
+ LogSpans bool `yaml:"logSpans"`
+
+ // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
+ // Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+ LocalAgentHostPort string `yaml:"localAgentHostPort"`
+
+ // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL
+ // Can be set by exporting an environment variable named JAEGER_ENDPOINT
+ CollectorEndpoint string `yaml:"collectorEndpoint"`
+
+ // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
+ // Can be set by exporting an environment variable named JAEGER_USER
+ User string `yaml:"user"`
+
+ // Password instructs reporter to include a password for basic http authentication when sending spans to
+ // jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD
+ Password string `yaml:"password"`
+}
+
+// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
+// certain baggage keys. All fields are optional.
+type BaggageRestrictionsConfig struct {
+ // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
+ // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
+	// been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
+ // restrictions have been retrieved from jaeger-agent.
+ DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
+
+ // HostPort is the hostPort of jaeger-agent's baggage restrictions server
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the baggage restriction manager will poll
+ // jaeger-agent for the most recent baggage restrictions.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+}
+
+// ThrottlerConfig configures the throttler which can be used to throttle the
+// rate at which the client may send debug requests.
+type ThrottlerConfig struct {
+ // HostPort of jaeger-agent's credit server.
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the throttler will poll jaeger-agent
+ // for more throttling credits.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+
+ // SynchronousInitialization determines whether or not the throttler should
+ // synchronously fetch credits from the agent when an operation is seen for
+ // the first time. This should be set to true if the client will be used by
+ // a short lived service that needs to ensure that credits are fetched
+ // upfront such that sampling or throttling occurs.
+ SynchronousInitialization bool `yaml:"synchronousInitialization"`
+}
+
+type nullCloser struct{}
+
+func (*nullCloser) Close() error { return nil }
+
+// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
+// before shutdown.
+//
+// Deprecated: use NewTracer() function
+func (c Configuration) New(
+ serviceName string,
+ options ...Option,
+) (opentracing.Tracer, io.Closer, error) {
+ if serviceName != "" {
+ c.ServiceName = serviceName
+ }
+
+ return c.NewTracer(options...)
+}
+
+// NewTracer returns a new tracer based on the current configuration, using the given options,
+// and a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
+ if c.ServiceName == "" {
+ return nil, nil, errors.New("no service name provided")
+ }
+
+ if c.Disabled {
+ return &opentracing.NoopTracer{}, &nullCloser{}, nil
+ }
+ opts := applyOptions(options...)
+ tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
+ if c.RPCMetrics {
+ Observer(
+ rpcmetrics.NewObserver(
+ opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
+ rpcmetrics.DefaultNameNormalizer,
+ ),
+ )(&opts) // adds to c.observers
+ }
+ if c.Sampler == nil {
+ c.Sampler = &SamplerConfig{
+ Type: jaeger.SamplerTypeRemote,
+ Param: defaultSamplingProbability,
+ }
+ }
+ if c.Reporter == nil {
+ c.Reporter = &ReporterConfig{}
+ }
+
+ sampler := opts.sampler
+ if sampler == nil {
+ s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
+ if err != nil {
+ return nil, nil, err
+ }
+ sampler = s
+ }
+
+ reporter := opts.reporter
+ if reporter == nil {
+ r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
+ if err != nil {
+ return nil, nil, err
+ }
+ reporter = r
+ }
+
+ tracerOptions := []jaeger.TracerOption{
+ jaeger.TracerOptions.Metrics(tracerMetrics),
+ jaeger.TracerOptions.Logger(opts.logger),
+ jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
+ jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
+ jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
+ jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
+ }
+
+ for _, tag := range opts.tags {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+ }
+
+ for _, tag := range c.Tags {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+ }
+
+ for _, obs := range opts.observers {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
+ }
+
+ for _, cobs := range opts.contribObservers {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
+ }
+
+ for format, injector := range opts.injectors {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
+ }
+
+ for format, extractor := range opts.extractors {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
+ }
+
+ if c.BaggageRestrictions != nil {
+ mgr := remote.NewRestrictionManager(
+ c.ServiceName,
+ remote.Options.Metrics(tracerMetrics),
+ remote.Options.Logger(opts.logger),
+ remote.Options.HostPort(c.BaggageRestrictions.HostPort),
+ remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
+ remote.Options.DenyBaggageOnInitializationFailure(
+ c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
+ ),
+ )
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
+ }
+
+ if c.Throttler != nil {
+ debugThrottler := throttler.NewThrottler(
+ c.ServiceName,
+ throttler.Options.Metrics(tracerMetrics),
+ throttler.Options.Logger(opts.logger),
+ throttler.Options.HostPort(c.Throttler.HostPort),
+ throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
+ throttler.Options.SynchronousInitialization(
+ c.Throttler.SynchronousInitialization,
+ ),
+ )
+
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
+ }
+
+ tracer, closer := jaeger.NewTracer(
+ c.ServiceName,
+ sampler,
+ reporter,
+ tracerOptions...,
+ )
+
+ return tracer, closer, nil
+}
+
+// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
+// It returns a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) InitGlobalTracer(
+ serviceName string,
+ options ...Option,
+) (io.Closer, error) {
+ if c.Disabled {
+ return &nullCloser{}, nil
+ }
+ tracer, closer, err := c.New(serviceName, options...)
+ if err != nil {
+ return nil, err
+ }
+ opentracing.SetGlobalTracer(tracer)
+ return closer, nil
+}
+
+// NewSampler creates a new sampler based on the configuration
+func (sc *SamplerConfig) NewSampler(
+ serviceName string,
+ metrics *jaeger.Metrics,
+) (jaeger.Sampler, error) {
+ samplerType := strings.ToLower(sc.Type)
+ if samplerType == jaeger.SamplerTypeConst {
+ return jaeger.NewConstSampler(sc.Param != 0), nil
+ }
+ if samplerType == jaeger.SamplerTypeProbabilistic {
+ if sc.Param >= 0 && sc.Param <= 1.0 {
+ return jaeger.NewProbabilisticSampler(sc.Param)
+ }
+ return nil, fmt.Errorf(
+ "Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1",
+ sc.Param,
+ )
+ }
+ if samplerType == jaeger.SamplerTypeRateLimiting {
+ return jaeger.NewRateLimitingSampler(sc.Param), nil
+ }
+ if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
+ sc2 := *sc
+ sc2.Type = jaeger.SamplerTypeProbabilistic
+ initSampler, err := sc2.NewSampler(serviceName, nil)
+ if err != nil {
+ return nil, err
+ }
+ options := []jaeger.SamplerOption{
+ jaeger.SamplerOptions.Metrics(metrics),
+ jaeger.SamplerOptions.InitialSampler(initSampler),
+ jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
+ }
+ if sc.MaxOperations != 0 {
+ options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations))
+ }
+ if sc.SamplingRefreshInterval != 0 {
+ options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
+ }
+ return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
+ }
+ return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
+}
+
+// NewReporter instantiates a new reporter that submits spans to the collector
+func (rc *ReporterConfig) NewReporter(
+ serviceName string,
+ metrics *jaeger.Metrics,
+ logger jaeger.Logger,
+) (jaeger.Reporter, error) {
+ sender, err := rc.newTransport()
+ if err != nil {
+ return nil, err
+ }
+ reporter := jaeger.NewRemoteReporter(
+ sender,
+ jaeger.ReporterOptions.QueueSize(rc.QueueSize),
+ jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
+ jaeger.ReporterOptions.Logger(logger),
+ jaeger.ReporterOptions.Metrics(metrics))
+ if rc.LogSpans && logger != nil {
+ logger.Infof("Initializing logging reporter\n")
+ reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
+ }
+ return reporter, err
+}
+
+func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
+ switch {
+ case rc.CollectorEndpoint != "" && rc.User != "" && rc.Password != "":
+ return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1),
+ transport.HTTPBasicAuth(rc.User, rc.Password)), nil
+ case rc.CollectorEndpoint != "":
+ return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1)), nil
+ default:
+ return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
+ }
+}
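+
+// Sketch of the two reporting paths selected above (endpoint and host:port
+// values are placeholders, not defaults taken from this package):
+//
+//	// submit spans to the collector over HTTP
+//	rc := &ReporterConfig{CollectorEndpoint: "http://jaeger-collector:14268/api/traces"}
+//
+//	// or to the local agent over UDP (used when no endpoint is configured)
+//	rc = &ReporterConfig{LocalAgentHostPort: "127.0.0.1:6831"}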
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
new file mode 100644
index 000000000..ff70ae12c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/pkg/errors"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ // environment variable names
+ envServiceName = "JAEGER_SERVICE_NAME"
+ envDisabled = "JAEGER_DISABLED"
+ envRPCMetrics = "JAEGER_RPC_METRICS"
+ envTags = "JAEGER_TAGS"
+ envSamplerType = "JAEGER_SAMPLER_TYPE"
+ envSamplerParam = "JAEGER_SAMPLER_PARAM"
+ envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT"
+ envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
+ envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
+ envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
+ envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
+ envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
+ envEndpoint = "JAEGER_ENDPOINT"
+ envUser = "JAEGER_USER"
+ envPassword = "JAEGER_PASSWORD"
+ envAgentHost = "JAEGER_AGENT_HOST"
+ envAgentPort = "JAEGER_AGENT_PORT"
+)
+
+// FromEnv uses environment variables to set the tracer's Configuration
+func FromEnv() (*Configuration, error) {
+ c := &Configuration{}
+
+ if e := os.Getenv(envServiceName); e != "" {
+ c.ServiceName = e
+ }
+
+ if e := os.Getenv(envRPCMetrics); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.RPCMetrics = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
+ }
+ }
+
+ if e := os.Getenv(envDisabled); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.Disabled = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
+ }
+ }
+
+ if e := os.Getenv(envTags); e != "" {
+ c.Tags = parseTags(e)
+ }
+
+ if s, err := samplerConfigFromEnv(); err == nil {
+ c.Sampler = s
+ } else {
+ return nil, errors.Wrap(err, "cannot obtain sampler config from env")
+ }
+
+ if r, err := reporterConfigFromEnv(); err == nil {
+ c.Reporter = r
+ } else {
+ return nil, errors.Wrap(err, "cannot obtain reporter config from env")
+ }
+
+ return c, nil
+}
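+
+// Illustrative sketch (variable values are assumptions): with
+//
+//	JAEGER_SERVICE_NAME=my-service
+//	JAEGER_SAMPLER_TYPE=const
+//	JAEGER_SAMPLER_PARAM=1
+//
+// in the environment, the whole configuration can be assembled without code:
+//
+//	cfg, err := FromEnv()
+//	if err != nil {
+//		return err
+//	}
+//	closer, err := cfg.InitGlobalTracer(cfg.ServiceName)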
+
+// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
+func samplerConfigFromEnv() (*SamplerConfig, error) {
+ sc := &SamplerConfig{}
+
+ if e := os.Getenv(envSamplerType); e != "" {
+ sc.Type = e
+ }
+
+ if e := os.Getenv(envSamplerParam); e != "" {
+ if value, err := strconv.ParseFloat(e, 64); err == nil {
+ sc.Param = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
+ }
+ }
+
+ if e := os.Getenv(envSamplerManagerHostPort); e != "" {
+ sc.SamplingServerURL = e
+ }
+
+ if e := os.Getenv(envSamplerMaxOperations); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ sc.MaxOperations = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
+ }
+ }
+
+ if e := os.Getenv(envSamplerRefreshInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ sc.SamplingRefreshInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
+ }
+ }
+
+ return sc, nil
+}
+
+// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
+func reporterConfigFromEnv() (*ReporterConfig, error) {
+ rc := &ReporterConfig{}
+
+ if e := os.Getenv(envReporterMaxQueueSize); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ rc.QueueSize = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
+ }
+ }
+
+ if e := os.Getenv(envReporterFlushInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ rc.BufferFlushInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
+ }
+ }
+
+ if e := os.Getenv(envReporterLogSpans); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ rc.LogSpans = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
+ }
+ }
+
+ if e := os.Getenv(envEndpoint); e != "" {
+ u, err := url.ParseRequestURI(e)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
+ }
+ rc.CollectorEndpoint = u.String()
+ user := os.Getenv(envUser)
+ pswd := os.Getenv(envPassword)
+ if user != "" && pswd == "" || user == "" && pswd != "" {
+ return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
+ }
+ rc.User = user
+ rc.Password = pswd
+ } else {
+ host := jaeger.DefaultUDPSpanServerHost
+ if e := os.Getenv(envAgentHost); e != "" {
+ host = e
+ }
+
+ port := jaeger.DefaultUDPSpanServerPort
+ if e := os.Getenv(envAgentPort); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ port = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
+ }
+ }
+ rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+ }
+
+ return rc, nil
+}
+
+// parseTags parses the given string into a collection of Tags.
+// Spec for this value:
+// - comma separated list of key=value
+// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
+// is an environment variable and `defaultValue` is the value to use in case the env var is not set
+func parseTags(sTags string) []opentracing.Tag {
+ pairs := strings.Split(sTags, ",")
+ tags := make([]opentracing.Tag, 0)
+ for _, p := range pairs {
+ kv := strings.SplitN(p, "=", 2)
+ k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
+
+ if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
+ ed := strings.SplitN(v[2:len(v)-1], ":", 2)
+ e, d := ed[0], ed[1]
+ v = os.Getenv(e)
+ if v == "" && d != "" {
+ v = d
+ }
+ }
+
+ tag := opentracing.Tag{Key: k, Value: v}
+ tags = append(tags, tag)
+ }
+
+ return tags
+}
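+
+// Example of the accepted JAEGER_TAGS format (values are hypothetical):
+//
+//	JAEGER_TAGS=cluster=prod,zone=${ZONE:us-east-1}
+//
+// yields the tags cluster=prod and zone set to $ZONE, falling back to
+// "us-east-1" when that variable is unset.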
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
new file mode 100644
index 000000000..9eed0ec75
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/options.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+ "github.com/uber/jaeger-lib/metrics"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+// Option is a function that sets some option on the client.
+type Option func(c *Options)
+
+// Options control behavior of the client.
+type Options struct {
+ metrics metrics.Factory
+ logger jaeger.Logger
+ reporter jaeger.Reporter
+ sampler jaeger.Sampler
+ contribObservers []jaeger.ContribObserver
+ observers []jaeger.Observer
+ gen128Bit bool
+ zipkinSharedRPCSpan bool
+ maxTagValueLength int
+ tags []opentracing.Tag
+ injectors map[interface{}]jaeger.Injector
+ extractors map[interface{}]jaeger.Extractor
+}
+
+// Metrics creates an Option that initializes Metrics in the tracer,
+// which is used to emit statistics about spans.
+func Metrics(factory metrics.Factory) Option {
+ return func(c *Options) {
+ c.metrics = factory
+ }
+}
+
+// Logger can be provided to log Reporter errors, as well as to log spans
+// if Reporter.LogSpans is set to true.
+func Logger(logger jaeger.Logger) Option {
+ return func(c *Options) {
+ c.logger = logger
+ }
+}
+
+// Reporter can be provided explicitly to override the configuration.
+// Useful for testing, e.g. by passing InMemoryReporter.
+func Reporter(reporter jaeger.Reporter) Option {
+ return func(c *Options) {
+ c.reporter = reporter
+ }
+}
+
+// Sampler can be provided explicitly to override the configuration.
+func Sampler(sampler jaeger.Sampler) Option {
+ return func(c *Options) {
+ c.sampler = sampler
+ }
+}
+
+// Observer can be registered with the Tracer to receive notifications about new Spans.
+func Observer(observer jaeger.Observer) Option {
+ return func(c *Options) {
+ c.observers = append(c.observers, observer)
+ }
+}
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new spans.
+func ContribObserver(observer jaeger.ContribObserver) Option {
+ return func(c *Options) {
+ c.contribObservers = append(c.contribObservers, observer)
+ }
+}
+
+// Gen128Bit specifies whether to generate 128bit trace IDs.
+func Gen128Bit(gen128Bit bool) Option {
+ return func(c *Options) {
+ c.gen128Bit = gen128Bit
+ }
+}
+
+// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
+// and server spans a la zipkin. If false, client and server spans will be assigned
+// different IDs.
+func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
+ return func(c *Options) {
+ c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+// MaxTagValueLength can be provided to override the default max tag value length.
+func MaxTagValueLength(maxTagValueLength int) Option {
+ return func(c *Options) {
+ c.maxTagValueLength = maxTagValueLength
+ }
+}
+
+// Tag creates an option that adds a tracer-level tag.
+func Tag(key string, value interface{}) Option {
+ return func(c *Options) {
+ c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
+ }
+}
+
+// Injector registers an Injector with the given format.
+func Injector(format interface{}, injector jaeger.Injector) Option {
+ return func(c *Options) {
+ c.injectors[format] = injector
+ }
+}
+
+// Extractor registers an Extractor with the given format.
+func Extractor(format interface{}, extractor jaeger.Extractor) Option {
+ return func(c *Options) {
+ c.extractors[format] = extractor
+ }
+}
+
+func applyOptions(options ...Option) Options {
+ opts := Options{
+ injectors: make(map[interface{}]jaeger.Injector),
+ extractors: make(map[interface{}]jaeger.Extractor),
+ }
+ for _, option := range options {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = metrics.NullFactory
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ return opts
+}
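+
+// Sketch (service name, tag and logger choice are assumptions): options are
+// passed to Configuration.New / InitGlobalTracer and folded in by applyOptions:
+//
+//	tracer, closer, err := cfg.New(
+//		"my-service",
+//		Logger(jaeger.StdLogger),
+//		Tag("deployment", "canary"),
+//	)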
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
new file mode 100644
index 000000000..5a4e18752
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+const (
+ // JaegerClientVersion is the version of the client library reported as Span tag.
+ JaegerClientVersion = "Go-2.15.1dev"
+
+ // JaegerClientVersionTagKey is the name of the tag used to report client version.
+ JaegerClientVersionTagKey = "jaeger.version"
+
+	// JaegerDebugHeader is the name of an HTTP header or a TextMap carrier key which,
+ // if found in the carrier, forces the trace to be sampled as "debug" trace.
+ // The value of the header is recorded as the tag on the root span, so that the
+ // trace can be found in the UI using this value as a correlation ID.
+ JaegerDebugHeader = "jaeger-debug-id"
+
+ // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+ // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+ // a root span does not exist.
+ JaegerBaggageHeader = "jaeger-baggage"
+
+ // TracerHostnameTagKey used to report host name of the process.
+ TracerHostnameTagKey = "hostname"
+
+ // TracerIPTagKey used to report ip of the process.
+ TracerIPTagKey = "ip"
+
+ // TracerUUIDTagKey used to report UUID of the client process.
+ TracerUUIDTagKey = "client-uuid"
+
+ // SamplerTypeTagKey reports which sampler was used on the root span.
+ SamplerTypeTagKey = "sampler.type"
+
+ // SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
+ SamplerParamTagKey = "sampler.param"
+
+ // TraceContextHeaderName is the http header name used to propagate tracing context.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceContextHeaderName = "uber-trace-id"
+
+ // TracerStateHeaderName is deprecated.
+ // Deprecated: use TraceContextHeaderName
+ TracerStateHeaderName = TraceContextHeaderName
+
+ // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceBaggageHeaderPrefix = "uberctx-"
+
+ // SamplerTypeConst is the type of sampler that always makes the same decision.
+ SamplerTypeConst = "const"
+
+ // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
+ SamplerTypeRemote = "remote"
+
+ // SamplerTypeProbabilistic is the type of sampler that samples traces
+ // with a certain fixed probability.
+ SamplerTypeProbabilistic = "probabilistic"
+
+ // SamplerTypeRateLimiting is the type of sampler that samples
+ // only up to a fixed number of traces per second.
+ SamplerTypeRateLimiting = "ratelimiting"
+
+ // SamplerTypeLowerBound is the type of sampler that samples
+ // at least a fixed number of traces per second.
+ SamplerTypeLowerBound = "lowerbound"
+
+ // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
+ DefaultUDPSpanServerHost = "localhost"
+
+ // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
+ DefaultUDPSpanServerPort = 6831
+
+ // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
+ DefaultMaxTagValueLength = 256
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/context.go
new file mode 100644
index 000000000..8b06173d9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/context.go
@@ -0,0 +1,258 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ flagSampled = byte(1)
+ flagDebug = byte(2)
+)
+
+var (
+ errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
+ errMalformedTracerStateString = errors.New("String does not match tracer state format")
+
+ emptyContext = SpanContext{}
+)
+
+// TraceID represents unique 128bit identifier of a trace
+type TraceID struct {
+ High, Low uint64
+}
+
+// SpanID represents unique 64bit identifier of a span
+type SpanID uint64
+
+// SpanContext represents propagated span identity and state
+type SpanContext struct {
+ // traceID represents globally unique ID of the trace.
+ // Usually generated as a random number.
+ traceID TraceID
+
+ // spanID represents span ID that must be unique within its trace,
+ // but does not have to be globally unique.
+ spanID SpanID
+
+ // parentID refers to the ID of the parent span.
+ // Should be 0 if the current span is a root span.
+ parentID SpanID
+
+ // flags is a bitmap containing such bits as 'sampled' and 'debug'.
+ flags byte
+
+	// Distributed Context baggage. This is a snapshot in time.
+ baggage map[string]string
+
+ // debugID can be set to some correlation ID when the context is being
+ // extracted from a TextMap carrier.
+ //
+ // See JaegerDebugHeader in constants.go
+ debugID string
+}
+
+// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
+func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+ for k, v := range c.baggage {
+ if !handler(k, v) {
+ break
+ }
+ }
+}
+
+// IsSampled returns whether this trace was chosen for permanent storage
+// by the sampling mechanism of the tracer.
+func (c SpanContext) IsSampled() bool {
+ return (c.flags & flagSampled) == flagSampled
+}
+
+// IsDebug indicates whether sampling was explicitly requested by the service.
+func (c SpanContext) IsDebug() bool {
+ return (c.flags & flagDebug) == flagDebug
+}
+
+// IsValid indicates whether this context actually represents a valid trace.
+func (c SpanContext) IsValid() bool {
+ return c.traceID.IsValid() && c.spanID != 0
+}
+
+func (c SpanContext) String() string {
+ if c.traceID.High == 0 {
+ return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+ }
+ return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+}
+
+// ContextFromString reconstructs the Context encoded in a string
+func ContextFromString(value string) (SpanContext, error) {
+ var context SpanContext
+ if value == "" {
+ return emptyContext, errEmptyTracerStateString
+ }
+ parts := strings.Split(value, ":")
+ if len(parts) != 4 {
+ return emptyContext, errMalformedTracerStateString
+ }
+ var err error
+ if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
+ return emptyContext, err
+ }
+ if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
+ return emptyContext, err
+ }
+ if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
+ return emptyContext, err
+ }
+ flags, err := strconv.ParseUint(parts[3], 10, 8)
+ if err != nil {
+ return emptyContext, err
+ }
+ context.flags = byte(flags)
+ return context, nil
+}
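+
+// Example (identifiers are made up): "1a2b3c4d5e6f7a8b:1:0:1" decodes to
+// trace ID 1a2b3c4d5e6f7a8b, span ID 1, no parent span, with the sampled
+// flag set:
+//
+//	ctx, err := ContextFromString("1a2b3c4d5e6f7a8b:1:0:1")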
+
+// TraceID returns the trace ID of this span context
+func (c SpanContext) TraceID() TraceID {
+ return c.traceID
+}
+
+// SpanID returns the span ID of this span context
+func (c SpanContext) SpanID() SpanID {
+ return c.spanID
+}
+
+// ParentID returns the parent span ID of this span context
+func (c SpanContext) ParentID() SpanID {
+ return c.parentID
+}
+
+// NewSpanContext creates a new instance of SpanContext
+func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
+ flags := byte(0)
+ if sampled {
+ flags = flagSampled
+ }
+ return SpanContext{
+ traceID: traceID,
+ spanID: spanID,
+ parentID: parentID,
+ flags: flags,
+ baggage: baggage}
+}
+
+// CopyFrom copies data from ctx into this context, including span identity and baggage.
+// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
+func (c *SpanContext) CopyFrom(ctx *SpanContext) {
+ c.traceID = ctx.traceID
+ c.spanID = ctx.spanID
+ c.parentID = ctx.parentID
+ c.flags = ctx.flags
+ if l := len(ctx.baggage); l > 0 {
+ c.baggage = make(map[string]string, l)
+ for k, v := range ctx.baggage {
+ c.baggage[k] = v
+ }
+ } else {
+ c.baggage = nil
+ }
+}
+
+// WithBaggageItem creates a new context with an extra baggage item.
+func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
+ var newBaggage map[string]string
+ if c.baggage == nil {
+ newBaggage = map[string]string{key: value}
+ } else {
+ newBaggage = make(map[string]string, len(c.baggage)+1)
+ for k, v := range c.baggage {
+ newBaggage[k] = v
+ }
+ newBaggage[key] = value
+ }
+ // Use positional parameters so the compiler will help catch new fields.
+ return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
+}
+
+// isDebugIDContainerOnly returns true when the instance of the context is only
+// used to return the debug/correlation ID from extract() method. This happens
+// in the situation when "jaeger-debug-id" header is passed in the carrier to
+// the extract() method, but the request otherwise has no span context in it.
+// Previously this would've returned opentracing.ErrSpanContextNotFound from the
+// extract method, but now it returns a dummy context with only debugID filled in.
+//
+// See JaegerDebugHeader in constants.go
+// See textMapPropagator#Extract
+func (c *SpanContext) isDebugIDContainerOnly() bool {
+ return !c.traceID.IsValid() && c.debugID != ""
+}
+
+// ------- TraceID -------
+
+func (t TraceID) String() string {
+ if t.High == 0 {
+ return fmt.Sprintf("%x", t.Low)
+ }
+ return fmt.Sprintf("%x%016x", t.High, t.Low)
+}
+
+// TraceIDFromString creates a TraceID from a hexadecimal string
+func TraceIDFromString(s string) (TraceID, error) {
+ var hi, lo uint64
+ var err error
+ if len(s) > 32 {
+ return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
+ } else if len(s) > 16 {
+ hiLen := len(s) - 16
+ if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ } else {
+ if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ }
+ return TraceID{High: hi, Low: lo}, nil
+}
+
+// IsValid checks if the trace ID is valid, i.e. not zero.
+func (t TraceID) IsValid() bool {
+ return t.High != 0 || t.Low != 0
+}
+
+// ------- SpanID -------
+
+func (s SpanID) String() string {
+ return fmt.Sprintf("%x", uint64(s))
+}
+
+// SpanIDFromString creates a SpanID from a hexadecimal string
+func SpanIDFromString(s string) (SpanID, error) {
+ if len(s) > 16 {
+ return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
+ }
+ id, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return SpanID(0), err
+ }
+ return SpanID(id), nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
new file mode 100644
index 000000000..4ce1881f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
+type ContribObserver interface {
+ // Create and return a span observer. Called when a span starts.
+ // If the Observer is not interested in the given span, it must return (nil, false).
+	// E.g.:
+	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+	//         var sp opentracing.Span
+	//         sso := opentracing.StartSpanOptions{}
+	//         if spanObserver, ok := observer.OnStartSpan(sp, opName, sso); ok {
+ // // we have a valid SpanObserver
+ // }
+ // ...
+ // }
+ OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
+}
+
+// ContribSpanObserver is created by the Observer and receives notifications
+// about other Span events. This interface is meant to match
+// github.com/opentracing-contrib/go-observer, via duck typing, without
+// directly importing the go-observer package.
+type ContribSpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// wrapper observer for the old observers (see observer.go)
+type oldObserver struct {
+ obs Observer
+}
+
+func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+ spanObserver := o.obs.OnStartSpan(operationName, options)
+ return spanObserver, spanObserver != nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
new file mode 100644
index 000000000..4f5549033
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
+It currently uses a Zipkin-compatible data model and can be directly
+integrated with a Zipkin backend (http://zipkin.io).
+
+For integration instructions please refer to the README:
+
+https://github.com/uber/jaeger-client-go/blob/master/README.md
+*/
+package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
new file mode 100644
index 000000000..19c2c055b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/header.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// HeadersConfig contains the values for the header keys that Jaeger will use.
+// These values may be either custom or default depending on whether custom
+// values were provided via a configuration.
+type HeadersConfig struct {
+	// JaegerDebugHeader is the name of an HTTP header or a TextMap carrier key which,
+ // if found in the carrier, forces the trace to be sampled as "debug" trace.
+ // The value of the header is recorded as the tag on the root span, so that the
+ // trace can be found in the UI using this value as a correlation ID.
+ JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
+
+ // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+ // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+ // a root span does not exist.
+ JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
+
+ // TraceContextHeaderName is the http header name used to propagate tracing context.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
+
+ // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
+}
+
+func (c *HeadersConfig) applyDefaults() *HeadersConfig {
+ if c.JaegerBaggageHeader == "" {
+ c.JaegerBaggageHeader = JaegerBaggageHeader
+ }
+ if c.JaegerDebugHeader == "" {
+ c.JaegerDebugHeader = JaegerDebugHeader
+ }
+ if c.TraceBaggageHeaderPrefix == "" {
+ c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
+ }
+ if c.TraceContextHeaderName == "" {
+ c.TraceContextHeaderName = TraceContextHeaderName
+ }
+ return c
+}
+
+func getDefaultHeadersConfig() *HeadersConfig {
+ return &HeadersConfig{
+ JaegerDebugHeader: JaegerDebugHeader,
+ JaegerBaggageHeader: JaegerBaggageHeader,
+ TraceContextHeaderName: TraceContextHeaderName,
+ TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
+ }
+}
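+
+// Sketch (the custom header name is hypothetical): only the overridden field
+// is kept, the rest falls back to the package defaults:
+//
+//	hc := (&HeadersConfig{TraceContextHeaderName: "x-my-trace-id"}).applyDefaults()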
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
new file mode 100644
index 000000000..745729319
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ defaultMaxValueLength = 2048
+ defaultRefreshInterval = time.Minute
+ defaultHostPort = "localhost:5778"
+)
+
+// Option is a function that sets some option on the RestrictionManager
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+ denyBaggageOnInitializationFailure bool
+ metrics *jaeger.Metrics
+ logger jaeger.Logger
+ hostPort string
+ refreshInterval time.Duration
+}
+
+// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
+// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
+// restrictions have been retrieved from agent.
+// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
+// restrictions have been retrieved from agent.
+func (options) DenyBaggageOnInitializationFailure(b bool) Option {
+ return func(o *options) {
+ o.denyBaggageOnInitializationFailure = b
+ }
+}
+
+// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+ return func(o *options) {
+ o.metrics = m
+ }
+}
+
+// Logger creates an Option that sets the logger used by the RestrictionManager.
+func (options) Logger(logger jaeger.Logger) Option {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
+func (options) HostPort(hostPort string) Option {
+ return func(o *options) {
+ o.hostPort = hostPort
+ }
+}
+
+// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
+// the baggage restrictions.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+ return func(o *options) {
+ o.refreshInterval = refreshInterval
+ }
+}
+
+func applyOptions(o ...Option) options {
+ opts := options{}
+ for _, option := range o {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = jaeger.NewNullMetrics()
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ if opts.hostPort == "" {
+ opts.hostPort = defaultHostPort
+ }
+ if opts.refreshInterval == 0 {
+ opts.refreshInterval = defaultRefreshInterval
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
new file mode 100644
index 000000000..a56515aca
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+type httpBaggageRestrictionManagerProxy struct {
+ url string
+}
+
+func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
+ v := url.Values{}
+ v.Set("service", serviceName)
+ return &httpBaggageRestrictionManagerProxy{
+ url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
+ }
+}
+
+func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) {
+ var out []*thrift.BaggageRestriction
+ if err := utils.GetJSON(s.url, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
+type RestrictionManager struct {
+ options
+
+ mux sync.RWMutex
+ serviceName string
+ restrictions map[string]*baggage.Restriction
+ thriftProxy thrift.BaggageRestrictionManager
+ pollStopped sync.WaitGroup
+ stopPoll chan struct{}
+ invalidRestriction *baggage.Restriction
+ validRestriction *baggage.Restriction
+
+ // Determines if the manager has successfully retrieved baggage restrictions from agent
+ initialized bool
+}
+
+// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
+// baggage restrictions.
+func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
+ // TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
+ // restrictionsMap will need to exist per service
+ opts := applyOptions(options...)
+ m := &RestrictionManager{
+ serviceName: serviceName,
+ options: opts,
+ restrictions: make(map[string]*baggage.Restriction),
+ thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
+ stopPoll: make(chan struct{}),
+ invalidRestriction: baggage.NewRestriction(false, 0),
+ validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
+ }
+ m.pollStopped.Add(1)
+ go m.pollManager()
+ return m
+}
+
+// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
+func (m *RestrictionManager) isReady() bool {
+ m.mux.RLock()
+ defer m.mux.RUnlock()
+ return m.initialized
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
+ m.mux.RLock()
+ defer m.mux.RUnlock()
+ if !m.initialized {
+ if m.denyBaggageOnInitializationFailure {
+ return m.invalidRestriction
+ }
+ return m.validRestriction
+ }
+ if restriction, ok := m.restrictions[key]; ok {
+ return restriction
+ }
+ return m.invalidRestriction
+}
+
+// Close stops remote polling and closes the RestrictionManager.
+func (m *RestrictionManager) Close() error {
+ close(m.stopPoll)
+ m.pollStopped.Wait()
+ return nil
+}
+
+func (m *RestrictionManager) pollManager() {
+ defer m.pollStopped.Done()
+ // attempt to initialize baggage restrictions
+ if err := m.updateRestrictions(); err != nil {
+ m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
+ }
+ ticker := time.NewTicker(m.refreshInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := m.updateRestrictions(); err != nil {
+ m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
+ }
+ case <-m.stopPoll:
+ return
+ }
+ }
+}
+
+func (m *RestrictionManager) updateRestrictions() error {
+ restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName)
+ if err != nil {
+ m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
+ return err
+ }
+ newRestrictions := m.parseRestrictions(restrictions)
+ m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ m.initialized = true
+ m.restrictions = newRestrictions
+ return nil
+}
+
+func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
+ setters := make(map[string]*baggage.Restriction, len(restrictions))
+ for _, restriction := range restrictions {
+ setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
+ }
+ return setters
+}
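+
+// Sketch (host:port and interval are assumptions): a manager that denies
+// baggage writes until the first successful poll of the agent:
+//
+//	mgr := NewRestrictionManager(
+//		"my-service",
+//		Options.DenyBaggageOnInitializationFailure(true),
+//		Options.HostPort("127.0.0.1:5778"),
+//		Options.RefreshInterval(30*time.Second),
+//	)
+//	defer mgr.Close()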
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
new file mode 100644
index 000000000..c16a5c566
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage
+
+const (
+ defaultMaxValueLength = 2048
+)
+
+// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
+type Restriction struct {
+ keyAllowed bool
+ maxValueLength int
+}
+
+// NewRestriction returns a new Restriction.
+func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
+ return &Restriction{
+ keyAllowed: keyAllowed,
+ maxValueLength: maxValueLength,
+ }
+}
+
+// KeyAllowed returns whether the baggage key for this restriction is allowed.
+func (r *Restriction) KeyAllowed() bool {
+ return r.keyAllowed
+}
+
+// MaxValueLength returns the max length for the baggage value.
+func (r *Restriction) MaxValueLength() int {
+ return r.maxValueLength
+}
+
+// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
+// will return a Restriction for a specific baggage key which will determine whether the baggage
+// key is allowed for the current service and any other applicable restrictions on the baggage
+// value.
+type RestrictionManager interface {
+ GetRestriction(service, key string) *Restriction
+}
+
+// DefaultRestrictionManager allows any baggage key.
+type DefaultRestrictionManager struct {
+ defaultRestriction *Restriction
+}
+
+// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
+func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
+ if maxValueLength == 0 {
+ maxValueLength = defaultMaxValueLength
+ }
+ return &DefaultRestrictionManager{
+ defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
+ }
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
+ return m.defaultRestriction
+}
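+
+// Sketch (service and key names are placeholders): the default manager allows
+// every key, so only the value-length restriction applies:
+//
+//	mgr := NewDefaultRestrictionManager(0) // 0 falls back to defaultMaxValueLength
+//	r := mgr.GetRestriction("my-service", "session-id")
+//	ok, max := r.KeyAllowed(), r.MaxValueLength() // true, 2048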
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
new file mode 100644
index 000000000..0e10b8a5a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanlog
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+type fieldsAsMap map[string]string
+
+// MaterializeWithJSON converts log Fields into JSON string
+// TODO refactor into pluggable materializer
+func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
+ fields := fieldsAsMap(make(map[string]string, len(logFields)))
+ for _, field := range logFields {
+ field.Marshal(fields)
+ }
+ if event, ok := fields["event"]; ok && len(fields) == 1 {
+ return []byte(event), nil
+ }
+ return json.Marshal(fields)
+}
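+
+// Sketch (field values are made up): a lone "event" field is emitted as a bare
+// string, any other combination becomes a JSON object:
+//
+//	data, _ := MaterializeWithJSON([]log.Field{log.String("event", "timeout")})
+//	// data == []byte("timeout")
+//
+//	data, _ = MaterializeWithJSON([]log.Field{
+//		log.String("event", "retry"),
+//		log.Int("attempt", 3),
+//	})
+//	// data == []byte(`{"attempt":"3","event":"retry"}`)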
+
+func (ml fieldsAsMap) EmitString(key, value string) {
+ ml[key] = value
+}
+
+func (ml fieldsAsMap) EmitBool(key string, value bool) {
+ ml[key] = fmt.Sprintf("%t", value)
+}
+
+func (ml fieldsAsMap) EmitInt(key string, value int) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt32(key string, value int32) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt64(key string, value int64) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
+ ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
+ ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
+ ml[key] = fmt.Sprintf("%+v", value)
+}
+
+func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
+ value(ml)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
new file mode 100644
index 000000000..f52c322fb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ defaultHostPort = "localhost:5778"
+ defaultRefreshInterval = time.Second * 5
+)
+
+// Option is a function that sets some option on the Throttler
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+ metrics *jaeger.Metrics
+ logger jaeger.Logger
+ hostPort string
+ refreshInterval time.Duration
+ synchronousInitialization bool
+}
+
+// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+ return func(o *options) {
+ o.metrics = m
+ }
+}
+
+// Logger creates an Option that sets the logger used by the Throttler.
+func (options) Logger(logger jaeger.Logger) Option {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
+func (options) HostPort(hostPort string) Option {
+ return func(o *options) {
+ o.hostPort = hostPort
+ }
+}
+
+// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
+// credits.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+ return func(o *options) {
+ o.refreshInterval = refreshInterval
+ }
+}
+
+// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
+// fetch credits from the agent when an operation is seen for the first time. This should be set to true
+// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
+// such that sampling or throttling occurs.
+func (options) SynchronousInitialization(b bool) Option {
+ return func(o *options) {
+ o.synchronousInitialization = b
+ }
+}
+
+func applyOptions(o ...Option) options {
+ opts := options{}
+ for _, option := range o {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = jaeger.NewNullMetrics()
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ if opts.hostPort == "" {
+ opts.hostPort = defaultHostPort
+ }
+ if opts.refreshInterval == 0 {
+ opts.refreshInterval = defaultRefreshInterval
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
new file mode 100644
index 000000000..20f434fe4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/uber/jaeger-client-go"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ // minimumCredits is the minimum amount of credits necessary to not be throttled.
+	// i.e. if currentCredits >= minimumCredits, then the operation will not be throttled.
+ minimumCredits = 1.0
+)
+
+var (
+ errorUUIDNotSet = errors.New("Throttler UUID must be set")
+)
+
+type operationBalance struct {
+ Operation string `json:"operation"`
+ Balance float64 `json:"balance"`
+}
+
+type creditResponse struct {
+ Balances []operationBalance `json:"balances"`
+}
+
+type httpCreditManagerProxy struct {
+ hostPort string
+}
+
+func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
+ return &httpCreditManagerProxy{
+ hostPort: hostPort,
+ }
+}
+
+// N.B. Operations list must not be empty.
+func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
+ params := url.Values{}
+ params.Set("service", serviceName)
+ params.Set("uuid", uuid)
+ for _, op := range operations {
+ params.Add("operations", op)
+ }
+ var resp creditResponse
+ if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
+ return nil, errors.Wrap(err, "Failed to receive credits from agent")
+ }
+ return &resp, nil
+}
+
+// Throttler retrieves credits from agent and uses it to throttle operations.
+type Throttler struct {
+ options
+
+ mux sync.RWMutex
+ service string
+ uuid atomic.Value
+ creditManager *httpCreditManagerProxy
+ credits map[string]float64 // map of operation->credits
+ close chan struct{}
+ stopped sync.WaitGroup
+}
+
+// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
+// the service.
+func NewThrottler(service string, options ...Option) *Throttler {
+ opts := applyOptions(options...)
+ creditManager := newHTTPCreditManagerProxy(opts.hostPort)
+ t := &Throttler{
+ options: opts,
+ creditManager: creditManager,
+ service: service,
+ credits: make(map[string]float64),
+ close: make(chan struct{}),
+ }
+ t.stopped.Add(1)
+ go t.pollManager()
+ return t
+}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t *Throttler) IsAllowed(operation string) bool {
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ value, ok := t.credits[operation]
+ if !ok || value == 0 {
+ if !ok {
+ // NOTE: This appears to be a no-op at first glance, but it stores
+ // the operation key in the map. Necessary for functionality of
+ // Throttler#operations method.
+ t.credits[operation] = 0
+ }
+ if !t.synchronousInitialization {
+ t.metrics.ThrottledDebugSpans.Inc(1)
+ return false
+ }
+ // If it is the first time this operation is being checked, synchronously fetch
+ // the credits.
+ credits, err := t.fetchCredits([]string{operation})
+ if err != nil {
+ // Failed to receive credits from agent, try again next time
+ t.logger.Error("Failed to fetch credits: " + err.Error())
+ return false
+ }
+ if len(credits.Balances) == 0 {
+ // This shouldn't happen but just in case
+ return false
+ }
+ for _, opBalance := range credits.Balances {
+ t.credits[opBalance.Operation] += opBalance.Balance
+ }
+ }
+ return t.isAllowed(operation)
+}
+
+// Close stops the throttler from fetching credits from remote.
+func (t *Throttler) Close() error {
+ close(t.close)
+ t.stopped.Wait()
+ return nil
+}
+
+// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
+// requests are made.
+func (t *Throttler) SetProcess(process jaeger.Process) {
+ if process.UUID != "" {
+ t.uuid.Store(process.UUID)
+ }
+}
+
+// N.B. This function must be called with the Write Lock
+func (t *Throttler) isAllowed(operation string) bool {
+ credits := t.credits[operation]
+ if credits < minimumCredits {
+ t.metrics.ThrottledDebugSpans.Inc(1)
+ return false
+ }
+ t.credits[operation] = credits - minimumCredits
+ return true
+}
+
+func (t *Throttler) pollManager() {
+ defer t.stopped.Done()
+ ticker := time.NewTicker(t.refreshInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ t.refreshCredits()
+ case <-t.close:
+ return
+ }
+ }
+}
+
+func (t *Throttler) operations() []string {
+ t.mux.RLock()
+ defer t.mux.RUnlock()
+ operations := make([]string, 0, len(t.credits))
+ for op := range t.credits {
+ operations = append(operations, op)
+ }
+ return operations
+}
+
+func (t *Throttler) refreshCredits() {
+ operations := t.operations()
+ if len(operations) == 0 {
+ return
+ }
+ newCredits, err := t.fetchCredits(operations)
+ if err != nil {
+ t.metrics.ThrottlerUpdateFailure.Inc(1)
+ t.logger.Error("Failed to fetch credits: " + err.Error())
+ return
+ }
+ t.metrics.ThrottlerUpdateSuccess.Inc(1)
+
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ for _, opBalance := range newCredits.Balances {
+ t.credits[opBalance.Operation] += opBalance.Balance
+ }
+}
+
+func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
+ uuid := t.uuid.Load()
+ uuidStr, _ := uuid.(string)
+ if uuid == nil || uuidStr == "" {
+ return nil, errorUUIDNotSet
+ }
+ return t.creditManager.FetchCredits(uuidStr, t.service, operations)
+}
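+
+// Sketch (values are assumptions): a throttler that fetches credits the first
+// time an operation is seen; the tracer is expected to call SetProcess with
+// the client UUID before IsAllowed can reach the agent:
+//
+//	t := NewThrottler(
+//		"my-service",
+//		Options.SynchronousInitialization(true),
+//		Options.RefreshInterval(10*time.Second),
+//	)
+//	defer t.Close()
+//	if t.IsAllowed("HTTP GET /users") {
+//		// the debug span may be started
+//	}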
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
new file mode 100644
index 000000000..196ed69ca
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package throttler
+
+// Throttler is used to rate-limit operations. For example, given how debug spans
+// are always sampled, a throttler can be enabled per client to rate-limit the number
+// of debug spans a client can start.
+type Throttler interface {
+ // IsAllowed determines whether the operation should be allowed and not be
+ // throttled.
+ IsAllowed(operation string) bool
+}
+
+// DefaultThrottler doesn't throttle at all.
+type DefaultThrottler struct{}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t DefaultThrottler) IsAllowed(operation string) bool {
+ return true
+}
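+
+// Sketch (operation name is a placeholder): the default throttler never
+// rejects anything, so it can stand in wherever a Throttler is required but
+// no rate limiting is wanted:
+//
+//	var t Throttler = DefaultThrottler{}
+//	_ = t.IsAllowed("HTTP GET /users") // always true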
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
new file mode 100644
index 000000000..8402d087c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/interop.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go"
+)
+
+// TODO this file should not be needed after TChannel PR.
+
+type formatKey int
+
+// SpanContextFormat is a constant used as OpenTracing Format.
+// Requires *SpanContext as carrier.
+// This format is intended for interop with TChannel or other Zipkin-like tracers.
+const SpanContextFormat formatKey = iota
+
+type jaegerTraceContextPropagator struct {
+ tracer *Tracer
+}
+
+func (p *jaegerTraceContextPropagator) Inject(
+ ctx SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(*SpanContext)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ carrier.CopyFrom(&ctx)
+ return nil
+}
+
+func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(*SpanContext)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ ctx := new(SpanContext)
+ ctx.CopyFrom(carrier)
+ return *ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
new file mode 100644
index 000000000..868b2a5b5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+
+ "github.com/opentracing/opentracing-go/log"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+type tags []*j.Tag
+
+// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
+func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
+ fields := tags(make([]*j.Tag, 0, len(logFields)))
+ for _, field := range logFields {
+ field.Marshal(&fields)
+ }
+ return fields
+}
+
+func (t *tags) EmitString(key, value string) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
+}
+
+func (t *tags) EmitBool(key string, value bool) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
+}
+
+func (t *tags) EmitInt(key string, value int) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt32(key string, value int32) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt64(key string, value int64) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
+}
+
+func (t *tags) EmitUint32(key string, value uint32) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitUint64(key string, value uint64) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitFloat32(key string, value float32) {
+ vDouble := float64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
+}
+
+func (t *tags) EmitFloat64(key string, value float64) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
+}
+
+func (t *tags) EmitObject(key string, value interface{}) {
+ vStr := fmt.Sprintf("%+v", value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
+}
+
+func (t *tags) EmitLazyLogger(value log.LazyLogger) {
+ value(t)
+}
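
A short usage sketch, assuming this vendored copy is importable as github.com/uber/jaeger-client-go: OpenTracing log fields are converted into the Thrift tag representation that the reporter ships to the collector.

    package main

    import (
        "fmt"

        "github.com/opentracing/opentracing-go/log"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        fields := []log.Field{
            log.String("event", "cache miss"),
            log.Int("retries", 3),
        }
        // Each field is marshalled into a thrift-gen Tag via the tags emitter above.
        for _, tag := range jaeger.ConvertLogsToJaegerTags(fields) {
            fmt.Println(tag.Key, tag.VType)
        }
    }
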
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
new file mode 100644
index 000000000..6ce1caf87
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// BuildJaegerThrift builds a Jaeger Thrift span from the internal Span representation.
+func BuildJaegerThrift(span *Span) *j.Span {
+ span.Lock()
+ defer span.Unlock()
+ startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+ duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+ jaegerSpan := &j.Span{
+ TraceIdLow: int64(span.context.traceID.Low),
+ TraceIdHigh: int64(span.context.traceID.High),
+ SpanId: int64(span.context.spanID),
+ ParentSpanId: int64(span.context.parentID),
+ OperationName: span.operationName,
+ Flags: int32(span.context.flags),
+ StartTime: startTime,
+ Duration: duration,
+ Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
+ Logs: buildLogs(span.logs),
+ References: buildReferences(span.references),
+ }
+ return jaegerSpan
+}
+
+// BuildJaegerProcessThrift creates a thrift Process type.
+func BuildJaegerProcessThrift(span *Span) *j.Process {
+ span.Lock()
+ defer span.Unlock()
+ return buildJaegerProcessThrift(span.tracer)
+}
+
+func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
+ process := &j.Process{
+ ServiceName: tracer.serviceName,
+ Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength),
+ }
+ if tracer.process.UUID != "" {
+ process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
+ }
+ return process
+}
+
+func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
+ jTags := make([]*j.Tag, 0, len(tags))
+ for _, tag := range tags {
+ jTag := buildTag(&tag, maxTagValueLength)
+ jTags = append(jTags, jTag)
+ }
+ return jTags
+}
+
+func buildLogs(logs []opentracing.LogRecord) []*j.Log {
+ jLogs := make([]*j.Log, 0, len(logs))
+ for _, log := range logs {
+ jLog := &j.Log{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+ Fields: ConvertLogsToJaegerTags(log.Fields),
+ }
+ jLogs = append(jLogs, jLog)
+ }
+ return jLogs
+}
+
+func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
+ jTag := &j.Tag{Key: tag.key}
+ switch value := tag.value.(type) {
+ case string:
+ vStr := truncateString(value, maxTagValueLength)
+ jTag.VStr = &vStr
+ jTag.VType = j.TagType_STRING
+ case []byte:
+ if len(value) > maxTagValueLength {
+ value = value[:maxTagValueLength]
+ }
+ jTag.VBinary = value
+ jTag.VType = j.TagType_BINARY
+ case int:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int8:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint8:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int16:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint16:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int32:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint32:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int64:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint64:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case float32:
+ vDouble := float64(value)
+ jTag.VDouble = &vDouble
+ jTag.VType = j.TagType_DOUBLE
+ case float64:
+ vDouble := float64(value)
+ jTag.VDouble = &vDouble
+ jTag.VType = j.TagType_DOUBLE
+ case bool:
+ vBool := value
+ jTag.VBool = &vBool
+ jTag.VType = j.TagType_BOOL
+ default:
+ vStr := truncateString(stringify(value), maxTagValueLength)
+ jTag.VStr = &vStr
+ jTag.VType = j.TagType_STRING
+ }
+ return jTag
+}
+
+func buildReferences(references []Reference) []*j.SpanRef {
+ retMe := make([]*j.SpanRef, 0, len(references))
+ for _, ref := range references {
+ if ref.Type == opentracing.ChildOfRef {
+ retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
+ } else if ref.Type == opentracing.FollowsFromRef {
+ retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
+ }
+ }
+ return retMe
+}
+
+func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
+ return &j.SpanRef{
+ RefType: refType,
+ TraceIdLow: int64(ctx.traceID.Low),
+ TraceIdHigh: int64(ctx.traceID.High),
+ SpanId: int64(ctx.spanID),
+ }
+}
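
A sketch of this conversion path, assuming NewTracer and NewConstSampler from parts of the package not shown in this hunk: a finished span is turned into the Thrift shape defined above, with the duration already expressed in microseconds.

    package main

    import (
        "fmt"
        "time"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        // NewTracer/NewConstSampler are defined elsewhere in the vendored package.
        tracer, closer := jaeger.NewTracer("demo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
        defer closer.Close()

        sp := tracer.StartSpan("fetch-user")
        sp.SetTag("answer", 42)
        time.Sleep(time.Millisecond)
        sp.Finish()

        // BuildJaegerThrift reads the span under its lock and fills the thrift struct.
        jSpan := jaeger.BuildJaegerThrift(sp.(*jaeger.Span))
        fmt.Println(jSpan.OperationName, jSpan.Duration, len(jSpan.Tags)) // Duration is in microseconds
    }
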
diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go
new file mode 100644
index 000000000..894bb3dbf
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "sync"
+)
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// the reporters' logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+ // Error logs a message at error priority
+ Error(msg string)
+
+ // Infof logs a message at info priority
+ Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the standard `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+ log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// NullLogger is a no-op implementation of the Logger interface
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string) {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
+
+// BytesBufferLogger implements Logger backed by a bytes.Buffer.
+type BytesBufferLogger struct {
+ mux sync.Mutex
+ buf bytes.Buffer
+}
+
+// Error implements Logger.
+func (l *BytesBufferLogger) Error(msg string) {
+ l.mux.Lock()
+ l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
+ l.mux.Unlock()
+}
+
+// Infof implements Logger.
+func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
+ l.mux.Lock()
+ l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
+ l.mux.Unlock()
+}
+
+// String returns string representation of the underlying buffer.
+func (l *BytesBufferLogger) String() string {
+ l.mux.Lock()
+ defer l.mux.Unlock()
+ return l.buf.String()
+}
+
+// Flush empties the underlying buffer.
+func (l *BytesBufferLogger) Flush() {
+ l.mux.Lock()
+ defer l.mux.Unlock()
+ l.buf.Reset()
+}
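
A small usage sketch of the BytesBufferLogger above; it is handy in tests where reporter output should be captured rather than printed.

    package main

    import (
        "fmt"

        jlog "github.com/uber/jaeger-client-go/log"
    )

    func main() {
        buf := &jlog.BytesBufferLogger{}
        var logger jlog.Logger = buf // satisfies the interface reporters expect

        logger.Infof("connecting to agent at %s", "localhost:6831")
        logger.Error("failed to flush spans")

        fmt.Print(buf.String())
        // INFO: connecting to agent at localhost:6831
        // ERROR: failed to flush spans
        buf.Flush() // empties the buffer for the next test
    }
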
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
new file mode 100644
index 000000000..d4f0b5019
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/logger.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "log"
+
+// NB: this will be deprecated in 3.0.0; please use jaeger-client-go/log/logger instead.
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// the reporters' logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+ // Error logs a message at error priority
+ Error(msg string)
+
+ // Infof logs a message at info priority
+ Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the standard `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+ log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// NullLogger is a no-op implementation of the Logger interface
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string) {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
new file mode 100644
index 000000000..e56db9b73
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+// Metrics is a container of all stats emitted by Jaeger tracer.
+type Metrics struct {
+ // Number of traces started by this tracer as sampled
+ TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
+
+ // Number of traces started by this tracer as not sampled
+ TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+
+ // Number of externally started sampled traces this tracer joined
+ TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
+
+ // Number of externally started not-sampled traces this tracer joined
+ TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
+
+ // Number of sampled spans started by this tracer
+ SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"`
+
+ // Number of unsampled spans started by this tracer
+ SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"`
+
+ // Number of spans finished by this tracer
+ SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"`
+
+ // Number of errors decoding tracing context
+ DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
+
+ // Number of spans successfully reported
+ ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
+
+ // Number of spans not reported due to a Sender failure
+ ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
+
+ // Number of spans dropped due to internal queue overflow
+ ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
+
+ // Current number of spans in the reporter queue
+ ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
+
+ // Number of times the Sampler succeeded to retrieve sampling strategy
+ SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
+
+ // Number of times the Sampler failed to retrieve sampling strategy
+ SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
+
+ // Number of times the Sampler succeeded to retrieve and update sampling strategy
+ SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
+
+ // Number of times the Sampler failed to update sampling strategy
+ SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
+
+ // Number of times baggage was successfully written or updated on spans.
+ BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
+
+ // Number of times baggage failed to write or update on spans.
+ BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
+
+ // Number of times baggage was truncated as per baggage restrictions.
+ BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
+
+ // Number of times baggage restrictions were successfully updated.
+ BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
+
+ // Number of times baggage restrictions failed to update.
+ BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
+
+ // Number of times debug spans were throttled.
+ ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
+
+ // Number of times throttler successfully updated.
+ ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
+
+ // Number of times throttler failed to update.
+ ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
+}
+
+// NewMetrics creates a new Metrics struct and initializes it.
+func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
+ m := &Metrics{}
+ // TODO the namespace "jaeger" should be configurable
+ metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
+ return m
+}
+
+// NewNullMetrics creates a new Metrics struct that won't report any metrics.
+func NewNullMetrics() *Metrics {
+ return NewMetrics(metrics.NullFactory, nil)
+}
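
A minimal sketch of wiring these metrics: NullFactory (from jaeger-lib) keeps it self-contained, whereas a real setup would pass a Prometheus or expvar factory so the counters are actually exported.

    package main

    import (
        "github.com/uber/jaeger-lib/metrics"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger"})

        // The tracer and reporter increment these internally; with NullFactory the
        // calls are no-ops, with a real factory they appear under jaeger.tracer.*.
        m.SpansStartedSampled.Inc(1)
        m.ReporterQueueLength.Update(0)
    }
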
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
new file mode 100644
index 000000000..7bbd02889
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/observer.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with the Tracer to receive notifications about
+// new Spans.
+//
+// Deprecated: use jaeger.ContribObserver instead.
+type Observer interface {
+ OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+//
+// Deprecated: use jaeger.ContribSpanObserver instead.
+type SpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// compositeObserver is a dispatcher to other observers
+type compositeObserver struct {
+ observers []ContribObserver
+}
+
+// compositeSpanObserver is a dispatcher to other span observers
+type compositeSpanObserver struct {
+ observers []ContribSpanObserver
+}
+
+// noopSpanObserver is used when there are no observers registered
+// on the Tracer or none of them returns span observers from OnStartSpan.
+var noopSpanObserver = &compositeSpanObserver{}
+
+func (o *compositeObserver) append(contribObserver ContribObserver) {
+ o.observers = append(o.observers, contribObserver)
+}
+
+func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
+ var spanObservers []ContribSpanObserver
+ for _, obs := range o.observers {
+ spanObs, ok := obs.OnStartSpan(sp, operationName, options)
+ if ok {
+ if spanObservers == nil {
+ spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
+ }
+ spanObservers = append(spanObservers, spanObs)
+ }
+ }
+ if len(spanObservers) == 0 {
+ return noopSpanObserver
+ }
+ return &compositeSpanObserver{observers: spanObservers}
+}
+
+func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
+ for _, obs := range o.observers {
+ obs.OnSetOperationName(operationName)
+ }
+}
+
+func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
+ for _, obs := range o.observers {
+ obs.OnSetTag(key, value)
+ }
+}
+
+func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
+ for _, obs := range o.observers {
+ obs.OnFinish(options)
+ }
+}
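
A sketch implementing the (deprecated) Observer/SpanObserver pair defined above; registering the observer on the tracer happens via a tracer-construction option that is not part of this hunk.

    package main

    import (
        "fmt"

        opentracing "github.com/opentracing/opentracing-go"

        jaeger "github.com/uber/jaeger-client-go"
    )

    // loggingObserver is notified whenever a span starts.
    type loggingObserver struct{}

    func (loggingObserver) OnStartSpan(operationName string, options opentracing.StartSpanOptions) jaeger.SpanObserver {
        fmt.Println("span started:", operationName)
        return loggingSpanObserver{}
    }

    // loggingSpanObserver receives the remaining lifecycle events for that span.
    type loggingSpanObserver struct{}

    func (loggingSpanObserver) OnSetOperationName(operationName string)    { fmt.Println("renamed:", operationName) }
    func (loggingSpanObserver) OnSetTag(key string, value interface{})     { fmt.Println("tag:", key, "=", value) }
    func (loggingSpanObserver) OnFinish(options opentracing.FinishOptions) { fmt.Println("span finished") }

    func main() {
        var obs jaeger.Observer = loggingObserver{} // compile-time check against the vendored interface
        so := obs.OnStartSpan("fetch-user", opentracing.StartSpanOptions{})
        so.OnSetTag("user.id", 42)
        so.OnFinish(opentracing.FinishOptions{})
    }
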
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
new file mode 100644
index 000000000..30cbf9962
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/process.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// Process holds process specific metadata that's relevant to this client.
+type Process struct {
+ Service string
+ UUID string
+ Tags []Tag
+}
+
+// ProcessSetter sets a process. This can be used by any component that requires
+// the process to be set as part of initialization.
+// See internal/throttler/remote/throttler.go for an example.
+type ProcessSetter interface {
+ SetProcess(process Process)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
new file mode 100644
index 000000000..abca67a3c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -0,0 +1,300 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "net/url"
+ "strings"
+ "sync"
+
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// Injector is responsible for injecting SpanContext instances in a manner suitable
+// for propagation via a format-specific "carrier" object. Typically the
+// injection will take place across an RPC boundary, but message queues and
+// other IPC mechanisms are also reasonable places to use an Injector.
+type Injector interface {
+ // Inject takes `SpanContext` and injects it into `carrier`. The actual type
+ // of `carrier` depends on the `format` passed to `Tracer.Inject()`.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if injection fails.
+ Inject(ctx SpanContext, carrier interface{}) error
+}
+
+// Extractor is responsible for extracting SpanContext instances from a
+// format-specific "carrier" object. Typically the extraction will take place
+// on the server side of an RPC boundary, but message queues and other IPC
+// mechanisms are also reasonable places to use an Extractor.
+type Extractor interface {
+ // Extract decodes a SpanContext instance from the given `carrier`,
+ // or (nil, opentracing.ErrSpanContextNotFound) if no context could
+ // be found in the `carrier`.
+ Extract(carrier interface{}) (SpanContext, error)
+}
+
+type textMapPropagator struct {
+ headerKeys *HeadersConfig
+ metrics Metrics
+ encodeValue func(string) string
+ decodeValue func(string) string
+}
+
+func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
+ return &textMapPropagator{
+ headerKeys: headerKeys,
+ metrics: metrics,
+ encodeValue: func(val string) string {
+ return val
+ },
+ decodeValue: func(val string) string {
+ return val
+ },
+ }
+}
+
+func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
+ return &textMapPropagator{
+ headerKeys: headerKeys,
+ metrics: metrics,
+ encodeValue: func(val string) string {
+ return url.QueryEscape(val)
+ },
+ decodeValue: func(val string) string {
+ // ignore decoding errors, cannot do anything about them
+ if v, err := url.QueryUnescape(val); err == nil {
+ return v
+ }
+ return val
+ },
+ }
+}
+
+type binaryPropagator struct {
+ tracer *Tracer
+ buffers sync.Pool
+}
+
+func newBinaryPropagator(tracer *Tracer) *binaryPropagator {
+ return &binaryPropagator{
+ tracer: tracer,
+ buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
+ }
+}
+
+func (p *textMapPropagator) Inject(
+ sc SpanContext,
+ abstractCarrier interface{},
+) error {
+ textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ // Do not encode the string with trace context to avoid accidental double-encoding
+ // if people are using opentracing < 0.10.0. Our colon-separated representation
+ // of the trace context is already safe for HTTP headers.
+ textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
+ for k, v := range sc.baggage {
+ safeKey := p.addBaggageKeyPrefix(k)
+ safeVal := p.encodeValue(v)
+ textMapWriter.Set(safeKey, safeVal)
+ }
+ return nil
+}
+
+func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ var ctx SpanContext
+ var baggage map[string]string
+ err := textMapReader.ForeachKey(func(rawKey, value string) error {
+ key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
+ if key == p.headerKeys.TraceContextHeaderName {
+ var err error
+ safeVal := p.decodeValue(value)
+ if ctx, err = ContextFromString(safeVal); err != nil {
+ return err
+ }
+ } else if key == p.headerKeys.JaegerDebugHeader {
+ ctx.debugID = p.decodeValue(value)
+ } else if key == p.headerKeys.JaegerBaggageHeader {
+ if baggage == nil {
+ baggage = make(map[string]string)
+ }
+ for k, v := range p.parseCommaSeparatedMap(value) {
+ baggage[k] = v
+ }
+ } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
+ if baggage == nil {
+ baggage = make(map[string]string)
+ }
+ safeKey := p.removeBaggageKeyPrefix(key)
+ safeVal := p.decodeValue(value)
+ baggage[safeKey] = safeVal
+ }
+ return nil
+ })
+ if err != nil {
+ p.metrics.DecodingErrors.Inc(1)
+ return emptyContext, err
+ }
+ if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
+ return emptyContext, opentracing.ErrSpanContextNotFound
+ }
+ ctx.baggage = baggage
+ return ctx, nil
+}
+
+func (p *binaryPropagator) Inject(
+ sc SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(io.Writer)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ // Handle the tracer context
+ if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
+ return err
+ }
+
+ // Handle the baggage items
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
+ return err
+ }
+ for k, v := range sc.baggage {
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
+ return err
+ }
+ io.WriteString(carrier, k)
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
+ return err
+ }
+ io.WriteString(carrier, v)
+ }
+
+ return nil
+}
+
+func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(io.Reader)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ var ctx SpanContext
+
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+
+ // Handle the baggage items
+ var numBaggage int32
+ if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
+ ctx.baggage = make(map[string]string, iNumBaggage)
+ buf := p.buffers.Get().(*bytes.Buffer)
+ defer p.buffers.Put(buf)
+
+ var keyLen, valLen int32
+ for i := 0; i < iNumBaggage; i++ {
+ if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ buf.Reset()
+ buf.Grow(int(keyLen))
+ if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ key := buf.String()
+
+ if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ buf.Reset()
+ buf.Grow(int(valLen))
+ if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ ctx.baggage[key] = buf.String()
+ }
+ }
+
+ return ctx, nil
+}
+
+// Converts a comma separated key value pair list into a map
+// e.g. key1=value1, key2=value2, key3 = value3
+// is converted to map[string]string { "key1" : "value1",
+// "key2" : "value2",
+// "key3" : "value3" }
+func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
+ baggage := make(map[string]string)
+ value, err := url.QueryUnescape(value)
+ if err != nil {
+ log.Printf("Unable to unescape %s, %v", value, err)
+ return baggage
+ }
+ for _, kvpair := range strings.Split(value, ",") {
+ kv := strings.Split(strings.TrimSpace(kvpair), "=")
+ if len(kv) == 2 {
+ baggage[kv[0]] = kv[1]
+ } else {
+ log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
+ }
+ }
+ return baggage
+}
+
+// Converts a baggage item key into an http header format,
+// by prepending TraceBaggageHeaderPrefix and encoding the key string
+func (p *textMapPropagator) addBaggageKeyPrefix(key string) string {
+ // TODO encodeBaggageKeyAsHeader add caching and escaping
+ return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
+}
+
+func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string {
+ // TODO decodeBaggageHeaderKey add caching and escaping
+ return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
+}
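
A usage sketch of the text-map/HTTP-header propagator above, assuming NewTracer and NewConstSampler from the rest of the package: Inject writes the span context and prefixed baggage into HTTP headers, and Extract reads them back on the receiving side.

    package main

    import (
        "fmt"
        "net/http"

        opentracing "github.com/opentracing/opentracing-go"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        tracer, closer := jaeger.NewTracer("demo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
        defer closer.Close()

        span := tracer.StartSpan("client-call")
        span.SetBaggageItem("tenant", "acme")

        // Client side: the HTTP-header propagator URL-escapes baggage values and
        // writes the trace context under the configured header name.
        header := http.Header{}
        if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(header)); err != nil {
            panic(err)
        }

        // Server side: Extract rebuilds the SpanContext, including baggage.
        spanCtx, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(header))
        fmt.Println(spanCtx, err)
        span.Finish()
    }
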
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
new file mode 100644
index 000000000..5646e78bb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reference.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "github.com/opentracing/opentracing-go"
+
+// Reference represents a causal reference to other Spans (via their SpanContext).
+type Reference struct {
+ Type opentracing.SpanReferenceType
+ Context SpanContext
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
new file mode 100644
index 000000000..fe6288c4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -0,0 +1,289 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
+type Reporter interface {
+ // Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+ Report(span *Span)
+
+ // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
+ Close()
+}
+
+// ------------------------------
+
+type nullReporter struct{}
+
+// NewNullReporter creates a no-op reporter that ignores all reported spans.
+func NewNullReporter() Reporter {
+ return &nullReporter{}
+}
+
+// Report implements Report() method of Reporter by doing nothing.
+func (r *nullReporter) Report(span *Span) {
+ // no-op
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *nullReporter) Close() {
+ // no-op
+}
+
+// ------------------------------
+
+type loggingReporter struct {
+ logger Logger
+}
+
+// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
+func NewLoggingReporter(logger Logger) Reporter {
+ return &loggingReporter{logger}
+}
+
+// Report implements Report() method of Reporter by logging the span to the logger.
+func (r *loggingReporter) Report(span *Span) {
+ r.logger.Infof("Reporting span %+v", span)
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *loggingReporter) Close() {
+ // no-op
+}
+
+// ------------------------------
+
+// InMemoryReporter is used for testing, and simply collects spans in memory.
+type InMemoryReporter struct {
+ spans []opentracing.Span
+ lock sync.Mutex
+}
+
+// NewInMemoryReporter creates a reporter that stores spans in memory.
+// NOTE: the Tracer should be created with options.PoolSpans = false.
+func NewInMemoryReporter() *InMemoryReporter {
+ return &InMemoryReporter{
+ spans: make([]opentracing.Span, 0, 10),
+ }
+}
+
+// Report implements Report() method of Reporter by storing the span in the buffer.
+func (r *InMemoryReporter) Report(span *Span) {
+ r.lock.Lock()
+ r.spans = append(r.spans, span)
+ r.lock.Unlock()
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *InMemoryReporter) Close() {
+ // no-op
+}
+
+// SpansSubmitted returns the number of spans accumulated in the buffer.
+func (r *InMemoryReporter) SpansSubmitted() int {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return len(r.spans)
+}
+
+// GetSpans returns accumulated spans as a copy of the buffer.
+func (r *InMemoryReporter) GetSpans() []opentracing.Span {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ copied := make([]opentracing.Span, len(r.spans))
+ copy(copied, r.spans)
+ return copied
+}
+
+// Reset clears all accumulated spans.
+func (r *InMemoryReporter) Reset() {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ r.spans = nil
+}
+
+// ------------------------------
+
+type compositeReporter struct {
+ reporters []Reporter
+}
+
+// NewCompositeReporter creates a reporter that forwards each reported span to all of the given reporters.
+func NewCompositeReporter(reporters ...Reporter) Reporter {
+ return &compositeReporter{reporters: reporters}
+}
+
+// Report implements Report() method of Reporter by delegating to each underlying reporter.
+func (r *compositeReporter) Report(span *Span) {
+ for _, reporter := range r.reporters {
+ reporter.Report(span)
+ }
+}
+
+// Close implements Close() method of Reporter by closing each underlying reporter.
+func (r *compositeReporter) Close() {
+ for _, reporter := range r.reporters {
+ reporter.Close()
+ }
+}
+
+// ------------- REMOTE REPORTER -----------------
+
+type reporterQueueItemType int
+
+const (
+ defaultQueueSize = 100
+ defaultBufferFlushInterval = 1 * time.Second
+
+ reporterQueueItemSpan reporterQueueItemType = iota
+ reporterQueueItemClose
+)
+
+type reporterQueueItem struct {
+ itemType reporterQueueItemType
+ span *Span
+ close *sync.WaitGroup
+}
+
+type remoteReporter struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ queueLength int64
+ closed int64 // 0 - not closed, 1 - closed
+
+ reporterOptions
+
+ sender Transport
+ queue chan reporterQueueItem
+}
+
+// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
+// Calls to Report(Span) return immediately (side effect: if the internal queue is full, the span is dropped).
+// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
+// Calls to Close() block until all spans reported prior to the call to Close are flushed.
+func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
+ options := reporterOptions{}
+ for _, option := range opts {
+ option(&options)
+ }
+ if options.bufferFlushInterval <= 0 {
+ options.bufferFlushInterval = defaultBufferFlushInterval
+ }
+ if options.logger == nil {
+ options.logger = log.NullLogger
+ }
+ if options.metrics == nil {
+ options.metrics = NewNullMetrics()
+ }
+ if options.queueSize <= 0 {
+ options.queueSize = defaultQueueSize
+ }
+ reporter := &remoteReporter{
+ reporterOptions: options,
+ sender: sender,
+ queue: make(chan reporterQueueItem, options.queueSize),
+ }
+ go reporter.processQueue()
+ return reporter
+}
+
+// Report implements Report() method of Reporter.
+// It passes the span to a background go-routine for submission to Jaeger backend.
+// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
+// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
+// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
+// because some of them may still be successfully added to the queue.
+func (r *remoteReporter) Report(span *Span) {
+ select {
+ case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}:
+ atomic.AddInt64(&r.queueLength, 1)
+ default:
+ r.metrics.ReporterDropped.Inc(1)
+ }
+}
+
+// Close implements Close() method of Reporter by waiting for the queue to be drained.
+func (r *remoteReporter) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
+ r.logger.Error("Repeated attempt to close the reporter is ignored")
+ return
+ }
+ r.sendCloseEvent()
+ r.sender.Close()
+}
+
+func (r *remoteReporter) sendCloseEvent() {
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
+
+ r.queue <- item // if the queue is full we will block until there is space
+ atomic.AddInt64(&r.queueLength, 1)
+ wg.Wait()
+}
+
+// processQueue reads spans from the queue and passes them to the sender, which buffers them internally.
+// When the sender's buffer is full, the accumulated spans are flushed to the Jaeger backend.
+// The buffer is also flushed on every bufferFlushInterval tick, in case the tracer stopped
+// reporting new spans.
+func (r *remoteReporter) processQueue() {
+ // flush causes the Sender to flush its accumulated spans and clear the buffer
+ flush := func() {
+ if flushed, err := r.sender.Flush(); err != nil {
+ r.metrics.ReporterFailure.Inc(int64(flushed))
+ r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error()))
+ } else if flushed > 0 {
+ r.metrics.ReporterSuccess.Inc(int64(flushed))
+ }
+ }
+
+ timer := time.NewTicker(r.bufferFlushInterval)
+ for {
+ select {
+ case <-timer.C:
+ flush()
+ case item := <-r.queue:
+ atomic.AddInt64(&r.queueLength, -1)
+ switch item.itemType {
+ case reporterQueueItemSpan:
+ span := item.span
+ if flushed, err := r.sender.Append(span); err != nil {
+ r.metrics.ReporterFailure.Inc(int64(flushed))
+ r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error()))
+ } else if flushed > 0 {
+ r.metrics.ReporterSuccess.Inc(int64(flushed))
+ // to reduce the number of gauge stats, we only emit queue length on flush
+ r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
+ }
+ case reporterQueueItemClose:
+ timer.Stop()
+ flush()
+ item.close.Done()
+ return
+ }
+ }
+ }
+}
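
A sketch combining the reporters defined above (NewTracer and NewConstSampler are assumed from elsewhere in the package): the composite reporter both logs each finished span and keeps it in memory for inspection.

    package main

    import (
        "fmt"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        inMem := jaeger.NewInMemoryReporter()
        reporter := jaeger.NewCompositeReporter(
            jaeger.NewLoggingReporter(jaeger.StdLogger), // prints every reported span
            inMem,                                       // keeps spans for assertions in tests
        )

        tracer, closer := jaeger.NewTracer("demo", jaeger.NewConstSampler(true), reporter)
        defer closer.Close()

        tracer.StartSpan("work").Finish()
        fmt.Println("spans captured:", inMem.SpansSubmitted()) // 1
    }
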
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
new file mode 100644
index 000000000..65012d701
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+)
+
+// ReporterOption is a function that sets some option on the reporter.
+type ReporterOption func(c *reporterOptions)
+
+// ReporterOptions is a factory for all available ReporterOption's
+var ReporterOptions reporterOptions
+
+// reporterOptions control behavior of the reporter.
+type reporterOptions struct {
+ // queueSize is the size of internal queue where reported spans are stored before they are processed in the background
+ queueSize int
+ // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
+ bufferFlushInterval time.Duration
+ // logger is used to log errors of span submissions
+ logger Logger
+ // metrics is used to record runtime stats
+ metrics *Metrics
+}
+
+// QueueSize creates a ReporterOption that sets the size of the internal queue where
+// spans are stored before they are processed.
+func (reporterOptions) QueueSize(queueSize int) ReporterOption {
+ return func(r *reporterOptions) {
+ r.queueSize = queueSize
+ }
+}
+
+// Metrics creates a ReporterOption that initializes Metrics in the reporter,
+// which is used to record runtime statistics.
+func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
+ return func(r *reporterOptions) {
+ r.metrics = metrics
+ }
+}
+
+// BufferFlushInterval creates a ReporterOption that sets how often the queue
+// is force-flushed.
+func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
+ return func(r *reporterOptions) {
+ r.bufferFlushInterval = bufferFlushInterval
+ }
+}
+
+// Logger creates a ReporterOption that initializes the logger used to log
+// errors of span submissions.
+func (reporterOptions) Logger(logger Logger) ReporterOption {
+ return func(r *reporterOptions) {
+ r.logger = logger
+ }
+}
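
A sketch of passing these options to NewRemoteReporter. The noopTransport stub stands in for a real sender (such as the UDP transport defined elsewhere in the package); its method set is only inferred from how remoteReporter calls sender.Append/Flush/Close above, so treat it as an assumption.

    package main

    import (
        "time"

        jaeger "github.com/uber/jaeger-client-go"
    )

    // noopTransport is an assumed stand-in for the package's Transport interface.
    type noopTransport struct{}

    func (noopTransport) Append(span *jaeger.Span) (int, error) { return 1, nil }
    func (noopTransport) Flush() (int, error)                   { return 0, nil }
    func (noopTransport) Close() error                          { return nil }

    func main() {
        reporter := jaeger.NewRemoteReporter(
            noopTransport{},
            jaeger.ReporterOptions.QueueSize(200),
            jaeger.ReporterOptions.BufferFlushInterval(500*time.Millisecond),
            jaeger.ReporterOptions.Logger(jaeger.StdLogger),
        )
        defer reporter.Close() // blocks until the queue is drained and flushed
    }
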
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
new file mode 100644
index 000000000..879948e9c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
@@ -0,0 +1,5 @@
+An Observer that can be used to emit RPC metrics
+================================================
+
+It can be attached to the tracer during tracer construction.
+See `ExampleObserver` function in [observer_test.go](./observer_test.go).
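
A construction sketch using NewObserver and DefaultNameNormalizer from the files below; how the observer is handed to the tracer (a tracer-construction option) is outside this hunk.

    package main

    import (
        "github.com/uber/jaeger-lib/metrics"

        "github.com/uber/jaeger-client-go/rpcmetrics"
    )

    func main() {
        // NullFactory keeps the example self-contained; a real deployment would use
        // a Prometheus/expvar factory so the per-endpoint counters are exported.
        obs := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)
        _ = obs // pass this to the tracer at construction time (option not shown here)
    }
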
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
new file mode 100644
index 000000000..51aa11b35
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
+package rpcmetrics
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
new file mode 100644
index 000000000..30555243d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import "sync"
+
+// normalizedEndpoints is a cache for endpointName -> safeName mappings.
+type normalizedEndpoints struct {
+ names map[string]string
+ maxSize int
+ defaultName string
+ normalizer NameNormalizer
+ mux sync.RWMutex
+}
+
+func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
+ return &normalizedEndpoints{
+ maxSize: maxSize,
+ normalizer: normalizer,
+ names: make(map[string]string, maxSize),
+ }
+}
+
+// normalize looks up the name in the cache; if it is not found, the normalizer
+// is used to convert the name to a safe name. Once more than maxSize unique
+// names have been seen, "" is returned for any name that is not already cached.
+func (n *normalizedEndpoints) normalize(name string) string {
+ n.mux.RLock()
+ norm, ok := n.names[name]
+ l := len(n.names)
+ n.mux.RUnlock()
+ if ok {
+ return norm
+ }
+ if l >= n.maxSize {
+ return ""
+ }
+ return n.normalizeWithLock(name)
+}
+
+func (n *normalizedEndpoints) normalizeWithLock(name string) string {
+ norm := n.normalizer.Normalize(name)
+ n.mux.Lock()
+ defer n.mux.Unlock()
+ // cache may have grown while we were not holding the lock
+ if len(n.names) >= n.maxSize {
+ return ""
+ }
+ n.names[name] = norm
+ return norm
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
new file mode 100644
index 000000000..a8cec2fa6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+ "sync"
+
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+const (
+ otherEndpointsPlaceholder = "other"
+ endpointNameMetricTag = "endpoint"
+)
+
+// Metrics is a collection of metrics for an endpoint describing
+// throughput, success, errors, and performance.
+type Metrics struct {
+ // RequestCountSuccess is a counter of the total number of successes.
+ RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
+
+ // RequestCountFailures is a counter of the number of times any failure has been observed.
+ RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
+
+ // RequestLatencySuccess is a latency histogram of successful requests.
+ RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
+
+ // RequestLatencyFailures is a latency histogram of failed requests.
+ RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
+
+ // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
+ HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
+
+ // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
+ HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
+
+ // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
+ HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
+
+ // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
+ HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
+}
+
+func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
+ if statusCode >= 200 && statusCode < 300 {
+ m.HTTPStatusCode2xx.Inc(1)
+ } else if statusCode >= 300 && statusCode < 400 {
+ m.HTTPStatusCode3xx.Inc(1)
+ } else if statusCode >= 400 && statusCode < 500 {
+ m.HTTPStatusCode4xx.Inc(1)
+ } else if statusCode >= 500 && statusCode < 600 {
+ m.HTTPStatusCode5xx.Inc(1)
+ }
+}
+
+// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
+// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped
+// to a generic endpoint name "other".
+type MetricsByEndpoint struct {
+ metricsFactory metrics.Factory
+ endpoints *normalizedEndpoints
+ metricsByEndpoint map[string]*Metrics
+ mux sync.RWMutex
+}
+
+func newMetricsByEndpoint(
+ metricsFactory metrics.Factory,
+ normalizer NameNormalizer,
+ maxNumberOfEndpoints int,
+) *MetricsByEndpoint {
+ return &MetricsByEndpoint{
+ metricsFactory: metricsFactory,
+ endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
+ metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
+ }
+}
+
+func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
+ safeName := m.endpoints.normalize(endpoint)
+ if safeName == "" {
+ safeName = otherEndpointsPlaceholder
+ }
+ m.mux.RLock()
+ met := m.metricsByEndpoint[safeName]
+ m.mux.RUnlock()
+ if met != nil {
+ return met
+ }
+
+ return m.getWithWriteLock(safeName)
+}
+
+// split out to make this easier to test
+func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ // it is possible that the name has been already registered after we released
+ // the read lock and before we grabbed the write lock, so check for that.
+ if met, ok := m.metricsByEndpoint[safeName]; ok {
+ return met
+ }
+
+ // it would be nice to create the struct before locking, since Init() is somewhat
+ // expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
+ met := &Metrics{}
+ tags := map[string]string{endpointNameMetricTag: safeName}
+ metrics.Init(met, m.metricsFactory, tags)
+
+ m.metricsByEndpoint[safeName] = met
+ return met
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
new file mode 100644
index 000000000..148d84b3a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+// NameNormalizer is used to convert the endpoint names to strings
+// that can be safely used as tags in the metrics.
+type NameNormalizer interface {
+ Normalize(name string) string
+}
+
+// DefaultNameNormalizer converts endpoint names so that they contain only characters
+// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
+var DefaultNameNormalizer = &SimpleNameNormalizer{
+ SafeSets: []SafeCharacterSet{
+ &Range{From: 'a', To: 'z'},
+ &Range{From: 'A', To: 'Z'},
+ &Range{From: '0', To: '9'},
+ &Char{'-'},
+ &Char{'_'},
+ &Char{'/'},
+ &Char{'.'},
+ },
+ Replacement: '-',
+}
+
+// SimpleNameNormalizer uses a set of safe character sets.
+type SimpleNameNormalizer struct {
+ SafeSets []SafeCharacterSet
+ Replacement byte
+}
+
+// SafeCharacterSet determines if the given character is "safe"
+type SafeCharacterSet interface {
+ IsSafe(c byte) bool
+}
+
+// Range implements SafeCharacterSet
+type Range struct {
+ From, To byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (r *Range) IsSafe(c byte) bool {
+ return c >= r.From && c <= r.To
+}
+
+// Char implements SafeCharacterSet
+type Char struct {
+ Val byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (ch *Char) IsSafe(c byte) bool {
+ return c == ch.Val
+}
+
+// Normalize checks each character in the string against SafeSets,
+// and if it's not safe substitutes it with Replacement.
+func (n *SimpleNameNormalizer) Normalize(name string) string {
+ var retMe []byte
+ nameBytes := []byte(name)
+ for i, b := range nameBytes {
+ if n.safeByte(b) {
+ if retMe != nil {
+ retMe[i] = b
+ }
+ } else {
+ if retMe == nil {
+ retMe = make([]byte, len(nameBytes))
+ copy(retMe[0:i], nameBytes[0:i])
+ }
+ retMe[i] = n.Replacement
+ }
+ }
+ if retMe == nil {
+ return name
+ }
+ return string(retMe)
+}
+
+// safeByte checks b against all the safe charsets.
+func (n *SimpleNameNormalizer) safeByte(b byte) bool {
+ for i := range n.SafeSets {
+ if n.SafeSets[i].IsSafe(b) {
+ return true
+ }
+ }
+ return false
+}
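A quick worked example of the normalizer above; the values follow directly from the safe charset and the Replacement byte defined in DefaultNameNormalizer:

    package main

    import (
        "fmt"

        "github.com/uber/jaeger-client-go/rpcmetrics"
    )

    func main() {
        // ' ', '?' and '=' fall outside [a-zA-Z0-9-./_], so each is replaced with '-'.
        fmt.Println(rpcmetrics.DefaultNameNormalizer.Normalize("GET /user?id=123"))
        // Output: GET-/user-id-123

        // Names that are already safe are returned unchanged, without copying.
        fmt.Println(rpcmetrics.DefaultNameNormalizer.Normalize("get-user"))
        // Output: get-user
    }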
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
new file mode 100644
index 000000000..eca5ff6f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/uber/jaeger-lib/metrics"
+
+ jaeger "github.com/uber/jaeger-client-go"
+)
+
+const defaultMaxNumberOfEndpoints = 200
+
+// Observer is an observer that can emit RPC metrics.
+type Observer struct {
+ metricsByEndpoint *MetricsByEndpoint
+}
+
+// NewObserver creates a new observer that can emit RPC metrics.
+func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
+ return &Observer{
+ metricsByEndpoint: newMetricsByEndpoint(
+ metricsFactory,
+ normalizer,
+ defaultMaxNumberOfEndpoints,
+ ),
+ }
+}
+
+// OnStartSpan creates a new Observer for the span.
+func (o *Observer) OnStartSpan(
+ operationName string,
+ options opentracing.StartSpanOptions,
+) jaeger.SpanObserver {
+ return NewSpanObserver(o.metricsByEndpoint, operationName, options)
+}
+
+// SpanKind identifies the span as inbound, outbound, or internal
+type SpanKind int
+
+const (
+ // Local span kind
+ Local SpanKind = iota
+ // Inbound span kind
+ Inbound
+ // Outbound span kind
+ Outbound
+)
+
+// SpanObserver collects RPC metrics
+type SpanObserver struct {
+ metricsByEndpoint *MetricsByEndpoint
+ operationName string
+ startTime time.Time
+ mux sync.Mutex
+ kind SpanKind
+ httpStatusCode uint16
+ err bool
+}
+
+// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
+func NewSpanObserver(
+ metricsByEndpoint *MetricsByEndpoint,
+ operationName string,
+ options opentracing.StartSpanOptions,
+) *SpanObserver {
+ so := &SpanObserver{
+ metricsByEndpoint: metricsByEndpoint,
+ operationName: operationName,
+ startTime: options.StartTime,
+ }
+ for k, v := range options.Tags {
+ so.handleTagInLock(k, v)
+ }
+ return so
+}
+
+// handleTagInLock watches for special tags:
+// - SpanKind
+// - HTTPStatusCode
+// - Error
+func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
+ if key == string(ext.SpanKind) {
+ if v, ok := value.(ext.SpanKindEnum); ok {
+ value = string(v)
+ }
+ if v, ok := value.(string); ok {
+ if v == string(ext.SpanKindRPCClientEnum) {
+ so.kind = Outbound
+ } else if v == string(ext.SpanKindRPCServerEnum) {
+ so.kind = Inbound
+ }
+ }
+ return
+ }
+ if key == string(ext.HTTPStatusCode) {
+ if v, ok := value.(uint16); ok {
+ so.httpStatusCode = v
+ } else if v, ok := value.(int); ok {
+ so.httpStatusCode = uint16(v)
+ } else if v, ok := value.(string); ok {
+ if vv, err := strconv.Atoi(v); err == nil {
+ so.httpStatusCode = uint16(vv)
+ }
+ }
+ return
+ }
+ if key == string(ext.Error) {
+ if v, ok := value.(bool); ok {
+ so.err = v
+ } else if v, ok := value.(string); ok {
+ if vv, err := strconv.ParseBool(v); err == nil {
+ so.err = vv
+ }
+ }
+ return
+ }
+}
+
+// OnFinish emits the RPC metrics. It only has an effect when the operation name
+// is not blank and the span kind is an RPC server.
+func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
+ so.mux.Lock()
+ defer so.mux.Unlock()
+
+ if so.operationName == "" || so.kind != Inbound {
+ return
+ }
+
+ mets := so.metricsByEndpoint.get(so.operationName)
+ latency := options.FinishTime.Sub(so.startTime)
+ if so.err {
+ mets.RequestCountFailures.Inc(1)
+ mets.RequestLatencyFailures.Record(latency)
+ } else {
+ mets.RequestCountSuccess.Inc(1)
+ mets.RequestLatencySuccess.Record(latency)
+ }
+ mets.recordHTTPStatusCode(so.httpStatusCode)
+}
+
+// OnSetOperationName records new operation name.
+func (so *SpanObserver) OnSetOperationName(operationName string) {
+ so.mux.Lock()
+ so.operationName = operationName
+ so.mux.Unlock()
+}
+
+// OnSetTag implements SpanObserver
+func (so *SpanObserver) OnSetTag(key string, value interface{}) {
+ so.mux.Lock()
+ so.handleTagInLock(key, value)
+ so.mux.Unlock()
+}
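As a hedged sketch of driving the observer above in isolation: metrics.NullFactory stands in for whatever jaeger-lib metrics.Factory the caller provides, and the operation name is a placeholder; nothing here reflects how podman itself wires the observer up.

    package main

    import (
        "time"

        "github.com/opentracing/opentracing-go"
        "github.com/opentracing/opentracing-go/ext"
        "github.com/uber/jaeger-client-go/rpcmetrics"
        "github.com/uber/jaeger-lib/metrics"
    )

    func main() {
        obs := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)

        // Only inbound (RPC server) spans with a non-blank operation name emit metrics.
        spanObs := obs.OnStartSpan("get-user", opentracing.StartSpanOptions{
            StartTime: time.Now(),
            Tags: map[string]interface{}{
                string(ext.SpanKind): ext.SpanKindRPCServerEnum, // classified as Inbound
            },
        })
        spanObs.OnSetTag(string(ext.HTTPStatusCode), 200) // bucketed into the 2xx counter
        spanObs.OnFinish(opentracing.FinishOptions{FinishTime: time.Now()})
    }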
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
new file mode 100644
index 000000000..3e1630953
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -0,0 +1,557 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "math"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/thrift-gen/sampling"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ defaultSamplingServerURL = "http://localhost:5778/sampling"
+ defaultSamplingRefreshInterval = time.Minute
+ defaultMaxOperations = 2000
+)
+
+// Sampler decides whether a new trace should be sampled or not.
+type Sampler interface {
+ // IsSampled decides whether a trace with given `id` and `operation`
+ // should be sampled. This function will also return the tags that
+ // can be used to identify the type of sampling that was applied to
+ // the root span. Most simple samplers would return two tags,
+ // sampler.type and sampler.param, similar to those used in the Configuration.
+ IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
+
+ // Close does a clean shutdown of the sampler, stopping any background
+ // goroutines it may have started.
+ Close()
+
+ // Equal checks if the `other` sampler is functionally equivalent
+ // to this sampler.
+ // TODO remove this function. It is used to determine whether two samplers are equivalent,
+ // which does not work well with the adaptive sampler, which has to create all the composite samplers
+ // for the comparison to occur. This is expensive to do if only one sampler has changed.
+ Equal(other Sampler) bool
+}
+
+// -----------------------
+
+// ConstSampler is a sampler that always makes the same decision.
+type ConstSampler struct {
+ Decision bool
+ tags []Tag
+}
+
+// NewConstSampler creates a ConstSampler.
+func NewConstSampler(sample bool) Sampler {
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeConst},
+ {key: SamplerParamTagKey, value: sample},
+ }
+ return &ConstSampler{Decision: sample, tags: tags}
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.Decision, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ConstSampler) Close() {
+ // nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ConstSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*ConstSampler); ok {
+ return s.Decision == o.Decision
+ }
+ return false
+}
+
+// -----------------------
+
+// ProbabilisticSampler is a sampler that randomly samples a certain percentage
+// of traces.
+type ProbabilisticSampler struct {
+ samplingRate float64
+ samplingBoundary uint64
+ tags []Tag
+}
+
+const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
+
+// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
+// samplingRate, in the range between 0.0 and 1.0.
+//
+// It relies on the fact that new trace IDs are themselves 63-bit random numbers, so the sampling decision can be
+// made without generating a new random number, simply by checking whether traceID < (samplingRate * 2^63).
+// TODO remove the error from this function for next major release
+func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
+ if samplingRate < 0.0 || samplingRate > 1.0 {
+ return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+ }
+ return newProbabilisticSampler(samplingRate), nil
+}
+
+func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
+ samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
+ {key: SamplerParamTagKey, value: samplingRate},
+ }
+ return &ProbabilisticSampler{
+ samplingRate: samplingRate,
+ samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
+ tags: tags,
+ }
+}
+
+// SamplingRate returns the sampling probability this sampler was constructed with.
+func (s *ProbabilisticSampler) SamplingRate() float64 {
+ return s.samplingRate
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.samplingBoundary >= id.Low, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ProbabilisticSampler) Close() {
+ // nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ProbabilisticSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*ProbabilisticSampler); ok {
+ return s.samplingBoundary == o.samplingBoundary
+ }
+ return false
+}
+
+// -----------------------
+
+type rateLimitingSampler struct {
+ maxTracesPerSecond float64
+ rateLimiter utils.RateLimiter
+ tags []Tag
+}
+
+// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
+// traces follows the burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
+// sequential requests can be sampled each second.
+func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
+ {key: SamplerParamTagKey, value: maxTracesPerSecond},
+ }
+ return &rateLimitingSampler{
+ maxTracesPerSecond: maxTracesPerSecond,
+ rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
+ tags: tags,
+ }
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.rateLimiter.CheckCredit(1.0), s.tags
+}
+
+func (s *rateLimitingSampler) Close() {
+ // nothing to do
+}
+
+func (s *rateLimitingSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*rateLimitingSampler); ok {
+ return s.maxTracesPerSecond == o.maxTracesPerSecond
+ }
+ return false
+}
+
+// -----------------------
+
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
+// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
+// every operation is sampled at least once in a time interval defined by the lowerBound, e.g. a lowerBound
+// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
+//
+// The probabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() for both
+// samplers return true, the tags for probabilisticSampler will be used.
+type GuaranteedThroughputProbabilisticSampler struct {
+ probabilisticSampler *ProbabilisticSampler
+ lowerBoundSampler Sampler
+ tags []Tag
+ samplingRate float64
+ lowerBound float64
+}
+
+// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
+// probabilisticSampler and rateLimitingSampler.
+func NewGuaranteedThroughputProbabilisticSampler(
+ lowerBound, samplingRate float64,
+) (*GuaranteedThroughputProbabilisticSampler, error) {
+ return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
+}
+
+func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
+ s := &GuaranteedThroughputProbabilisticSampler{
+ lowerBoundSampler: NewRateLimitingSampler(lowerBound),
+ lowerBound: lowerBound,
+ }
+ s.setProbabilisticSampler(samplingRate)
+ return s
+}
+
+func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
+ if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
+ s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+ s.samplingRate = s.probabilisticSampler.SamplingRate()
+ s.tags = []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
+ {key: SamplerParamTagKey, value: s.samplingRate},
+ }
+ }
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
+ s.lowerBoundSampler.IsSampled(id, operation)
+ return true, tags
+ }
+ sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
+ return sampled, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Close() {
+ s.probabilisticSampler.Close()
+ s.lowerBoundSampler.Close()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
+ // more information.
+ return false
+}
+
+// this function should only be called while holding a Write lock
+func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
+ s.setProbabilisticSampler(samplingRate)
+ if s.lowerBound != lowerBound {
+ s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
+ s.lowerBound = lowerBound
+ }
+}
+
+// -----------------------
+
+type adaptiveSampler struct {
+ sync.RWMutex
+
+ samplers map[string]*GuaranteedThroughputProbabilisticSampler
+ defaultSampler *ProbabilisticSampler
+ lowerBound float64
+ maxOperations int
+}
+
+// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
+// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
+// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
+ return newAdaptiveSampler(strategies, maxOperations), nil
+}
+
+func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
+ samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
+ for _, strategy := range strategies.PerOperationStrategies {
+ sampler := newGuaranteedThroughputProbabilisticSampler(
+ strategies.DefaultLowerBoundTracesPerSecond,
+ strategy.ProbabilisticSampling.SamplingRate,
+ )
+ samplers[strategy.Operation] = sampler
+ }
+ return &adaptiveSampler{
+ samplers: samplers,
+ defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
+ lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
+ maxOperations: maxOperations,
+ }
+}
+
+func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ s.RLock()
+ sampler, ok := s.samplers[operation]
+ if ok {
+ defer s.RUnlock()
+ return sampler.IsSampled(id, operation)
+ }
+ s.RUnlock()
+ s.Lock()
+ defer s.Unlock()
+
+ // Check if sampler has already been created
+ sampler, ok = s.samplers[operation]
+ if ok {
+ return sampler.IsSampled(id, operation)
+ }
+ // Store only up to maxOperations of unique ops.
+ if len(s.samplers) >= s.maxOperations {
+ return s.defaultSampler.IsSampled(id, operation)
+ }
+ newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
+ s.samplers[operation] = newSampler
+ return newSampler.IsSampled(id, operation)
+}
+
+func (s *adaptiveSampler) Close() {
+ s.Lock()
+ defer s.Unlock()
+ for _, sampler := range s.samplers {
+ sampler.Close()
+ }
+ s.defaultSampler.Close()
+}
+
+func (s *adaptiveSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
+ // samplers which all need to be initialized before this function can be called for a comparison.
+ // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
+ // changing. Hence this function always returns false so that the update function can be called.
+ // Once the Equal() function is removed from the Sampler API, this will no longer be needed.
+ return false
+}
+
+func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+ s.Lock()
+ defer s.Unlock()
+ for _, strategy := range strategies.PerOperationStrategies {
+ operation := strategy.Operation
+ samplingRate := strategy.ProbabilisticSampling.SamplingRate
+ lowerBound := strategies.DefaultLowerBoundTracesPerSecond
+ if sampler, ok := s.samplers[operation]; ok {
+ sampler.update(lowerBound, samplingRate)
+ } else {
+ sampler := newGuaranteedThroughputProbabilisticSampler(
+ lowerBound,
+ samplingRate,
+ )
+ s.samplers[operation] = sampler
+ }
+ }
+ s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
+ if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
+ s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
+ }
+}
+
+// -----------------------
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ closed int64 // 0 - not closed, 1 - closed
+
+ sync.RWMutex
+ samplerOptions
+
+ serviceName string
+ manager sampling.SamplingManager
+ doneChan chan *sync.WaitGroup
+}
+
+type httpSamplingManager struct {
+ serverURL string
+}
+
+func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
+ var out sampling.SamplingStrategyResponse
+ v := url.Values{}
+ v.Set("service", serviceName)
+ if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+ serviceName string,
+ opts ...SamplerOption,
+) *RemotelyControlledSampler {
+ options := applySamplerOptions(opts...)
+ sampler := &RemotelyControlledSampler{
+ samplerOptions: options,
+ serviceName: serviceName,
+ manager: &httpSamplingManager{serverURL: options.samplingServerURL},
+ doneChan: make(chan *sync.WaitGroup),
+ }
+ go sampler.pollController()
+ return sampler
+}
+
+func applySamplerOptions(opts ...SamplerOption) samplerOptions {
+ options := samplerOptions{}
+ for _, option := range opts {
+ option(&options)
+ }
+ if options.sampler == nil {
+ options.sampler = newProbabilisticSampler(0.001)
+ }
+ if options.logger == nil {
+ options.logger = log.NullLogger
+ }
+ if options.maxOperations <= 0 {
+ options.maxOperations = defaultMaxOperations
+ }
+ if options.samplingServerURL == "" {
+ options.samplingServerURL = defaultSamplingServerURL
+ }
+ if options.metrics == nil {
+ options.metrics = NewNullMetrics()
+ }
+ if options.samplingRefreshInterval <= 0 {
+ options.samplingRefreshInterval = defaultSamplingRefreshInterval
+ }
+ return options
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler.IsSampled(id, operation)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+ s.logger.Error("Repeated attempt to close the sampler is ignored")
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ s.doneChan <- &wg
+ wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
+ // more information.
+ if o, ok := other.(*RemotelyControlledSampler); ok {
+ s.RLock()
+ o.RLock()
+ defer s.RUnlock()
+ defer o.RUnlock()
+ return s.sampler.Equal(o.sampler)
+ }
+ return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+ ticker := time.NewTicker(s.samplingRefreshInterval)
+ defer ticker.Stop()
+ s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+ for {
+ select {
+ case <-ticker.C:
+ s.updateSampler()
+ case wg := <-s.doneChan:
+ wg.Done()
+ return
+ }
+ }
+}
+
+func (s *RemotelyControlledSampler) getSampler() Sampler {
+ s.Lock()
+ defer s.Unlock()
+ return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
+ s.Lock()
+ defer s.Unlock()
+ s.sampler = sampler
+}
+
+func (s *RemotelyControlledSampler) updateSampler() {
+ res, err := s.manager.GetSamplingStrategy(s.serviceName)
+ if err != nil {
+ s.metrics.SamplerQueryFailure.Inc(1)
+ s.logger.Infof("Unable to query sampling strategy: %v", err)
+ return
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ s.metrics.SamplerRetrieved.Inc(1)
+ if strategies := res.GetOperationSampling(); strategies != nil {
+ s.updateAdaptiveSampler(strategies)
+ } else {
+ err = s.updateRateLimitingOrProbabilisticSampler(res)
+ }
+ if err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
+ return
+ }
+ s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
+ if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
+ adaptiveSampler.update(strategies)
+ } else {
+ s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
+ }
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
+ var newSampler Sampler
+ if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
+ newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
+ } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
+ newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
+ } else {
+ return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
+ }
+ if !s.sampler.Equal(newSampler) {
+ s.sampler = newSampler
+ }
+ return nil
+}
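For reference, a small hedged sketch of the standalone samplers defined above; the trace IDs are fabricated for illustration and no running agent is needed:

    package main

    import (
        "fmt"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        // Probabilistic sampler: a trace is kept when id.Low falls below samplingRate * 2^63.
        prob, err := jaeger.NewProbabilisticSampler(0.001)
        if err != nil {
            panic(err)
        }
        defer prob.Close()

        sampled, tags := prob.IsSampled(jaeger.TraceID{Low: 42}, "get-user")
        fmt.Println(sampled, len(tags)) // true 2: 42 is far below 0.001 * 2^63

        // Rate-limiting sampler: at most 10 traces per second, independent of traffic volume.
        limited := jaeger.NewRateLimitingSampler(10)
        defer limited.Close()
        ok, _ := limited.IsSampled(jaeger.TraceID{Low: 1}, "get-user")
        fmt.Println(ok) // expected true: the limiter starts out with credit available
    }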
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_options.go
new file mode 100644
index 000000000..75d28a561
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_options.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+)
+
+// SamplerOption is a function that sets some option on the sampler
+type SamplerOption func(options *samplerOptions)
+
+// SamplerOptions is a factory for all available SamplerOption values.
+var SamplerOptions samplerOptions
+
+type samplerOptions struct {
+ metrics *Metrics
+ maxOperations int
+ sampler Sampler
+ logger Logger
+ samplingServerURL string
+ samplingRefreshInterval time.Duration
+}
+
+// Metrics creates a SamplerOption that initializes Metrics on the sampler,
+// which is used to emit statistics.
+func (samplerOptions) Metrics(m *Metrics) SamplerOption {
+ return func(o *samplerOptions) {
+ o.metrics = m
+ }
+}
+
+// MaxOperations creates a SamplerOption that sets the maximum number of
+// operations the sampler will keep track of.
+func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
+ return func(o *samplerOptions) {
+ o.maxOperations = maxOperations
+ }
+}
+
+// InitialSampler creates a SamplerOption that sets the initial sampler
+// to use before a remote sampler is created and used.
+func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
+ return func(o *samplerOptions) {
+ o.sampler = sampler
+ }
+}
+
+// Logger creates a SamplerOption that sets the logger used by the sampler.
+func (samplerOptions) Logger(logger Logger) SamplerOption {
+ return func(o *samplerOptions) {
+ o.logger = logger
+ }
+}
+
+// SamplingServerURL creates a SamplerOption that sets the sampling server URL
+// of the local agent that contains the sampling strategies.
+func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingServerURL = samplingServerURL
+ }
+}
+
+// SamplingRefreshInterval creates a SamplerOption that sets how often the
+// sampler will poll the local agent for the appropriate sampling strategy.
+func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingRefreshInterval = samplingRefreshInterval
+ }
+}
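Putting the options above together, a hedged sketch of constructing a remotely controlled sampler; the service name is a placeholder and the URL is simply the default from sampler.go:

    package main

    import (
        "time"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        sampler := jaeger.NewRemotelyControlledSampler(
            "my-service", // placeholder service name
            jaeger.SamplerOptions.InitialSampler(jaeger.NewConstSampler(true)),
            jaeger.SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
            jaeger.SamplerOptions.SamplingRefreshInterval(30*time.Second),
            jaeger.SamplerOptions.MaxOperations(500),
        )
        defer sampler.Close()

        // Until the first successful poll of the agent, decisions come from the initial sampler.
        _, _ = sampler.IsSampled(jaeger.TraceID{Low: 1}, "get-user")
    }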
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
new file mode 100644
index 000000000..f0b497a90
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// Span implements opentracing.Span
+type Span struct {
+ sync.RWMutex
+
+ tracer *Tracer
+
+ context SpanContext
+
+ // The name of the "operation" this span is an instance of.
+ // Known as a "span name" in some implementations.
+ operationName string
+
+ // firstInProcess, if true, indicates that this span is the root of the (sub)tree
+ // of spans in the current process. In other words, it is true for root spans and for
+ // ingress spans when the process joins another trace.
+ firstInProcess bool
+
+ // startTime is the timestamp indicating when the span began, with microsecond precision.
+ startTime time.Time
+
+ // duration is the duration of the span, with microsecond precision.
+ // A zero value means the duration is unknown.
+ duration time.Duration
+
+ // tags attached to this span
+ tags []Tag
+
+ // The span's "micro-log"
+ logs []opentracing.LogRecord
+
+ // references for this span
+ references []Reference
+
+ observer ContribSpanObserver
+}
+
+// Tag is a simple key value wrapper.
+// TODO deprecate in the next major release, use opentracing.Tag instead.
+type Tag struct {
+ key string
+ value interface{}
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *Span) SetOperationName(operationName string) opentracing.Span {
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ s.operationName = operationName
+ }
+ s.observer.OnSetOperationName(operationName)
+ return s
+}
+
+// SetTag implements SetTag() of opentracing.Span
+func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+ s.observer.OnSetTag(key, value)
+ if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
+ return s
+ }
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ s.setTagNoLocking(key, value)
+ }
+ return s
+}
+
+func (s *Span) setTagNoLocking(key string, value interface{}) {
+ s.tags = append(s.tags, Tag{key: key, value: value})
+}
+
+// LogFields implements opentracing.Span API
+func (s *Span) LogFields(fields ...log.Field) {
+ s.Lock()
+ defer s.Unlock()
+ if !s.context.IsSampled() {
+ return
+ }
+ s.logFieldsNoLocking(fields...)
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) logFieldsNoLocking(fields ...log.Field) {
+ lr := opentracing.LogRecord{
+ Fields: fields,
+ Timestamp: time.Now(),
+ }
+ s.appendLog(lr)
+}
+
+// LogKV implements opentracing.Span API
+func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
+ s.RLock()
+ sampled := s.context.IsSampled()
+ s.RUnlock()
+ if !sampled {
+ return
+ }
+ fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
+ if err != nil {
+ s.LogFields(log.Error(err), log.String("function", "LogKV"))
+ return
+ }
+ s.LogFields(fields...)
+}
+
+// LogEvent implements opentracing.Span API
+func (s *Span) LogEvent(event string) {
+ s.Log(opentracing.LogData{Event: event})
+}
+
+// LogEventWithPayload implements opentracing.Span API
+func (s *Span) LogEventWithPayload(event string, payload interface{}) {
+ s.Log(opentracing.LogData{Event: event, Payload: payload})
+}
+
+// Log implements opentracing.Span API
+func (s *Span) Log(ld opentracing.LogData) {
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ if ld.Timestamp.IsZero() {
+ ld.Timestamp = s.tracer.timeNow()
+ }
+ s.appendLog(ld.ToLogRecord())
+ }
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) appendLog(lr opentracing.LogRecord) {
+ // TODO add logic to limit number of logs per span (issue #46)
+ s.logs = append(s.logs, lr)
+}
+
+// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
+func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
+ s.Lock()
+ defer s.Unlock()
+ s.tracer.setBaggage(s, key, value)
+ return s
+}
+
+// BaggageItem implements BaggageItem() of opentracing.SpanContext
+func (s *Span) BaggageItem(key string) string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.context.baggage[key]
+}
+
+// Finish implements opentracing.Span API
+func (s *Span) Finish() {
+ s.FinishWithOptions(opentracing.FinishOptions{})
+}
+
+// FinishWithOptions implements opentracing.Span API
+func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
+ if options.FinishTime.IsZero() {
+ options.FinishTime = s.tracer.timeNow()
+ }
+ s.observer.OnFinish(options)
+ s.Lock()
+ if s.context.IsSampled() {
+ s.duration = options.FinishTime.Sub(s.startTime)
+ // Note: bulk logs are not subject to maxLogsPerSpan limit
+ if options.LogRecords != nil {
+ s.logs = append(s.logs, options.LogRecords...)
+ }
+ for _, ld := range options.BulkLogData {
+ s.logs = append(s.logs, ld.ToLogRecord())
+ }
+ }
+ s.Unlock()
+ // call reportSpan even for non-sampled traces, to return span to the pool
+ s.tracer.reportSpan(s)
+}
+
+// Context implements opentracing.Span API
+func (s *Span) Context() opentracing.SpanContext {
+ s.Lock()
+ defer s.Unlock()
+ return s.context
+}
+
+// Tracer implements opentracing.Span API
+func (s *Span) Tracer() opentracing.Tracer {
+ return s.tracer
+}
+
+func (s *Span) String() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.context.String()
+}
+
+// OperationName allows retrieving current operation name.
+func (s *Span) OperationName() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.operationName
+}
+
+func (s *Span) serviceName() string {
+ return s.tracer.serviceName
+}
+
+// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+func setSamplingPriority(s *Span, value interface{}) bool {
+ s.Lock()
+ defer s.Unlock()
+ val, ok := value.(uint16)
+ if !ok {
+ return false
+ }
+ if val == 0 {
+ s.context.flags = s.context.flags & (^flagSampled)
+ return true
+ }
+ if s.tracer.isDebugAllowed(s.operationName) {
+ s.context.flags = s.context.flags | flagDebug | flagSampled
+ return true
+ }
+ return false
+}
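Since Span implements opentracing.Span, call sites normally touch only that interface. A minimal hedged illustration; the tag keys and values are invented, and the global tracer returns a no-op span unless a real tracer has been registered elsewhere:

    package main

    import (
        "github.com/opentracing/opentracing-go"
    )

    // annotate is written against the generic interface, so it applies to *jaeger.Span
    // exactly as to any other implementation.
    func annotate(sp opentracing.Span) {
        defer sp.Finish() // computes the duration and hands the span to the reporter

        sp.SetTag("component", "example")      // recorded only if the span is sampled
        sp.SetBaggageItem("request-id", "abc") // baggage propagates to descendants
        sp.LogKV("event", "cache miss", "key", "users/42")
    }

    func main() {
        annotate(opentracing.StartSpan("example-operation"))
    }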
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
new file mode 100644
index 000000000..e48811c50
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -0,0 +1,411 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+type Agent interface {
+ // Parameters:
+ // - Spans
+ EmitZipkinBatch(spans []*zipkincore.Span) (err error)
+ // Parameters:
+ // - Batch
+ EmitBatch(batch *jaeger.Batch) (err error)
+}
+
+type AgentClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - Spans
+func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
+ if err = p.sendEmitZipkinBatch(spans); err != nil {
+ return
+ }
+ return
+}
+
+func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
+ return
+ }
+ args := AgentEmitZipkinBatchArgs{
+ Spans: spans,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+// Parameters:
+// - Batch
+func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
+ if err = p.sendEmitBatch(batch); err != nil {
+ return
+ }
+ return
+}
+
+func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
+ return
+ }
+ args := AgentEmitBatchArgs{
+ Batch: batch,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+type AgentProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+ self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
+ self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+ return self0
+}
+
+func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x1.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x1
+
+}
+
+type agentProcessorEmitZipkinBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitZipkinBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
+ return true, err2
+ }
+ return true, nil
+}
+
+type agentProcessorEmitBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
+ return true, err2
+ }
+ return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type AgentEmitZipkinBatchArgs struct {
+ Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
+}
+
+func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
+ return &AgentEmitZipkinBatchArgs{}
+}
+
+func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
+ return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*zipkincore.Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i++ {
+ _elem2 := &zipkincore.Span{}
+ if err := _elem2.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.Spans = append(p.Spans, _elem2)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+ }
+ return err
+}
+
+func (p *AgentEmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Batch
+type AgentEmitBatchArgs struct {
+ Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+ return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
+ if !p.IsSetBatch() {
+ return AgentEmitBatchArgs_Batch_DEFAULT
+ }
+ return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+ return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
+ p.Batch = &jaeger.Batch{}
+ if err := p.Batch.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+ }
+ if err := p.Batch.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+ }
+ return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
new file mode 100644
index 000000000..aa9857bb8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
@@ -0,0 +1,23 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
new file mode 100644
index 000000000..9c28f11c1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
@@ -0,0 +1,21 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+var GoUnusedProtection__ int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
new file mode 100644
index 000000000..1f79c1255
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
@@ -0,0 +1,435 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package baggage
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type BaggageRestrictionManager interface {
+ // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+ // Usually, baggageRestrictions apply to all services however there may be situations
+ // where a baggageKey might only be allowed to be set by a specific service.
+ //
+ // Parameters:
+ // - ServiceName
+ GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error)
+}
+
+type BaggageRestrictionManagerClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+// Usually, baggageRestrictions apply to all services however there may be situations
+// where a baggageKey might only be allowed to be set by a specific service.
+//
+// Parameters:
+// - ServiceName
+func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) {
+ if err = p.sendGetBaggageRestrictions(serviceName); err != nil {
+ return
+ }
+ return p.recvGetBaggageRestrictions()
+}
+
+func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{
+ ServiceName: serviceName,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "getBaggageRestrictions" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error1 error
+ error1, err = error0.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error1
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type")
+ return
+ }
+ result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ value = result.GetSuccess()
+ return
+}
+
+type BaggageRestrictionManagerProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler BaggageRestrictionManager
+}
+
+func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
+
+ self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
+ return self2
+}
+
+func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x3.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x3
+
+}
+
+type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
+ handler BaggageRestrictionManager
+}
+
+func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+ var retval []*BaggageRestriction
+ var err2 error
+ if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error())
+ oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - ServiceName
+type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
+ ServiceName string `thrift:"serviceName,1" json:"serviceName"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
+ return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+ }
+ return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
+ Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
+ return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+}
+
+var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
+ return p.Success
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.readField0(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BaggageRestriction, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i++ {
+ _elem4 := &BaggageRestriction{}
+ if err := _elem4.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Success = append(p.Success, _elem4)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
new file mode 100644
index 000000000..ed35ce9ab
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package baggage
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
new file mode 100644
index 000000000..7888892f6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
@@ -0,0 +1,154 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package baggage
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+// Attributes:
+// - BaggageKey
+// - MaxValueLength
+type BaggageRestriction struct {
+ BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"`
+ MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"`
+}
+
+func NewBaggageRestriction() *BaggageRestriction {
+ return &BaggageRestriction{}
+}
+
+func (p *BaggageRestriction) GetBaggageKey() string {
+ return p.BaggageKey
+}
+
+func (p *BaggageRestriction) GetMaxValueLength() int32 {
+ return p.MaxValueLength
+}
+func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetBaggageKey bool = false
+ var issetMaxValueLength bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetBaggageKey = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetMaxValueLength = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetBaggageKey {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"))
+ }
+ if !issetMaxValueLength {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"))
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.BaggageKey = v
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.MaxValueLength = v
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.BaggageKey)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err)
+ }
+ return err
+}
+
+func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err)
+ }
+ return err
+}
+
+func (p *BaggageRestriction) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestriction(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
new file mode 100644
index 000000000..b32c37dd2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
@@ -0,0 +1,242 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type Agent interface {
+ // Parameters:
+ // - Batch
+ EmitBatch(batch *Batch) (err error)
+}
+
+type AgentClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - Batch
+func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
+ if err = p.sendEmitBatch(batch); err != nil {
+ return
+ }
+ return
+}
+
+func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
+ return
+ }
+ args := AgentEmitBatchArgs{
+ Batch: batch,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+type AgentProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+ self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+ return self6
+}
+
+func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x7.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x7
+
+}
+
+type agentProcessorEmitBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
+ return true, err2
+ }
+ return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Batch
+type AgentEmitBatchArgs struct {
+ Batch *Batch `thrift:"batch,1" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+ return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *Batch {
+ if !p.IsSetBatch() {
+ return AgentEmitBatchArgs_Batch_DEFAULT
+ }
+ return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+ return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
+ p.Batch = &Batch{}
+ if err := p.Batch.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+ }
+ if err := p.Batch.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+ }
+ return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
new file mode 100644
index 000000000..621b8b1c2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
new file mode 100644
index 000000000..d23ed2fc2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
@@ -0,0 +1,1838 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type TagType int64
+
+const (
+ TagType_STRING TagType = 0
+ TagType_DOUBLE TagType = 1
+ TagType_BOOL TagType = 2
+ TagType_LONG TagType = 3
+ TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+ switch p {
+ case TagType_STRING:
+ return "STRING"
+ case TagType_DOUBLE:
+ return "DOUBLE"
+ case TagType_BOOL:
+ return "BOOL"
+ case TagType_LONG:
+ return "LONG"
+ case TagType_BINARY:
+ return "BINARY"
+ }
+ return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+ switch s {
+ case "STRING":
+ return TagType_STRING, nil
+ case "DOUBLE":
+ return TagType_DOUBLE, nil
+ case "BOOL":
+ return TagType_BOOL, nil
+ case "LONG":
+ return TagType_LONG, nil
+ case "BINARY":
+ return TagType_BINARY, nil
+ }
+ return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+ q, err := TagTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+type SpanRefType int64
+
+const (
+ SpanRefType_CHILD_OF SpanRefType = 0
+ SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+ switch p {
+ case SpanRefType_CHILD_OF:
+ return "CHILD_OF"
+ case SpanRefType_FOLLOWS_FROM:
+ return "FOLLOWS_FROM"
+ }
+ return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+ switch s {
+ case "CHILD_OF":
+ return SpanRefType_CHILD_OF, nil
+ case "FOLLOWS_FROM":
+ return SpanRefType_FOLLOWS_FROM, nil
+ }
+ return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+ q, err := SpanRefTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+// Attributes:
+// - Key
+// - VType
+// - VStr
+// - VDouble
+// - VBool
+// - VLong
+// - VBinary
+type Tag struct {
+ Key string `thrift:"key,1,required" json:"key"`
+ VType TagType `thrift:"vType,2,required" json:"vType"`
+ VStr *string `thrift:"vStr,3" json:"vStr,omitempty"`
+ VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"`
+ VBool *bool `thrift:"vBool,5" json:"vBool,omitempty"`
+ VLong *int64 `thrift:"vLong,6" json:"vLong,omitempty"`
+ VBinary []byte `thrift:"vBinary,7" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+ return &Tag{}
+}
+
+func (p *Tag) GetKey() string {
+ return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+ return p.VType
+}
+
+var Tag_VStr_DEFAULT string
+
+func (p *Tag) GetVStr() string {
+ if !p.IsSetVStr() {
+ return Tag_VStr_DEFAULT
+ }
+ return *p.VStr
+}
+
+var Tag_VDouble_DEFAULT float64
+
+func (p *Tag) GetVDouble() float64 {
+ if !p.IsSetVDouble() {
+ return Tag_VDouble_DEFAULT
+ }
+ return *p.VDouble
+}
+
+var Tag_VBool_DEFAULT bool
+
+func (p *Tag) GetVBool() bool {
+ if !p.IsSetVBool() {
+ return Tag_VBool_DEFAULT
+ }
+ return *p.VBool
+}
+
+var Tag_VLong_DEFAULT int64
+
+func (p *Tag) GetVLong() int64 {
+ if !p.IsSetVLong() {
+ return Tag_VLong_DEFAULT
+ }
+ return *p.VLong
+}
+
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+ return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+ return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+ return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+ return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+ return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+ return p.VBinary != nil
+}
+
+func (p *Tag) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetKey bool = false
+ var issetVType bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetKey = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetVType = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ case 5:
+ if err := p.readField5(iprot); err != nil {
+ return err
+ }
+ case 6:
+ if err := p.readField6(iprot); err != nil {
+ return err
+ }
+ case 7:
+ if err := p.readField7(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetKey {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
+ }
+ if !issetVType {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
+ }
+ return nil
+}
+
+func (p *Tag) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Key = v
+ }
+ return nil
+}
+
+func (p *Tag) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ temp := TagType(v)
+ p.VType = temp
+ }
+ return nil
+}
+
+func (p *Tag) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.VStr = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.VDouble = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.VBool = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField6(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+ } else {
+ p.VLong = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField7(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+ } else {
+ p.VBinary = v
+ }
+ return nil
+}
+
+func (p *Tag) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Tag"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField7(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+ }
+ return err
+}
+
+func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.VType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
+ }
+ return err
+}
+
+func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVStr() {
+ if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
+ }
+ if err := oprot.WriteString(string(*p.VStr)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVDouble() {
+ if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBool() {
+ if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(*p.VBool)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVLong() {
+ if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.VLong)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBinary() {
+ if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
+ }
+ if err := oprot.WriteBinary(p.VBinary); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Tag(%+v)", *p)
+}
+
+// Attributes:
+// - Timestamp
+// - Fields
+type Log struct {
+ Timestamp int64 `thrift:"timestamp,1,required" json:"timestamp"`
+ Fields []*Tag `thrift:"fields,2,required" json:"fields"`
+}
+
+func NewLog() *Log {
+ return &Log{}
+}
+
+func (p *Log) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+ return p.Fields
+}
+func (p *Log) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetTimestamp bool = false
+ var issetFields bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetTimestamp = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetFields = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetTimestamp {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
+ }
+ if !issetFields {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
+ }
+ return nil
+}
+
+func (p *Log) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Timestamp = v
+ }
+ return nil
+}
+
+func (p *Log) readField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Fields = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &Tag{}
+ if err := _elem0.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Fields = append(p.Fields, _elem0)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Log) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Log"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Log) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+ }
+ return err
+}
+
+func (p *Log) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Fields {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
+ }
+ return err
+}
+
+func (p *Log) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+// - RefType
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+type SpanRef struct {
+ RefType SpanRefType `thrift:"refType,1,required" json:"refType"`
+ TraceIdLow int64 `thrift:"traceIdLow,2,required" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,3,required" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,4,required" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+ return &SpanRef{}
+}
+
+func (p *SpanRef) GetRefType() SpanRefType {
+ return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+ return p.SpanId
+}
+func (p *SpanRef) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetRefType bool = false
+ var issetTraceIdLow bool = false
+ var issetTraceIdHigh bool = false
+ var issetSpanId bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetRefType = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetRefType {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
+ }
+ if !issetTraceIdLow {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+ }
+ if !issetTraceIdHigh {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+ }
+ if !issetSpanId {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+ }
+ return nil
+}
+
+func (p *SpanRef) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ temp := SpanRefType(v)
+ p.RefType = temp
+ }
+ return nil
+}
+
+func (p *SpanRef) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.TraceIdLow = v
+ }
+ return nil
+}
+
+func (p *SpanRef) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.TraceIdHigh = v
+ }
+ return nil
+}
+
+func (p *SpanRef) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.SpanId = v
+ }
+ return nil
+}
+
+func (p *SpanRef) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("SpanRef"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.RefType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
+// Attributes:
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+// - ParentSpanId
+// - OperationName
+// - References
+// - Flags
+// - StartTime
+// - Duration
+// - Tags
+// - Logs
+type Span struct {
+ TraceIdLow int64 `thrift:"traceIdLow,1,required" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,2,required" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,3,required" json:"spanId"`
+ ParentSpanId int64 `thrift:"parentSpanId,4,required" json:"parentSpanId"`
+ OperationName string `thrift:"operationName,5,required" json:"operationName"`
+ References []*SpanRef `thrift:"references,6" json:"references,omitempty"`
+ Flags int32 `thrift:"flags,7,required" json:"flags"`
+ StartTime int64 `thrift:"startTime,8,required" json:"startTime"`
+ Duration int64 `thrift:"duration,9,required" json:"duration"`
+ Tags []*Tag `thrift:"tags,10" json:"tags,omitempty"`
+ Logs []*Log `thrift:"logs,11" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+func (p *Span) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+ return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+ return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+ return p.OperationName
+}
+
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+ return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+ return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+ return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+ return p.Duration
+}
+
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+ return p.Tags
+}
+
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+ return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+ return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+ return p.Logs != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetTraceIdLow bool = false
+ var issetTraceIdHigh bool = false
+ var issetSpanId bool = false
+ var issetParentSpanId bool = false
+ var issetOperationName bool = false
+ var issetFlags bool = false
+ var issetStartTime bool = false
+ var issetDuration bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ issetParentSpanId = true
+ case 5:
+ if err := p.readField5(iprot); err != nil {
+ return err
+ }
+ issetOperationName = true
+ case 6:
+ if err := p.readField6(iprot); err != nil {
+ return err
+ }
+ case 7:
+ if err := p.readField7(iprot); err != nil {
+ return err
+ }
+ issetFlags = true
+ case 8:
+ if err := p.readField8(iprot); err != nil {
+ return err
+ }
+ issetStartTime = true
+ case 9:
+ if err := p.readField9(iprot); err != nil {
+ return err
+ }
+ issetDuration = true
+ case 10:
+ if err := p.readField10(iprot); err != nil {
+ return err
+ }
+ case 11:
+ if err := p.readField11(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetTraceIdLow {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+ }
+ if !issetTraceIdHigh {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+ }
+ if !issetSpanId {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+ }
+ if !issetParentSpanId {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
+ }
+ if !issetOperationName {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
+ }
+ if !issetFlags {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
+ }
+ if !issetStartTime {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
+ }
+ if !issetDuration {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
+ }
+ return nil
+}
+
+func (p *Span) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.TraceIdLow = v
+ }
+ return nil
+}
+
+func (p *Span) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.TraceIdHigh = v
+ }
+ return nil
+}
+
+func (p *Span) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.SpanId = v
+ }
+ return nil
+}
+
+func (p *Span) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.ParentSpanId = v
+ }
+ return nil
+}
+
+func (p *Span) readField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.OperationName = v
+ }
+ return nil
+}
+
+func (p *Span) readField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*SpanRef, 0, size)
+ p.References = tSlice
+ for i := 0; i < size; i++ {
+ _elem1 := &SpanRef{}
+ if err := _elem1.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+ }
+ p.References = append(p.References, _elem1)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField7(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+ } else {
+ p.Flags = v
+ }
+ return nil
+}
+
+func (p *Span) readField8(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 8: ", err)
+ } else {
+ p.StartTime = v
+ }
+ return nil
+}
+
+func (p *Span) readField9(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+ } else {
+ p.Duration = v
+ }
+ return nil
+}
+
+func (p *Span) readField10(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+ for i := 0; i < size; i++ {
+ _elem2 := &Tag{}
+ if err := _elem2.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.Tags = append(p.Tags, _elem2)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField11(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Log, 0, size)
+ p.Logs = tSlice
+ for i := 0; i < size; i++ {
+ _elem3 := &Log{}
+ if err := _elem3.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+ }
+ p.Logs = append(p.Logs, _elem3)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField7(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField8(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField9(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField10(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField11(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.OperationName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetReferences() {
+ if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.References {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField7(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.Flags)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.StartTime)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLogs() {
+ if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Logs {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+// - ServiceName
+// - Tags
+type Process struct {
+ ServiceName string `thrift:"serviceName,1,required" json:"serviceName"`
+ Tags []*Tag `thrift:"tags,2" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+ return &Process{}
+}
+
+func (p *Process) GetServiceName() string {
+ return p.ServiceName
+}
+
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+ return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Process) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetServiceName bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetServiceName = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetServiceName {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
+ }
+ return nil
+}
+
+func (p *Process) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *Process) readField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+ for i := 0; i < size; i++ {
+ _elem4 := &Tag{}
+ if err := _elem4.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Tags = append(p.Tags, _elem4)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Process) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Process"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Process) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+ }
+ return err
+}
+
+func (p *Process) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Process) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Process(%+v)", *p)
+}
+
+// Attributes:
+// - Process
+// - Spans
+type Batch struct {
+ Process *Process `thrift:"process,1,required" json:"process"`
+ Spans []*Span `thrift:"spans,2,required" json:"spans"`
+}
+
+func NewBatch() *Batch {
+ return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+
+func (p *Batch) GetProcess() *Process {
+ if !p.IsSetProcess() {
+ return Batch_Process_DEFAULT
+ }
+ return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+ return p.Spans
+}
+func (p *Batch) IsSetProcess() bool {
+ return p.Process != nil
+}
+
+func (p *Batch) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetProcess bool = false
+ var issetSpans bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetProcess = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetSpans = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetProcess {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
+ }
+ if !issetSpans {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
+ }
+ return nil
+}
+
+func (p *Batch) readField1(iprot thrift.TProtocol) error {
+ p.Process = &Process{}
+ if err := p.Process.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+ }
+ return nil
+}
+
+func (p *Batch) readField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i++ {
+ _elem5 := &Span{}
+ if err := _elem5.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
+ }
+ p.Spans = append(p.Spans, _elem5)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Batch) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Batch"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
+ }
+ if err := p.Process.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
+ }
+ return err
+}
+
+func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
+ }
+ return err
+}
+
+func (p *Batch) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Batch(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type BatchSubmitResponse struct {
+ Ok bool `thrift:"ok,1,required" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+ return &BatchSubmitResponse{}
+}
+
+func (p *BatchSubmitResponse) GetOk() bool {
+ return p.Ok
+}
+func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ok = v
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+ }
+ return err
+}
+
+func (p *BatchSubmitResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
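
For orientation, a minimal sketch of how the generated jaeger types above could be serialized by hand. This is illustrative only and not part of the patch; it assumes the vendored thrift runtime exposes TMemoryBuffer and TBinaryProtocol with the same constructors as upstream Apache Thrift 0.9.3, and in practice jaeger-client-go drives this through its reporter rather than user code:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	// Build a minimal batch: one process plus one span, mirroring the
	// generated struct fields above (Process and Spans are required).
	batch := &jaeger.Batch{
		Process: &jaeger.Process{ServiceName: "podman"},
		Spans: []*jaeger.Span{
			{
				TraceIdLow:    1,
				SpanId:        2,
				OperationName: "example-op",
				StartTime:     0,
				Duration:      42,
			},
		},
	}

	// Serialize through the vendored Thrift runtime; TMemoryBuffer and
	// TBinaryProtocol are assumed to follow the upstream 0.9.3 API.
	buf := thrift.NewTMemoryBuffer()
	oprot := thrift.NewTBinaryProtocolTransport(buf)
	if err := batch.Write(oprot); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", buf.Len())
}
```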
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
new file mode 100644
index 000000000..0f6e3a884
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
new file mode 100644
index 000000000..33179cfeb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
@@ -0,0 +1,410 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type SamplingManager interface {
+ // Parameters:
+ // - ServiceName
+ GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
+}
+
+type SamplingManagerClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
+ return &SamplingManagerClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
+ return &SamplingManagerClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - ServiceName
+func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
+ if err = p.sendGetSamplingStrategy(serviceName); err != nil {
+ return
+ }
+ return p.recvGetSamplingStrategy()
+}
+
+func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := SamplingManagerGetSamplingStrategyArgs{
+ ServiceName: serviceName,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "getSamplingStrategy" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error2 error
+ error2, err = error1.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error2
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
+ return
+ }
+ result := SamplingManagerGetSamplingStrategyResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ value = result.GetSuccess()
+ return
+}
+
+type SamplingManagerProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler SamplingManager
+}
+
+func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
+
+ self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
+ return self3
+}
+
+func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x4.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x4
+
+}
+
+type samplingManagerProcessorGetSamplingStrategy struct {
+ handler SamplingManager
+}
+
+func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := SamplingManagerGetSamplingStrategyArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := SamplingManagerGetSamplingStrategyResult{}
+ var retval *SamplingStrategyResponse
+ var err2 error
+ if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
+ oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - ServiceName
+type SamplingManagerGetSamplingStrategyArgs struct {
+ ServiceName string `thrift:"serviceName,1" json:"serviceName"`
+}
+
+func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
+ return &SamplingManagerGetSamplingStrategyArgs{}
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+ }
+ return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type SamplingManagerGetSamplingStrategyResult struct {
+ Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
+ return &SamplingManagerGetSamplingStrategyResult{}
+}
+
+var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
+
+func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
+ if !p.IsSetSuccess() {
+ return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.readField0(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
+ p.Success = &SamplingStrategyResponse{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
+}
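
The generated SamplingManager interface above can be satisfied by any handler. A toy in-memory implementation is sketched below; it is illustrative only (jaeger-client-go itself fetches strategies from the agent over the network, not like this) and uses the sampling types defined in the ttypes.go file that follows:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

// constSampler is a toy SamplingManager that always returns a fixed
// probabilistic strategy.
type constSampler struct{ rate float64 }

func (s *constSampler) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
	return &sampling.SamplingStrategyResponse{
		StrategyType:          sampling.SamplingStrategyType_PROBABILISTIC,
		ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: s.rate},
	}, nil
}

func main() {
	var mgr sampling.SamplingManager = &constSampler{rate: 0.01}
	resp, err := mgr.GetSamplingStrategy("podman")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.GetStrategyType(), resp.GetProbabilisticSampling().GetSamplingRate())
}
```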
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
new file mode 100644
index 000000000..9abaf0542
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
@@ -0,0 +1,873 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type SamplingStrategyType int64
+
+const (
+ SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
+ SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
+)
+
+func (p SamplingStrategyType) String() string {
+ switch p {
+ case SamplingStrategyType_PROBABILISTIC:
+ return "PROBABILISTIC"
+ case SamplingStrategyType_RATE_LIMITING:
+ return "RATE_LIMITING"
+ }
+ return "<UNSET>"
+}
+
+func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
+ switch s {
+ case "PROBABILISTIC":
+ return SamplingStrategyType_PROBABILISTIC, nil
+ case "RATE_LIMITING":
+ return SamplingStrategyType_RATE_LIMITING, nil
+ }
+ return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
+}
+
+func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
+
+func (p SamplingStrategyType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
+ q, err := SamplingStrategyTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+// Attributes:
+// - SamplingRate
+type ProbabilisticSamplingStrategy struct {
+ SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
+}
+
+func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
+ return &ProbabilisticSamplingStrategy{}
+}
+
+func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
+ return p.SamplingRate
+}
+func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetSamplingRate bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetSamplingRate = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetSamplingRate {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
+ }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.SamplingRate = v
+ }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
+ }
+ return err
+}
+
+func (p *ProbabilisticSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - MaxTracesPerSecond
+type RateLimitingSamplingStrategy struct {
+ MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
+}
+
+func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
+ return &RateLimitingSamplingStrategy{}
+}
+
+func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
+ return p.MaxTracesPerSecond
+}
+func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetMaxTracesPerSecond bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetMaxTracesPerSecond = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetMaxTracesPerSecond {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
+ }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.MaxTracesPerSecond = v
+ }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
+ }
+ if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
+ }
+ return err
+}
+
+func (p *RateLimitingSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - Operation
+// - ProbabilisticSampling
+type OperationSamplingStrategy struct {
+ Operation string `thrift:"operation,1,required" json:"operation"`
+ ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
+}
+
+func NewOperationSamplingStrategy() *OperationSamplingStrategy {
+ return &OperationSamplingStrategy{}
+}
+
+func (p *OperationSamplingStrategy) GetOperation() string {
+ return p.Operation
+}
+
+var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+ if !p.IsSetProbabilisticSampling() {
+ return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
+ }
+ return p.ProbabilisticSampling
+}
+func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
+ return p.ProbabilisticSampling != nil
+}
+
+func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOperation bool = false
+ var issetProbabilisticSampling bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetOperation = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetProbabilisticSampling = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOperation {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
+ }
+ if !issetProbabilisticSampling {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Operation = v
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
+ p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+ if err := p.ProbabilisticSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Operation)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
+ }
+ return err
+}
+
+func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
+ }
+ if err := p.ProbabilisticSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
+ }
+ return err
+}
+
+func (p *OperationSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - DefaultSamplingProbability
+// - DefaultLowerBoundTracesPerSecond
+// - PerOperationStrategies
+// - DefaultUpperBoundTracesPerSecond
+type PerOperationSamplingStrategies struct {
+ DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
+ DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
+ PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
+ DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
+}
+
+func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
+ return &PerOperationSamplingStrategies{}
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
+ return p.DefaultSamplingProbability
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
+ return p.DefaultLowerBoundTracesPerSecond
+}
+
+func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
+ return p.PerOperationStrategies
+}
+
+var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
+
+func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
+ if !p.IsSetDefaultUpperBoundTracesPerSecond() {
+ return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
+ }
+ return *p.DefaultUpperBoundTracesPerSecond
+}
+func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
+ return p.DefaultUpperBoundTracesPerSecond != nil
+}
+
+func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetDefaultSamplingProbability bool = false
+ var issetDefaultLowerBoundTracesPerSecond bool = false
+ var issetPerOperationStrategies bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetDefaultSamplingProbability = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetDefaultLowerBoundTracesPerSecond = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetPerOperationStrategies = true
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetDefaultSamplingProbability {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
+ }
+ if !issetDefaultLowerBoundTracesPerSecond {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
+ }
+ if !issetPerOperationStrategies {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.DefaultSamplingProbability = v
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.DefaultLowerBoundTracesPerSecond = v
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*OperationSamplingStrategy, 0, size)
+ p.PerOperationStrategies = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &OperationSamplingStrategy{}
+ if err := _elem0.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.DefaultUpperBoundTracesPerSecond = &v
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.PerOperationStrategies {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDefaultUpperBoundTracesPerSecond() {
+ if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
+}
+
+// Attributes:
+// - StrategyType
+// - ProbabilisticSampling
+// - RateLimitingSampling
+// - OperationSampling
+type SamplingStrategyResponse struct {
+ StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"`
+ ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
+ RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
+ OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
+}
+
+func NewSamplingStrategyResponse() *SamplingStrategyResponse {
+ return &SamplingStrategyResponse{}
+}
+
+func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
+ return p.StrategyType
+}
+
+var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+ if !p.IsSetProbabilisticSampling() {
+ return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
+ }
+ return p.ProbabilisticSampling
+}
+
+var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
+ if !p.IsSetRateLimitingSampling() {
+ return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
+ }
+ return p.RateLimitingSampling
+}
+
+var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
+
+func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
+ if !p.IsSetOperationSampling() {
+ return SamplingStrategyResponse_OperationSampling_DEFAULT
+ }
+ return p.OperationSampling
+}
+func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
+ return p.ProbabilisticSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
+ return p.RateLimitingSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
+ return p.OperationSampling != nil
+}
+
+func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetStrategyType bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetStrategyType = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetStrategyType {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ temp := SamplingStrategyType(v)
+ p.StrategyType = temp
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
+ p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+ if err := p.ProbabilisticSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
+ p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
+ if err := p.RateLimitingSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
+ p.OperationSampling = &PerOperationSamplingStrategies{}
+ if err := p.OperationSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetProbabilisticSampling() {
+ if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
+ }
+ if err := p.ProbabilisticSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRateLimitingSampling() {
+ if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
+ }
+ if err := p.RateLimitingSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOperationSampling() {
+ if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
+ }
+ if err := p.OperationSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
+}
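The generated Read and Write methods above follow the usual Thrift codec pattern: each struct serializes itself field by field against a TProtocol, and required fields such as StrategyType are verified once the STOP marker is reached. A minimal round-trip sketch, assuming the generated sampling package at github.com/uber/jaeger-client-go/thrift-gen/sampling with the conventional generated names (NewSamplingStrategyResponse, SamplingStrategyType_PROBABILISTIC) and an in-memory transport from the vendored thrift package:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	// In-memory transport plus the binary protocol from the vendored thrift
	// package (TMemoryBuffer is assumed to be present, as in upstream Apache Thrift).
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(buf)

	// Assumed generated names: NewSamplingStrategyResponse and the
	// SamplingStrategyType_PROBABILISTIC enum value.
	in := sampling.NewSamplingStrategyResponse()
	in.StrategyType = sampling.SamplingStrategyType_PROBABILISTIC
	if err := in.Write(proto); err != nil {
		panic(err)
	}

	out := sampling.NewSamplingStrategyResponse()
	if err := out.Read(proto); err != nil {
		panic(err) // Read fails if the required StrategyType field is missing
	}
	fmt.Println(out.StrategyType == in.StrategyType) // true
}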
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
new file mode 100644
index 000000000..f05144bfc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
@@ -0,0 +1,32 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const LOCAL_COMPONENT = "lc"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+
+func init() {
+}
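These constants are the conventional Zipkin annotation values: a client records CLIENT_SEND ("cs") and CLIENT_RECV ("cr") around a request, while a server records SERVER_RECV ("sr") and SERVER_SEND ("ss"). A small illustrative sketch of that pairing, using only the constants defined above (the helper name is hypothetical):

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

// lifecycleAnnotations is a hypothetical helper returning the annotation
// values one side of an RPC conventionally records, in order.
func lifecycleAnnotations(isClient bool) []string {
	if isClient {
		return []string{zipkincore.CLIENT_SEND, zipkincore.CLIENT_RECV} // "cs", "cr"
	}
	return []string{zipkincore.SERVER_RECV, zipkincore.SERVER_SEND} // "sr", "ss"
}

func main() {
	fmt.Println(lifecycleAnnotations(true))  // [cs cr]
	fmt.Println(lifecycleAnnotations(false)) // [sr ss]
}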
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
new file mode 100644
index 000000000..34b2b267e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
@@ -0,0 +1,1247 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type AnnotationType int64
+
+const (
+ AnnotationType_BOOL AnnotationType = 0
+ AnnotationType_BYTES AnnotationType = 1
+ AnnotationType_I16 AnnotationType = 2
+ AnnotationType_I32 AnnotationType = 3
+ AnnotationType_I64 AnnotationType = 4
+ AnnotationType_DOUBLE AnnotationType = 5
+ AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+ switch p {
+ case AnnotationType_BOOL:
+ return "BOOL"
+ case AnnotationType_BYTES:
+ return "BYTES"
+ case AnnotationType_I16:
+ return "I16"
+ case AnnotationType_I32:
+ return "I32"
+ case AnnotationType_I64:
+ return "I64"
+ case AnnotationType_DOUBLE:
+ return "DOUBLE"
+ case AnnotationType_STRING:
+ return "STRING"
+ }
+ return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+ switch s {
+ case "BOOL":
+ return AnnotationType_BOOL, nil
+ case "BYTES":
+ return AnnotationType_BYTES, nil
+ case "I16":
+ return AnnotationType_I16, nil
+ case "I32":
+ return AnnotationType_I32, nil
+ case "I64":
+ return AnnotationType_I64, nil
+ case "DOUBLE":
+ return AnnotationType_DOUBLE, nil
+ case "STRING":
+ return AnnotationType_STRING, nil
+ }
+ return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+ q, err := AnnotationTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+//
+// When used in a BinaryAnnotation whose key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+//
+// Attributes:
+// - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// For example, the IP 1.2.3.4 is packed as (1 << 24) | (2 << 16) | (3 << 8) | 4
+// - Port: IPv4 port
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//
+// Conventionally, when the port isn't known, port = 0.
+// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+type Endpoint struct {
+ Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"`
+ Port int16 `thrift:"port,2" json:"port"`
+ ServiceName string `thrift:"service_name,3" json:"service_name"`
+}
+
+func NewEndpoint() *Endpoint {
+ return &Endpoint{}
+}
+
+func (p *Endpoint) GetIpv4() int32 {
+ return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+ return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *Endpoint) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Endpoint) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ipv4 = v
+ }
+ return nil
+}
+
+func (p *Endpoint) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.Port = v
+ }
+ return nil
+}
+
+func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *Endpoint) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Endpoint"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
+ }
+ return err
+}
+
+func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
+ }
+ if err := oprot.WriteI16(int16(p.Port)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
+ }
+ return err
+}
+
+func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
+ }
+ return err
+}
+
+func (p *Endpoint) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly and also aggregated.
+//
+// Attributes:
+// - Timestamp: Microseconds from epoch.
+//
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+// - Value
+// - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+ Timestamp int64 `thrift:"timestamp,1" json:"timestamp"`
+ Value string `thrift:"value,2" json:"value"`
+ Host *Endpoint `thrift:"host,3" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+ return &Annotation{}
+}
+
+func (p *Annotation) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+ return p.Value
+}
+
+var Annotation_Host_DEFAULT *Endpoint
+
+func (p *Annotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return Annotation_Host_DEFAULT
+ }
+ return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *Annotation) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Annotation) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Timestamp = v
+ }
+ return nil
+}
+
+func (p *Annotation) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.Value = v
+ }
+ return nil
+}
+
+func (p *Annotation) readField3(iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *Annotation) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Annotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+ }
+ return err
+}
+
+func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Value)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+ }
+ return err
+}
+
+func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
+ }
+ if err := p.Host.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Annotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource in an
+// RPC call.
+//
+// Binary annotations of type STRING are always queryable, though more a
+// historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+// - Key
+// - Value
+// - AnnotationType
+// - Host: The host that recorded the tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+//
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+ Key string `thrift:"key,1" json:"key"`
+ Value []byte `thrift:"value,2" json:"value"`
+ AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"`
+ Host *Endpoint `thrift:"host,4" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+ return &BinaryAnnotation{}
+}
+
+func (p *BinaryAnnotation) GetKey() string {
+ return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+ return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+ return p.AnnotationType
+}
+
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return BinaryAnnotation_Host_DEFAULT
+ }
+ return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Key = v
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.Value = v
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ temp := AnnotationType(v)
+ p.AnnotationType = temp
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+ }
+ if err := oprot.WriteBinary(p.Value); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
+ }
+ if err := p.Host.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+//
+// Attributes:
+// - TraceID
+// - Name: Span name in lowercase, rpc method for example
+//
+// Conventionally, when the span name isn't known, name = "unknown".
+// - ID
+// - ParentID
+// - Annotations
+// - BinaryAnnotations
+// - Debug
+// - Timestamp: Microseconds from epoch of the creation of this span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+// - Duration: Measurement of duration in microseconds, used to support queries.
+//
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+type Span struct {
+ TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
+ // unused field # 2
+ Name string `thrift:"name,3" json:"name"`
+ ID int64 `thrift:"id,4" json:"id"`
+ ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"`
+ Annotations []*Annotation `thrift:"annotations,6" json:"annotations"`
+ // unused field # 7
+ BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"`
+ Debug bool `thrift:"debug,9" json:"debug,omitempty"`
+ Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"`
+ Duration *int64 `thrift:"duration,11" json:"duration,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+func (p *Span) GetTraceID() int64 {
+ return p.TraceID
+}
+
+func (p *Span) GetName() string {
+ return p.Name
+}
+
+func (p *Span) GetID() int64 {
+ return p.ID
+}
+
+var Span_ParentID_DEFAULT int64
+
+func (p *Span) GetParentID() int64 {
+ if !p.IsSetParentID() {
+ return Span_ParentID_DEFAULT
+ }
+ return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+ return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+ return p.BinaryAnnotations
+}
+
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+ return p.Debug
+}
+
+var Span_Timestamp_DEFAULT int64
+
+func (p *Span) GetTimestamp() int64 {
+ if !p.IsSetTimestamp() {
+ return Span_Timestamp_DEFAULT
+ }
+ return *p.Timestamp
+}
+
+var Span_Duration_DEFAULT int64
+
+func (p *Span) GetDuration() int64 {
+ if !p.IsSetDuration() {
+ return Span_Duration_DEFAULT
+ }
+ return *p.Duration
+}
+func (p *Span) IsSetParentID() bool {
+ return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+ return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+ return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+ return p.Duration != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ case 5:
+ if err := p.readField5(iprot); err != nil {
+ return err
+ }
+ case 6:
+ if err := p.readField6(iprot); err != nil {
+ return err
+ }
+ case 8:
+ if err := p.readField8(iprot); err != nil {
+ return err
+ }
+ case 9:
+ if err := p.readField9(iprot); err != nil {
+ return err
+ }
+ case 10:
+ if err := p.readField10(iprot); err != nil {
+ return err
+ }
+ case 11:
+ if err := p.readField11(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Span) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.TraceID = v
+ }
+ return nil
+}
+
+func (p *Span) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.Name = v
+ }
+ return nil
+}
+
+func (p *Span) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.ID = v
+ }
+ return nil
+}
+
+func (p *Span) readField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.ParentID = &v
+ }
+ return nil
+}
+
+func (p *Span) readField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Annotation, 0, size)
+ p.Annotations = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &Annotation{}
+ if err := _elem0.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Annotations = append(p.Annotations, _elem0)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField8(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BinaryAnnotation, 0, size)
+ p.BinaryAnnotations = tSlice
+ for i := 0; i < size; i++ {
+ _elem1 := &BinaryAnnotation{}
+ if err := _elem1.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+ }
+ p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField9(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+ } else {
+ p.Debug = v
+ }
+ return nil
+}
+
+func (p *Span) readField10(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 10: ", err)
+ } else {
+ p.Timestamp = &v
+ }
+ return nil
+}
+
+func (p *Span) readField11(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 11: ", err)
+ } else {
+ p.Duration = &v
+ }
+ return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField8(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField9(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField10(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField11(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Name)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.ID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetParentID() {
+ if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Annotations {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.BinaryAnnotations {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDebug() {
+ if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.Debug)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimestamp() {
+ if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDuration() {
+ if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type Response struct {
+ Ok bool `thrift:"ok,1,required" json:"ok"`
+}
+
+func NewResponse() *Response {
+ return &Response{}
+}
+
+func (p *Response) GetOk() bool {
+ return p.Ok
+}
+func (p *Response) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+ }
+ return nil
+}
+
+func (p *Response) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ok = v
+ }
+ return nil
+}
+
+func (p *Response) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Response"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Response) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+ }
+ return err
+}
+
+func (p *Response) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Response(%+v)", *p)
+}
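Taken together, the types above model one Zipkin span: an Endpoint identifies the service, Annotations mark lifecycle events in microseconds, and the optional Timestamp/Duration fields describe the interval directly. A hand-assembled Span, as a minimal sketch with hypothetical values and a hypothetical packIPv4 helper implementing the packing described in the Ipv4 comment:

package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

// packIPv4 packs a dotted-quad address into the int32 layout described in the
// Endpoint.Ipv4 comment: (a << 24) | (b << 16) | (c << 8) | d.
func packIPv4(a, b, c, d byte) int32 {
	return int32(a)<<24 | int32(b)<<16 | int32(c)<<8 | int32(d)
}

func main() {
	host := zipkincore.NewEndpoint()
	host.Ipv4 = packIPv4(10, 0, 0, 1)
	host.Port = 8080
	host.ServiceName = "zipkin-web" // lowercase by convention

	now := time.Now().UnixNano() / 1000 // microseconds from epoch, per the comments
	dur := int64(1500)                  // 1.5 ms, also in microseconds

	span := zipkincore.NewSpan()
	span.TraceID = 1
	span.ID = 1 // root span: trace_id == id, parent_id left unset
	span.Name = "get /api"
	span.Timestamp = &now
	span.Duration = &dur
	span.Annotations = []*zipkincore.Annotation{
		{Timestamp: now, Value: zipkincore.SERVER_RECV, Host: host},
		{Timestamp: now + dur, Value: zipkincore.SERVER_SEND, Host: host},
	}

	fmt.Println(span)
}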
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
new file mode 100644
index 000000000..417e883d0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
@@ -0,0 +1,446 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type ZipkinCollector interface {
+ // Parameters:
+ // - Spans
+ SubmitZipkinBatch(spans []*Span) (r []*Response, err error)
+}
+
+type ZipkinCollectorClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) {
+ if err = p.sendSubmitZipkinBatch(spans); err != nil {
+ return
+ }
+ return p.recvSubmitZipkinBatch()
+}
+
+func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := ZipkinCollectorSubmitZipkinBatchArgs{
+ Spans: spans,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "submitZipkinBatch" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error3 error
+ error3, err = error2.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error3
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type")
+ return
+ }
+ result := ZipkinCollectorSubmitZipkinBatchResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ value = result.GetSuccess()
+ return
+}
+
+type ZipkinCollectorProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+ self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
+ return self4
+}
+
+func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x5.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x5
+
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+ handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := ZipkinCollectorSubmitZipkinBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := ZipkinCollectorSubmitZipkinBatchResult{}
+ var retval []*Response
+ var err2 error
+ if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
+ oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+ Spans []*Span `thrift:"spans,1" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+ return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+ return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i++ {
+ _elem6 := &Span{}
+ if err := _elem6.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err)
+ }
+ p.Spans = append(p.Spans, _elem6)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+ }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+ Success []*Response `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+ return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+ return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.readField0(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Response, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i++ {
+ _elem7 := &Response{}
+ if err := _elem7.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err)
+ }
+ p.Success = append(p.Success, _elem7)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
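The generated client wraps a thrift transport and protocol factory and drives the submitZipkinBatch call/reply exchange shown above. A minimal wiring sketch follows; the in-memory TMemoryBuffer transport is an assumption (present in upstream Apache Thrift and used here only to keep the snippet self-contained), while NewTBinaryProtocolFactoryDefault is defined in the vendored thrift package added later in this diff:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func main() {
	transport := thrift.NewTMemoryBuffer()               // assumption: in-memory TTransport
	factory := thrift.NewTBinaryProtocolFactoryDefault() // defined in binary_protocol.go below
	client := zipkincore.NewZipkinCollectorClientFactory(transport, factory)

	// In real use the transport would point at a Zipkin/Jaeger collector and
	// SubmitZipkinBatch would return one Response per submitted span. Against
	// the in-memory buffer the call only encodes the request, so an error on
	// the read side is expected here.
	spans := []*zipkincore.Span{zipkincore.NewSpan()}
	if _, err := client.SubmitZipkinBatch(spans); err != nil {
		fmt.Println("no collector attached:", err)
	}
}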
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
new file mode 100644
index 000000000..1d8e642e0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -0,0 +1,7 @@
+# Apache Thrift
+
+This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c).
+
+It is vendored code to avoid compatibility issues introduced in Thrift v0.11.
+
+See https://github.com/jaegertracing/jaeger-client-go/pull/303.
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
new file mode 100644
index 000000000..6655cc5a9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+const (
+ UNKNOWN_APPLICATION_EXCEPTION = 0
+ UNKNOWN_METHOD = 1
+ INVALID_MESSAGE_TYPE_EXCEPTION = 2
+ WRONG_METHOD_NAME = 3
+ BAD_SEQUENCE_ID = 4
+ MISSING_RESULT = 5
+ INTERNAL_ERROR = 6
+ PROTOCOL_ERROR = 7
+)
+
+// Application level Thrift exception
+type TApplicationException interface {
+ TException
+ TypeId() int32
+ Read(iprot TProtocol) (TApplicationException, error)
+ Write(oprot TProtocol) error
+}
+
+type tApplicationException struct {
+ message string
+ type_ int32
+}
+
+func (e tApplicationException) Error() string {
+ return e.message
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+ return &tApplicationException{message, type_}
+}
+
+func (p *tApplicationException) TypeId() int32 {
+ return p.type_
+}
+
+func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
+ _, err := iprot.ReadStructBegin()
+ if err != nil {
+ return nil, err
+ }
+
+ message := ""
+ type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+ for {
+ _, ttype, id, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return nil, err
+ }
+ if ttype == STOP {
+ break
+ }
+ switch id {
+ case 1:
+ if ttype == STRING {
+ if message, err = iprot.ReadString(); err != nil {
+ return nil, err
+ }
+ } else {
+ if err = SkipDefaultDepth(iprot, ttype); err != nil {
+ return nil, err
+ }
+ }
+ case 2:
+ if ttype == I32 {
+ if type_, err = iprot.ReadI32(); err != nil {
+ return nil, err
+ }
+ } else {
+ if err = SkipDefaultDepth(iprot, ttype); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ if err = SkipDefaultDepth(iprot, ttype); err != nil {
+ return nil, err
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ return nil, err
+ }
+ }
+ return NewTApplicationException(type_, message), iprot.ReadStructEnd()
+}
+
+func (p *tApplicationException) Write(oprot TProtocol) (err error) {
+ err = oprot.WriteStructBegin("TApplicationException")
+ if len(p.Error()) > 0 {
+ err = oprot.WriteFieldBegin("message", STRING, 1)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteString(p.Error())
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldEnd()
+ if err != nil {
+ return
+ }
+ }
+ err = oprot.WriteFieldBegin("type", I32, 2)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteI32(p.type_)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldEnd()
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldStop()
+ if err != nil {
+ return
+ }
+ err = oprot.WriteStructEnd()
+ return
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
new file mode 100644
index 000000000..690d34111
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -0,0 +1,514 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+type TBinaryProtocol struct {
+ trans TRichTransport
+ origTransport TTransport
+ reader io.Reader
+ writer io.Writer
+ strictRead bool
+ strictWrite bool
+ buffer [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+ strictRead bool
+ strictWrite bool
+}
+
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+ return NewTBinaryProtocol(t, false, true)
+}
+
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+ p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
+ if et, ok := t.(TRichTransport); ok {
+ p.trans = et
+ } else {
+ p.trans = NewTRichTransport(t)
+ }
+ p.reader = p.trans
+ p.writer = p.trans
+ return p
+}
+
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+ return NewTBinaryProtocolFactory(false, true)
+}
+
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+ return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+ return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
+}
+
+/**
+ * Writing Methods
+ */
+
+func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+ if p.strictWrite {
+ version := uint32(VERSION_1) | uint32(typeId)
+ e := p.WriteI32(int32(version))
+ if e != nil {
+ return e
+ }
+ e = p.WriteString(name)
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(seqId)
+ return e
+ } else {
+ e := p.WriteString(name)
+ if e != nil {
+ return e
+ }
+ e = p.WriteByte(int8(typeId))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(seqId)
+ return e
+ }
+}
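+
+// Illustrative example (added for clarity, not from the upstream Thrift
+// sources): in strict mode a CALL message (type 1) starts with the word
+// 0x80010001 (VERSION_1 | CALL), followed by the length-prefixed method
+// name and the sequence id.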
+
+func (p *TBinaryProtocol) WriteMessageEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(name string) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ e := p.WriteByte(int8(typeId))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI16(id)
+ return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop() error {
+ e := p.WriteByte(STOP)
+ return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ e := p.WriteByte(int8(keyType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteByte(int8(valueType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
+ e := p.WriteByte(int8(elemType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
+ e := p.WriteByte(int8(elemType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(value bool) error {
+ if value {
+ return p.WriteByte(1)
+ }
+ return p.WriteByte(0)
+}
+
+func (p *TBinaryProtocol) WriteByte(value int8) error {
+ e := p.trans.WriteByte(byte(value))
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(value int16) error {
+ v := p.buffer[0:2]
+ binary.BigEndian.PutUint16(v, uint16(value))
+ _, e := p.writer.Write(v)
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(value int32) error {
+ v := p.buffer[0:4]
+ binary.BigEndian.PutUint32(v, uint32(value))
+ _, e := p.writer.Write(v)
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(value int64) error {
+ v := p.buffer[0:8]
+ binary.BigEndian.PutUint64(v, uint64(value))
+ _, err := p.writer.Write(v)
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(value float64) error {
+ return p.WriteI64(int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(value string) error {
+ e := p.WriteI32(int32(len(value)))
+ if e != nil {
+ return e
+ }
+ _, err := p.trans.WriteString(value)
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(value []byte) error {
+ e := p.WriteI32(int32(len(value)))
+ if e != nil {
+ return e
+ }
+ _, err := p.writer.Write(value)
+ return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
+func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+ size, e := p.ReadI32()
+ if e != nil {
+ return "", typeId, 0, NewTProtocolException(e)
+ }
+ if size < 0 {
+ typeId = TMessageType(size & 0x0ff)
+ version := int64(int64(size) & VERSION_MASK)
+ if version != VERSION_1 {
+ return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+ }
+ name, e = p.ReadString()
+ if e != nil {
+ return name, typeId, seqId, NewTProtocolException(e)
+ }
+ seqId, e = p.ReadI32()
+ if e != nil {
+ return name, typeId, seqId, NewTProtocolException(e)
+ }
+ return name, typeId, seqId, nil
+ }
+ if p.strictRead {
+ return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+ }
+ name, e2 := p.readStringBody(size)
+ if e2 != nil {
+ return name, typeId, seqId, e2
+ }
+ b, e3 := p.ReadByte()
+ if e3 != nil {
+ return name, typeId, seqId, e3
+ }
+ typeId = TMessageType(b)
+ seqId, e4 := p.ReadI32()
+ if e4 != nil {
+ return name, typeId, seqId, e4
+ }
+ return name, typeId, seqId, nil
+}
+
+func (p *TBinaryProtocol) ReadMessageEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
+ return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
+ t, err := p.ReadByte()
+ typeId = TType(t)
+ if err != nil {
+ return name, typeId, seqId, err
+ }
+ if t != STOP {
+ seqId, err = p.ReadI16()
+ }
+ return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd() error {
+ return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
+ k, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ kType = TType(k)
+ v, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ vType = TType(v)
+ size32, e := p.ReadI32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+ return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
+ b, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ elemType = TType(b)
+ size32, e := p.ReadI32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+
+ return
+}
+
+func (p *TBinaryProtocol) ReadListEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+ b, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ elemType = TType(b)
+ size32, e := p.ReadI32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+ return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadBool() (bool, error) {
+ b, e := p.ReadByte()
+ v := true
+ if b != 1 {
+ v = false
+ }
+ return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte() (int8, error) {
+ v, err := p.trans.ReadByte()
+ return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
+ buf := p.buffer[0:2]
+ err = p.readAll(buf)
+ value = int16(binary.BigEndian.Uint16(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
+ buf := p.buffer[0:4]
+ err = p.readAll(buf)
+ value = int32(binary.BigEndian.Uint32(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
+ buf := p.buffer[0:8]
+ err = p.readAll(buf)
+ value = int64(binary.BigEndian.Uint64(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
+ buf := p.buffer[0:8]
+ err = p.readAll(buf)
+ value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadString() (value string, err error) {
+ size, e := p.ReadI32()
+ if e != nil {
+ return "", e
+ }
+ if size < 0 {
+ err = invalidDataLength
+ return
+ }
+
+ return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
+ size, e := p.ReadI32()
+ if e != nil {
+ return nil, e
+ }
+ if size < 0 {
+ return nil, invalidDataLength
+ }
+ if uint64(size) > p.trans.RemainingBytes() {
+ return nil, invalidDataLength
+ }
+
+ isize := int(size)
+ buf := make([]byte, isize)
+ _, err := io.ReadFull(p.trans, buf)
+ return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush() (err error) {
+ return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
+ return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+ return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(buf []byte) error {
+ _, err := io.ReadFull(p.reader, buf)
+ return NewTProtocolException(err)
+}
+
+const readLimit = 32768
+
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+ if size < 0 {
+ return "", nil
+ }
+ if uint64(size) > p.trans.RemainingBytes() {
+ return "", invalidDataLength
+ }
+
+ var (
+ buf bytes.Buffer
+ e error
+ b []byte
+ )
+
+ switch {
+ case int(size) <= len(p.buffer):
+ b = p.buffer[:size] // avoids allocation for small reads
+ case int(size) < readLimit:
+ b = make([]byte, size)
+ default:
+ b = make([]byte, readLimit)
+ }
+
+ for size > 0 {
+ _, e = io.ReadFull(p.trans, b)
+ buf.Write(b)
+ if e != nil {
+ break
+ }
+ size -= readLimit
+ if size < readLimit && size > 0 {
+ b = b[:size]
+ }
+ }
+ return buf.String(), NewTProtocolException(e)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
new file mode 100644
index 000000000..b9299f2fa
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -0,0 +1,815 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ COMPACT_PROTOCOL_ID = 0x082
+ COMPACT_VERSION = 1
+ COMPACT_VERSION_MASK = 0x1f
+ COMPACT_TYPE_MASK = 0x0E0
+ COMPACT_TYPE_BITS = 0x07
+ COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+ COMPACT_BOOLEAN_TRUE = 0x01
+ COMPACT_BOOLEAN_FALSE = 0x02
+ COMPACT_BYTE = 0x03
+ COMPACT_I16 = 0x04
+ COMPACT_I32 = 0x05
+ COMPACT_I64 = 0x06
+ COMPACT_DOUBLE = 0x07
+ COMPACT_BINARY = 0x08
+ COMPACT_LIST = 0x09
+ COMPACT_SET = 0x0A
+ COMPACT_MAP = 0x0B
+ COMPACT_STRUCT = 0x0C
+)
+
+var (
+ ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+ ttypeToCompactType = map[TType]tCompactType{
+ STOP: STOP,
+ BOOL: COMPACT_BOOLEAN_TRUE,
+ BYTE: COMPACT_BYTE,
+ I16: COMPACT_I16,
+ I32: COMPACT_I32,
+ I64: COMPACT_I64,
+ DOUBLE: COMPACT_DOUBLE,
+ STRING: COMPACT_BINARY,
+ LIST: COMPACT_LIST,
+ SET: COMPACT_SET,
+ MAP: COMPACT_MAP,
+ STRUCT: COMPACT_STRUCT,
+ }
+}
+
+type TCompactProtocolFactory struct{}
+
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+ return &TCompactProtocolFactory{}
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return NewTCompactProtocol(trans)
+}
+
+type TCompactProtocol struct {
+ trans TRichTransport
+ origTransport TTransport
+
+ // Used to keep track of the last field for the current and previous structs,
+ // so we can do the delta stuff.
+ lastField []int
+ lastFieldId int
+
+ // If we encounter a boolean field begin, save the TField here so it can
+ // have the value incorporated.
+ booleanFieldName string
+ booleanFieldId int16
+ booleanFieldPending bool
+
+ // If we read a field header, and it's a boolean field, save the boolean
+ // value here so that readBool can use it.
+ boolValue bool
+ boolValueIsNotNull bool
+ buffer [64]byte
+}
+
+// Create a TCompactProtocol given a TTransport
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+ p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
+ if et, ok := trans.(TRichTransport); ok {
+ p.trans = et
+ } else {
+ p.trans = NewTRichTransport(trans)
+ }
+
+ return p
+
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
+func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+ err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ _, err = p.writeVarint32(seqid)
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ e := p.WriteString(name)
+ return e
+
+}
+
+func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(name string) error {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd() error {
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ if typeId == BOOL {
+ // we want to possibly include the value, so we'll wait.
+ p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+ return nil
+ }
+ _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
+ return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
+func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
+ // short lastField = lastField_.pop();
+
+ // if there's a type override, use that.
+ var typeToWrite byte
+ if typeOverride == 0xFF {
+ typeToWrite = byte(p.getCompactType(typeId))
+ } else {
+ typeToWrite = typeOverride
+ }
+ // check if we can use delta encoding for the field id
+ fieldId := int(id)
+ written := 0
+ if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+ // write them together
+ err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ // write them separate
+ err := p.writeByteDirect(typeToWrite)
+ if err != nil {
+ return 0, err
+ }
+ err = p.WriteI16(id)
+ written = 1 + 2
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ p.lastFieldId = fieldId
+ // p.lastField.Push(field.id);
+ return written, nil
+}
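+
+// Illustrative example (added for clarity, not from the upstream Thrift
+// sources): stepping from field id 1 to field id 2 of type i32 emits the
+// single byte 0x15 ((delta 1 << 4) | COMPACT_I32). A jump of more than 15
+// falls back to a plain type byte followed by the zigzag-encoded field id.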
+
+func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop() error {
+ err := p.writeByteDirect(STOP)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ if size == 0 {
+ err := p.writeByteDirect(0)
+ return NewTProtocolException(err)
+ }
+ _, err := p.writeVarint32(int32(size))
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapEnd() error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
+ _, err := p.writeCollectionBegin(elemType, size)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd() error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
+ _, err := p.writeCollectionBegin(elemType, size)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteBool(value bool) error {
+ v := byte(COMPACT_BOOLEAN_FALSE)
+ if value {
+ v = byte(COMPACT_BOOLEAN_TRUE)
+ }
+ if p.booleanFieldPending {
+ // we haven't written the field header yet
+ _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
+ p.booleanFieldPending = false
+ return NewTProtocolException(err)
+ }
+ // we're not part of a field, so just write the value.
+ err := p.writeByteDirect(v)
+ return NewTProtocolException(err)
+}
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(value int8) error {
+ err := p.writeByteDirect(byte(value))
+ return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(value int16) error {
+ _, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+ return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(value int32) error {
+ _, err := p.writeVarint32(p.int32ToZigzag(value))
+ return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(value int64) error {
+ _, err := p.writeVarint64(p.int64ToZigzag(value))
+ return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(value float64) error {
+ buf := p.buffer[0:8]
+ binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+ _, err := p.trans.Write(buf)
+ return NewTProtocolException(err)
+}
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(value string) error {
+ _, e := p.writeVarint32(int32(len(value)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ _, e = p.trans.WriteString(value)
+ return e
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+ _, e := p.writeVarint32(int32(len(bin)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ if len(bin) > 0 {
+ _, e = p.trans.Write(bin)
+ return NewTProtocolException(e)
+ }
+ return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+
+ protocolId, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ if protocolId != COMPACT_PROTOCOL_ID {
+ e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+ return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+ }
+
+ versionAndType, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ version := versionAndType & COMPACT_VERSION_MASK
+ typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+ if version != COMPACT_VERSION {
+ e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+ err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+ return
+ }
+ seqId, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ name, err = p.ReadString()
+ return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd() error {
+ // consume the last field we read off the wire.
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+ t, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ // if it's a stop, then we can return immediately, as the struct is over.
+ if (t & 0x0f) == STOP {
+ return "", STOP, 0, nil
+ }
+
+ // mask off the 4 MSB of the type header. it could contain a field id delta.
+ modifier := int16((t & 0xf0) >> 4)
+ if modifier == 0 {
+ // not a delta. look ahead for the zigzag varint field id.
+ id, err = p.ReadI16()
+ if err != nil {
+ return
+ }
+ } else {
+ // has a delta. add the delta to the last read field id.
+ id = int16(p.lastFieldId) + modifier
+ }
+ typeId, e := p.getTType(tCompactType(t & 0x0f))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+
+ // if this happens to be a boolean field, the value is encoded in the type
+ if p.isBoolType(t) {
+ // save the boolean value in a special instance variable.
+ p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+ p.boolValueIsNotNull = true
+ }
+
+ // push the new field onto the field stack so we can keep the deltas going.
+ p.lastFieldId = int(id)
+ return
+}
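+
+// Illustrative example (added for clarity, not from the upstream Thrift
+// sources): a boolean field that is true and one id above the previous
+// field arrives as the single byte 0x11 (delta 1 in the high nibble,
+// COMPACT_BOOLEAN_TRUE in the low nibble); ReadBool then returns the
+// value saved here without touching the wire.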
+
+func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+ size32, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+
+ keyAndValueType := byte(STOP)
+ if size != 0 {
+ keyAndValueType, err = p.readByteDirect()
+ if err != nil {
+ return
+ }
+ }
+ keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+ valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+ return
+}
+
+func (p *TCompactProtocol) ReadMapEnd() error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
+ size_and_type, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+ size = int((size_and_type >> 4) & 0x0f)
+ if size == 15 {
+ size2, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size2 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size2)
+ }
+ elemType, e := p.getTType(tCompactType(size_and_type))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ return
+}
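+
+// Illustrative example (added for clarity, not from the upstream Thrift
+// sources): a 3-element list of i32 has the one-byte header 0x35 (size 3
+// in the high nibble, COMPACT_I32 in the low nibble); a 20-element list
+// starts with 0xF5 followed by the varint 0x14.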
+
+func (p *TCompactProtocol) ReadListEnd() error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+ return p.ReadListBegin()
+}
+
+func (p *TCompactProtocol) ReadSetEnd() error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool() (value bool, err error) {
+ if p.boolValueIsNotNull {
+ p.boolValueIsNotNull = false
+ return p.boolValue, nil
+ }
+ v, err := p.readByteDirect()
+ return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte() (int8, error) {
+ v, err := p.readByteDirect()
+ if err != nil {
+ return 0, NewTProtocolException(err)
+ }
+ return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16() (value int16, err error) {
+ v, err := p.ReadI32()
+ return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32() (value int32, err error) {
+ v, e := p.readVarint32()
+ if e != nil {
+ return 0, NewTProtocolException(e)
+ }
+ value = p.zigzagToInt32(v)
+ return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64() (value int64, err error) {
+ v, e := p.readVarint64()
+ if e != nil {
+ return 0, NewTProtocolException(e)
+ }
+ value = p.zigzagToInt64(v)
+ return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
+ longBits := p.buffer[0:8]
+ _, e := io.ReadFull(p.trans, longBits)
+ if e != nil {
+ return 0.0, NewTProtocolException(e)
+ }
+ return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Reads a []byte (via readBinary), and then UTF-8 decodes it.
+func (p *TCompactProtocol) ReadString() (value string, err error) {
+ length, e := p.readVarint32()
+ if e != nil {
+ return "", NewTProtocolException(e)
+ }
+ if length < 0 {
+ return "", invalidDataLength
+ }
+ if uint64(length) > p.trans.RemainingBytes() {
+ return "", invalidDataLength
+ }
+
+ if length == 0 {
+ return "", nil
+ }
+ var buf []byte
+ if length <= int32(len(p.buffer)) {
+ buf = p.buffer[0:length]
+ } else {
+ buf = make([]byte, length)
+ }
+ _, e = io.ReadFull(p.trans, buf)
+ return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
+ length, e := p.readVarint32()
+ if e != nil {
+ return nil, NewTProtocolException(e)
+ }
+ if length == 0 {
+ return []byte{}, nil
+ }
+ if length < 0 {
+ return nil, invalidDataLength
+ }
+ if uint64(length) > p.trans.RemainingBytes() {
+ return nil, invalidDataLength
+ }
+
+ buf := make([]byte, length)
+ _, e = io.ReadFull(p.trans, buf)
+ return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush() (err error) {
+ return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
+ return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+ return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+ if size <= 14 {
+ return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+ }
+ err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+ if err != nil {
+ return 0, err
+ }
+ m, err := p.writeVarint32(int32(size))
+ return 1 + m, err
+}
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+ i32buf := p.buffer[0:5]
+ idx := 0
+ for {
+ if (n & ^0x7F) == 0 {
+ i32buf[idx] = byte(n)
+ idx++
+ // p.writeByteDirect(byte(n));
+ break
+ // return;
+ } else {
+ i32buf[idx] = byte((n & 0x7F) | 0x80)
+ idx++
+ // p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
+ u := uint32(n)
+ n = int32(u >> 7)
+ }
+ }
+ return p.trans.Write(i32buf[0:idx])
+}
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+ varint64out := p.buffer[0:10]
+ idx := 0
+ for {
+ if (n & ^0x7F) == 0 {
+ varint64out[idx] = byte(n)
+ idx++
+ break
+ } else {
+ varint64out[idx] = byte((n & 0x7F) | 0x80)
+ idx++
+ u := uint64(n)
+ n = int64(u >> 7)
+ }
+ }
+ return p.trans.Write(varint64out[0:idx])
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+ return (l << 1) ^ (l >> 63)
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+ return (n << 1) ^ (n >> 31)
+}
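+
+// Illustrative example (added for clarity, not from the upstream Thrift
+// sources): zigzag maps 0, -1, 1, -2, 2 to 0, 1, 2, 3, 4, so small
+// negative numbers still fit in a one-byte varint; zigzagToInt32 and
+// zigzagToInt64 below invert the mapping.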
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+ return p.trans.WriteByte(b)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+ return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+ // if the wire contains the right stuff, this will just truncate the i64 we
+ // read and get us the right sign.
+ v, err := p.readVarint64()
+ return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+ shift := uint(0)
+ result := int64(0)
+ for {
+ b, err := p.readByteDirect()
+ if err != nil {
+ return 0, err
+ }
+ result |= int64(b&0x7f) << shift
+ if (b & 0x80) != 0x80 {
+ break
+ }
+ shift += 7
+ }
+ return result, nil
+}
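+
+// Illustrative example (added for clarity, not from the upstream Thrift
+// sources): the varint bytes 0xAC 0x02 decode to 300 (0x2C from the first
+// byte plus 2<<7 from the second); the cleared MSB on 0x02 ends the number.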
+
+// Read a byte, unlike ReadByte that reads Thrift-byte that is i8.
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+ return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+ u := uint32(n)
+ return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+ u := uint64(n)
+ return int64(u>>1) ^ -(n & 1)
+}
+
+// Note that it's important that the mask bytes are long literals,
+// otherwise they'll default to ints, and when you shift an int left 56 bits,
+// you just get a messed up int.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+ return int64(binary.LittleEndian.Uint64(b))
+}
+
+// Note that it's important that the mask bytes are long literals,
+// otherwise they'll default to ints, and when you shift an int left 56 bits,
+// you just get a messed up int.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+ return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+ return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+ switch byte(t) & 0x0f {
+ case STOP:
+ return STOP, nil
+ case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+ return BOOL, nil
+ case COMPACT_BYTE:
+ return BYTE, nil
+ case COMPACT_I16:
+ return I16, nil
+ case COMPACT_I32:
+ return I32, nil
+ case COMPACT_I64:
+ return I64, nil
+ case COMPACT_DOUBLE:
+ return DOUBLE, nil
+ case COMPACT_BINARY:
+ return STRING, nil
+ case COMPACT_LIST:
+ return LIST, nil
+ case COMPACT_SET:
+ return SET, nil
+ case COMPACT_MAP:
+ return MAP, nil
+ case COMPACT_STRUCT:
+ return STRUCT, nil
+ }
+ return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+ return ttypeToCompactType[t]
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
new file mode 100644
index 000000000..ea8d6f661
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+)
+
+// Generic Thrift exception
+type TException interface {
+ error
+}
+
+// Prepends additional information to an error without losing the Thrift exception interface
+func PrependError(prepend string, err error) error {
+ if t, ok := err.(TTransportException); ok {
+ return NewTTransportException(t.TypeId(), prepend+t.Error())
+ }
+ if t, ok := err.(TProtocolException); ok {
+ return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
+ }
+ if t, ok := err.(TApplicationException); ok {
+ return NewTApplicationException(t.TypeId(), prepend+t.Error())
+ }
+
+ return errors.New(prepend + err.Error())
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
new file mode 100644
index 000000000..b62fd56f0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bytes"
+)
+
+// Memory buffer-based implementation of the TTransport interface.
+type TMemoryBuffer struct {
+ *bytes.Buffer
+ size int
+}
+
+type TMemoryBufferTransportFactory struct {
+ size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
+ if trans != nil {
+ t, ok := trans.(*TMemoryBuffer)
+ if ok && t.size > 0 {
+ return NewTMemoryBufferLen(t.size)
+ }
+ }
+ return NewTMemoryBufferLen(p.size)
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+ return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+ return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+ buf := make([]byte, 0, size)
+ return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
+
+func (p *TMemoryBuffer) IsOpen() bool {
+ return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+ return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+ p.Buffer.Reset()
+ return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush() error {
+ return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+ return uint64(p.Buffer.Len())
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
new file mode 100644
index 000000000..25ab2e98a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+ INVALID_TMESSAGE_TYPE TMessageType = 0
+ CALL TMessageType = 1
+ REPLY TMessageType = 2
+ EXCEPTION TMessageType = 3
+ ONEWAY TMessageType = 4
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
new file mode 100644
index 000000000..aa8daa9b5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "math"
+ "strconv"
+)
+
+type Numeric interface {
+ Int64() int64
+ Int32() int32
+ Int16() int16
+ Byte() byte
+ Int() int
+ Float64() float64
+ Float32() float32
+ String() string
+ isNull() bool
+}
+
+type numeric struct {
+ iValue int64
+ dValue float64
+ sValue string
+ isNil bool
+}
+
+var (
+ INFINITY Numeric
+ NEGATIVE_INFINITY Numeric
+ NAN Numeric
+ ZERO Numeric
+ NUMERIC_NULL Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+ if math.IsInf(dValue, 1) {
+ return INFINITY
+ }
+ if math.IsInf(dValue, -1) {
+ return NEGATIVE_INFINITY
+ }
+ if math.IsNaN(dValue) {
+ return NAN
+ }
+ iValue := int64(dValue)
+ sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+ isNil := false
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+ dValue := float64(iValue)
+ sValue := strconv.FormatInt(iValue, 10)
+ isNil := false
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+ dValue := float64(iValue)
+ sValue := strconv.FormatInt(int64(iValue), 10)
+ isNil := false
+ return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+ if sValue == INFINITY.String() {
+ return INFINITY
+ }
+ if sValue == NEGATIVE_INFINITY.String() {
+ return NEGATIVE_INFINITY
+ }
+ if sValue == NAN.String() {
+ return NAN
+ }
+ iValue, _ := strconv.ParseInt(sValue, 10, 64)
+ dValue, _ := strconv.ParseFloat(sValue, 64)
+ isNil := len(sValue) == 0
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+ if isNull {
+ return NewNullNumeric()
+ }
+ if sValue == JSON_INFINITY {
+ return INFINITY
+ }
+ if sValue == JSON_NEGATIVE_INFINITY {
+ return NEGATIVE_INFINITY
+ }
+ if sValue == JSON_NAN {
+ return NAN
+ }
+ iValue, _ := strconv.ParseInt(sValue, 10, 64)
+ dValue, _ := strconv.ParseFloat(sValue, 64)
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+ return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+ return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+ return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+ return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+ return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+ return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+ return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+ return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+ return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+ return p.isNil
+}
+
+func init() {
+ INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+ NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+ NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+ ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+ NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
new file mode 100644
index 000000000..ca0d3faf2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+ Process(in, out TProtocol) (bool, TException)
+}
+
+type TProcessorFunction interface {
+ Process(seqId int32, in, out TProtocol) (bool, TException)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
new file mode 100644
index 000000000..45fa202e7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+)
+
+const (
+ VERSION_MASK = 0xffff0000
+ VERSION_1 = 0x80010000
+)
+
+type TProtocol interface {
+ WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
+ WriteMessageEnd() error
+ WriteStructBegin(name string) error
+ WriteStructEnd() error
+ WriteFieldBegin(name string, typeId TType, id int16) error
+ WriteFieldEnd() error
+ WriteFieldStop() error
+ WriteMapBegin(keyType TType, valueType TType, size int) error
+ WriteMapEnd() error
+ WriteListBegin(elemType TType, size int) error
+ WriteListEnd() error
+ WriteSetBegin(elemType TType, size int) error
+ WriteSetEnd() error
+ WriteBool(value bool) error
+ WriteByte(value int8) error
+ WriteI16(value int16) error
+ WriteI32(value int32) error
+ WriteI64(value int64) error
+ WriteDouble(value float64) error
+ WriteString(value string) error
+ WriteBinary(value []byte) error
+
+ ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
+ ReadMessageEnd() error
+ ReadStructBegin() (name string, err error)
+ ReadStructEnd() error
+ ReadFieldBegin() (name string, typeId TType, id int16, err error)
+ ReadFieldEnd() error
+ ReadMapBegin() (keyType TType, valueType TType, size int, err error)
+ ReadMapEnd() error
+ ReadListBegin() (elemType TType, size int, err error)
+ ReadListEnd() error
+ ReadSetBegin() (elemType TType, size int, err error)
+ ReadSetEnd() error
+ ReadBool() (value bool, err error)
+ ReadByte() (value int8, err error)
+ ReadI16() (value int16, err error)
+ ReadI32() (value int32, err error)
+ ReadI64() (value int64, err error)
+ ReadDouble() (value float64, err error)
+ ReadString() (value string, err error)
+ ReadBinary() (value []byte, err error)
+
+ Skip(fieldType TType) (err error)
+ Flush() (err error)
+
+ Transport() TTransport
+}
+
+// The maximum recursive depth the skip() function will traverse
+const DEFAULT_RECURSION_DEPTH = 64
+
+// Skips over the next data element from the provided input TProtocol object.
+func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
+ return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
+
+// Skips over the next data element from the provided input TProtocol object.
+func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+ if maxDepth <= 0 {
+ return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+ }
+
+ switch fieldType {
+ case STOP:
+ return
+ case BOOL:
+ _, err = self.ReadBool()
+ return
+ case BYTE:
+ _, err = self.ReadByte()
+ return
+ case I16:
+ _, err = self.ReadI16()
+ return
+ case I32:
+ _, err = self.ReadI32()
+ return
+ case I64:
+ _, err = self.ReadI64()
+ return
+ case DOUBLE:
+ _, err = self.ReadDouble()
+ return
+ case STRING:
+ _, err = self.ReadString()
+ return
+ case STRUCT:
+ if _, err = self.ReadStructBegin(); err != nil {
+ return err
+ }
+ for {
+ _, typeId, _, _ := self.ReadFieldBegin()
+ if typeId == STOP {
+ break
+ }
+ err := Skip(self, typeId, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ self.ReadFieldEnd()
+ }
+ return self.ReadStructEnd()
+ case MAP:
+ keyType, valueType, size, err := self.ReadMapBegin()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(self, keyType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ err = Skip(self, valueType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadMapEnd()
+ case SET:
+ elemType, size, err := self.ReadSetBegin()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(self, elemType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadSetEnd()
+ case LIST:
+ elemType, size, err := self.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(self, elemType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadListEnd()
+ }
+ return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
new file mode 100644
index 000000000..6e357ee89
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "encoding/base64"
+)
+
+// Thrift Protocol exception
+type TProtocolException interface {
+ TException
+ TypeId() int
+}
+
+const (
+ UNKNOWN_PROTOCOL_EXCEPTION = 0
+ INVALID_DATA = 1
+ NEGATIVE_SIZE = 2
+ SIZE_LIMIT = 3
+ BAD_VERSION = 4
+ NOT_IMPLEMENTED = 5
+ DEPTH_LIMIT = 6
+)
+
+type tProtocolException struct {
+ typeId int
+ message string
+}
+
+func (p *tProtocolException) TypeId() int {
+ return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+ return p.message
+}
+
+func (p *tProtocolException) Error() string {
+ return p.message
+}
+
+func NewTProtocolException(err error) TProtocolException {
+ if err == nil {
+ return nil
+ }
+ if e, ok := err.(TProtocolException); ok {
+ return e
+ }
+ if _, ok := err.(base64.CorruptInputError); ok {
+ return &tProtocolException{INVALID_DATA, err.Error()}
+ }
+ return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+ if err == nil {
+ return nil
+ }
+ return &tProtocolException{errType, err.Error()}
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
new file mode 100644
index 000000000..c40f796d8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+ GetProtocol(trans TTransport) TProtocol
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
new file mode 100644
index 000000000..8e296a99b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "io"
+
+type RichTransport struct {
+ TTransport
+}
+
+// Wraps Transport to provide TRichTransport interface
+func NewTRichTransport(trans TTransport) *RichTransport {
+ return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+ return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+ return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+ return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
+ return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+ v := [1]byte{0}
+ n, err := r.Read(v[0:1])
+ if n > 0 && (err == nil || err == io.EOF) {
+ return v[0], nil
+ }
+ if n > 0 && err != nil {
+ return v[0], err
+ }
+ if err != nil {
+ return 0, err
+ }
+ return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+ v := [1]byte{c}
+ _, err := w.Write(v[0:1])
+ return err
+}
+
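
A short sketch of what the wrapper buys you: a plain TTransport gains the byte- and string-level helpers of TRichTransport. The in-memory buffer comes from the vendored memory_buffer.go via thrift.NewTMemoryBufferLen, which is outside this hunk.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	buf := thrift.NewTMemoryBufferLen(16)
	rich := thrift.NewTRichTransport(buf)

	_ = rich.WriteByte('A')       // io.ByteWriter
	_, _ = rich.WriteString("BC") // converted to []byte internally
	b, _ := rich.ReadByte()       // io.ByteReader, reads back 'A'
	fmt.Printf("%c, %d bytes left\n", b, rich.RemainingBytes())
}
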
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
new file mode 100644
index 000000000..771222999
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TSerializer struct {
+ Transport *TMemoryBuffer
+ Protocol TProtocol
+}
+
+type TStruct interface {
+ Write(p TProtocol) error
+ Read(p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+ transport := NewTMemoryBufferLen(1024)
+ protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+
+ return &TSerializer{
+ transport,
+ protocol}
+}
+
+func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
+ t.Transport.Reset()
+
+ if err = msg.Write(t.Protocol); err != nil {
+ return
+ }
+
+ if err = t.Protocol.Flush(); err != nil {
+ return
+ }
+ if err = t.Transport.Flush(); err != nil {
+ return
+ }
+
+ return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
+ t.Transport.Reset()
+
+ if err = msg.Write(t.Protocol); err != nil {
+ return
+ }
+
+ if err = t.Protocol.Flush(); err != nil {
+ return
+ }
+
+ if err = t.Transport.Flush(); err != nil {
+ return
+ }
+
+ b = append(b, t.Transport.Bytes()...)
+ return
+}
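
Any type that implements TStruct (Write/Read against a TProtocol) can be handed to TSerializer; the default serializer runs the binary protocol over a 1024-byte memory buffer. A sketch with a hypothetical single-field struct, for illustration only:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

// ping is a hypothetical TStruct with one string field.
type ping struct{ msg string }

func (s *ping) Write(p thrift.TProtocol) error {
	if err := p.WriteStructBegin("Ping"); err != nil {
		return err
	}
	if err := p.WriteFieldBegin("msg", thrift.STRING, 1); err != nil {
		return err
	}
	if err := p.WriteString(s.msg); err != nil {
		return err
	}
	if err := p.WriteFieldEnd(); err != nil {
		return err
	}
	if err := p.WriteFieldStop(); err != nil {
		return err
	}
	return p.WriteStructEnd()
}

// Read is required by TStruct but unused when only serializing.
func (s *ping) Read(p thrift.TProtocol) error { return nil }

func main() {
	ser := thrift.NewTSerializer()
	b, err := ser.Write(&ping{msg: "hello"})
	fmt.Println(len(b), err)
}
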
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
new file mode 100644
index 000000000..412a482d0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -0,0 +1,1337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+)
+
+type _ParseContext int
+
+const (
+ _CONTEXT_IN_TOPLEVEL _ParseContext = 1
+ _CONTEXT_IN_LIST_FIRST _ParseContext = 2
+ _CONTEXT_IN_LIST _ParseContext = 3
+ _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4
+ _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5
+ _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6
+)
+
+func (p _ParseContext) String() string {
+ switch p {
+ case _CONTEXT_IN_TOPLEVEL:
+ return "TOPLEVEL"
+ case _CONTEXT_IN_LIST_FIRST:
+ return "LIST-FIRST"
+ case _CONTEXT_IN_LIST:
+ return "LIST"
+ case _CONTEXT_IN_OBJECT_FIRST:
+ return "OBJECT-FIRST"
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ return "OBJECT-NEXT-KEY"
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ return "OBJECT-NEXT-VALUE"
+ }
+ return "UNKNOWN-PARSE-CONTEXT"
+}
+
+// JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages. It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+ trans TTransport
+
+ parseContextStack []int
+ dumpContext []int
+
+ writer *bufio.Writer
+ reader *bufio.Reader
+}
+
+// Constructor
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+ v := &TSimpleJSONProtocol{trans: t,
+ writer: bufio.NewWriter(t),
+ reader: bufio.NewReader(t),
+ }
+ v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
+ v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+ return v
+}
+
+// Factory
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+ return &TSimpleJSONProtocolFactory{}
+}
+
+var (
+ JSON_COMMA []byte
+ JSON_COLON []byte
+ JSON_LBRACE []byte
+ JSON_RBRACE []byte
+ JSON_LBRACKET []byte
+ JSON_RBRACKET []byte
+ JSON_QUOTE byte
+ JSON_QUOTE_BYTES []byte
+ JSON_NULL []byte
+ JSON_TRUE []byte
+ JSON_FALSE []byte
+ JSON_INFINITY string
+ JSON_NEGATIVE_INFINITY string
+ JSON_NAN string
+ JSON_INFINITY_BYTES []byte
+ JSON_NEGATIVE_INFINITY_BYTES []byte
+ JSON_NAN_BYTES []byte
+ json_nonbase_map_elem_bytes []byte
+)
+
+func init() {
+ JSON_COMMA = []byte{','}
+ JSON_COLON = []byte{':'}
+ JSON_LBRACE = []byte{'{'}
+ JSON_RBRACE = []byte{'}'}
+ JSON_LBRACKET = []byte{'['}
+ JSON_RBRACKET = []byte{']'}
+ JSON_QUOTE = '"'
+ JSON_QUOTE_BYTES = []byte{'"'}
+ JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+ JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+ JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+ JSON_INFINITY = "Infinity"
+ JSON_NEGATIVE_INFINITY = "-Infinity"
+ JSON_NAN = "NaN"
+ JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+ JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+ JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+ json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+ b, _ := json.Marshal(s)
+ s1 := string(b)
+ return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+ s1 := new(string)
+ err := json.Unmarshal([]byte(s), s1)
+ return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+ return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+ p.resetContextStack() // THRIFT-3735
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteString(name); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(typeId)); e != nil {
+ return e
+ }
+ if e := p.WriteI32(seqId); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
+ if e := p.OutputObjectBegin(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd() error {
+ return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ if e := p.WriteString(name); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
+ //return p.OutputListEnd()
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(keyType)); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(valueType)); e != nil {
+ return e
+ }
+ return p.WriteI32(int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+ return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+ return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+ return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
+ return p.WriteI32(int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
+ return p.WriteI32(int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+ return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+ return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+ return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(v string) error {
+ return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+	// The JSON library only accepts a string, not an arbitrary byte
+	// array. To transmit bytes efficiently as a valid JSON string we
+	// base64-encode the value, which avoids excessive escaping/quoting.
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+ return NewTProtocolException(e)
+ }
+ writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+ if _, e := writer.Write(v); e != nil {
+ p.writer.Reset(p.trans) // THRIFT-3735
+ return NewTProtocolException(e)
+ }
+ if e := writer.Close(); e != nil {
+ return NewTProtocolException(e)
+ }
+ if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+ return NewTProtocolException(e)
+ }
+ return p.OutputPostValue()
+}
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+ p.resetContextStack() // THRIFT-3735
+ if isNull, err := p.ParseListBegin(); isNull || err != nil {
+ return name, typeId, seqId, err
+ }
+ if name, err = p.ReadString(); err != nil {
+ return name, typeId, seqId, err
+ }
+ bTypeId, err := p.ReadByte()
+ typeId = TMessageType(bTypeId)
+ if err != nil {
+ return name, typeId, seqId, err
+ }
+ if seqId, err = p.ReadI32(); err != nil {
+ return name, typeId, seqId, err
+ }
+ return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) {
+ _, err = p.ParseObjectStart()
+ return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd() error {
+ return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return "", STOP, 0, err
+ }
+ b, _ := p.reader.Peek(1)
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACE[0]:
+ return "", STOP, 0, nil
+ case JSON_QUOTE:
+ p.reader.ReadByte()
+ name, err := p.ParseStringBody()
+ // simplejson is not meant to be read back into thrift
+ // - see http://wiki.apache.org/thrift/ThriftUsageJava
+ // - use JSON instead
+ if err != nil {
+ return name, STOP, 0, err
+ }
+ return name, STOP, -1, p.ParsePostValue()
+ /*
+ if err = p.ParsePostValue(); err != nil {
+ return name, STOP, 0, err
+ }
+ if isNull, err := p.ParseListBegin(); isNull || err != nil {
+ return name, STOP, 0, err
+ }
+ bType, err := p.ReadByte()
+ thetype := TType(bType)
+ if err != nil {
+ return name, thetype, 0, err
+ }
+ id, err := p.ReadI16()
+ return name, thetype, id, err
+ */
+ }
+ e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+ return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd() error {
+ return nil
+ //return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+ if isNull, e := p.ParseListBegin(); isNull || e != nil {
+ return VOID, VOID, 0, e
+ }
+
+ // read keyType
+ bKeyType, e := p.ReadByte()
+ keyType = TType(bKeyType)
+ if e != nil {
+ return keyType, valueType, size, e
+ }
+
+ // read valueType
+ bValueType, e := p.ReadByte()
+ valueType = TType(bValueType)
+ if e != nil {
+ return keyType, valueType, size, e
+ }
+
+ // read size
+ iSize, err := p.ReadI64()
+ size = int(iSize)
+ return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+ return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+ return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
+ var value bool
+
+ if err := p.ParsePreValue(); err != nil {
+ return value, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 {
+ switch f[0] {
+ case JSON_TRUE[0]:
+ b := make([]byte, len(JSON_TRUE))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_TRUE) {
+ value = true
+ } else {
+ e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ break
+ case JSON_FALSE[0]:
+ b := make([]byte, len(JSON_FALSE))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_FALSE) {
+ value = false
+ } else {
+ e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ break
+ case JSON_NULL[0]:
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_NULL) {
+ value = false
+ } else {
+ e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ default:
+ e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte() (int8, error) {
+ v, err := p.ReadI64()
+ return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16() (int16, error) {
+ v, err := p.ReadI64()
+ return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32() (int32, error) {
+ v, err := p.ReadI64()
+ return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64() (int64, error) {
+ v, _, err := p.ParseI64()
+ return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) {
+ v, _, err := p.ParseF64()
+ return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString() (string, error) {
+ var v string
+ if err := p.ParsePreValue(); err != nil {
+ return v, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 && f[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ value, err := p.ParseStringBody()
+ v = value
+ if err != nil {
+ return v, err
+ }
+ } else if len(f) > 0 && f[0] == JSON_NULL[0] {
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return v, NewTProtocolException(err)
+ }
+ if string(b) != string(JSON_NULL) {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ } else {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
+ var v []byte
+ if err := p.ParsePreValue(); err != nil {
+ return nil, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 && f[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ value, err := p.ParseBase64EncodedBody()
+ v = value
+ if err != nil {
+ return v, err
+ }
+ } else if len(f) > 0 && f[0] == JSON_NULL[0] {
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return v, NewTProtocolException(err)
+ }
+ if string(b) != string(JSON_NULL) {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ } else {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+
+ return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush() (err error) {
+ return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) {
+ return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+ return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+ cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+ switch cxt {
+ case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ if _, e := p.write(JSON_COMMA); e != nil {
+ return NewTProtocolException(e)
+ }
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ if _, e := p.write(JSON_COLON); e != nil {
+ return NewTProtocolException(e)
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+ cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+ switch cxt {
+ case _CONTEXT_IN_LIST_FIRST:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST))
+ break
+ case _CONTEXT_IN_OBJECT_FIRST:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ var v string
+ if value {
+ v = string(JSON_TRUE)
+ } else {
+ v = string(JSON_FALSE)
+ }
+ switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = jsonQuote(v)
+ default:
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_NULL); e != nil {
+ return NewTProtocolException(e)
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ var v string
+ if math.IsNaN(value) {
+ v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+ } else if math.IsInf(value, 1) {
+ v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+ } else if math.IsInf(value, -1) {
+ v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+ } else {
+ v = strconv.FormatFloat(value, 'g', -1, 64)
+ switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+ default:
+ }
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ v := strconv.FormatInt(value, 10)
+ switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = jsonQuote(v)
+ default:
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if e := p.OutputStringData(jsonQuote(s)); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+ _, e := p.write([]byte(s))
+ return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_LBRACE); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST))
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+ if _, e := p.write(JSON_RBRACE); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ if e := p.OutputPostValue(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_LBRACKET); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST))
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+ if _, e := p.write(JSON_RBRACKET); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ if e := p.OutputPostValue(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(elemType)); e != nil {
+ return e
+ }
+ if e := p.WriteI64(int64(size)); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ b, _ := p.reader.Peek(1)
+ switch cxt {
+ case _CONTEXT_IN_LIST:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACKET[0]:
+ return nil
+ case JSON_COMMA[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACE[0]:
+ return nil
+ case JSON_COMMA[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_COLON[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ switch cxt {
+ case _CONTEXT_IN_LIST_FIRST:
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST))
+ break
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+ for {
+ b, _ := p.reader.Peek(1)
+ if len(b) < 1 {
+ return nil
+ }
+ switch b[0] {
+ case ' ', '\r', '\n', '\t':
+ p.reader.ReadByte()
+ continue
+ default:
+ break
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+ line, err := p.reader.ReadString(JSON_QUOTE)
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ l := len(line)
+ // count number of escapes to see if we need to keep going
+ i := 1
+ for ; i < l; i++ {
+ if line[l-i-1] != '\\' {
+ break
+ }
+ }
+ if i&0x01 == 1 {
+ v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+ if !ok {
+ return "", NewTProtocolException(err)
+ }
+ return v, nil
+ }
+ s, err := p.ParseQuotedStringBody()
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ str := string(JSON_QUOTE) + line + s
+ v, ok := jsonUnquote(str)
+ if !ok {
+ e := fmt.Errorf("Unable to parse as JSON string %s", str)
+ return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+ line, err := p.reader.ReadString(JSON_QUOTE)
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ l := len(line)
+ // count number of escapes to see if we need to keep going
+ i := 1
+ for ; i < l; i++ {
+ if line[l-i-1] != '\\' {
+ break
+ }
+ }
+ if i&0x01 == 1 {
+ return line, nil
+ }
+ s, err := p.ParseQuotedStringBody()
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ v := line + s
+ return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+ line, err := p.reader.ReadBytes(JSON_QUOTE)
+ if err != nil {
+ return line, NewTProtocolException(err)
+ }
+ line2 := line[0 : len(line)-1]
+ l := len(line2)
+ if (l % 4) != 0 {
+ pad := 4 - (l % 4)
+ fill := [...]byte{'=', '=', '='}
+ line2 = append(line2, fill[:pad]...)
+ l = len(line2)
+ }
+ output := make([]byte, base64.StdEncoding.DecodedLen(l))
+ n, err := base64.StdEncoding.Decode(output, line2)
+ return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return 0, false, err
+ }
+ var value int64
+ var isnull bool
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ isnull = true
+ } else {
+ num, err := p.readNumeric()
+ isnull = (num == nil)
+ if !isnull {
+ value = num.Int64()
+ }
+ if err != nil {
+ return value, isnull, err
+ }
+ }
+ return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return 0, false, err
+ }
+ var value float64
+ var isnull bool
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ isnull = true
+ } else {
+ num, err := p.readNumeric()
+ isnull = (num == nil)
+ if !isnull {
+ value = num.Float64()
+ }
+ if err != nil {
+ return value, isnull, err
+ }
+ }
+ return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return false, err
+ }
+ var b []byte
+ b, err := p.reader.Peek(1)
+ if err != nil {
+ return false, err
+ }
+ if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+ p.reader.ReadByte()
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST))
+ return false, nil
+ } else if p.safePeekContains(JSON_NULL) {
+ return true, nil
+ }
+ e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+ return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+ if isNull, err := p.readIfNull(); isNull || err != nil {
+ return err
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+ e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ line, err := p.reader.ReadString(JSON_RBRACE[0])
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ for _, char := range line {
+ switch char {
+ default:
+ e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ case ' ', '\n', '\r', '\t', '}':
+ break
+ }
+ }
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+ if e := p.ParsePreValue(); e != nil {
+ return false, e
+ }
+ var b []byte
+ b, err = p.reader.Peek(1)
+ if err != nil {
+ return false, err
+ }
+ if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST))
+ p.reader.ReadByte()
+ isNull = false
+ } else if p.safePeekContains(JSON_NULL) {
+ isNull = true
+ } else {
+ err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+ }
+ return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+ if isNull, e := p.ParseListBegin(); isNull || e != nil {
+ return VOID, 0, e
+ }
+ bElemType, err := p.ReadByte()
+ elemType = TType(bElemType)
+ if err != nil {
+ return elemType, size, err
+ }
+ nSize, err2 := p.ReadI64()
+ size = int(nSize)
+ return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+ if isNull, err := p.readIfNull(); isNull || err != nil {
+ return err
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ if cxt != _CONTEXT_IN_LIST {
+ e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ line, err := p.reader.ReadString(JSON_RBRACKET[0])
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ for _, char := range line {
+ switch char {
+ default:
+ e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+ break
+ }
+ }
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+ return nil
+ }
+ return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+ e := p.readNonSignificantWhitespace()
+ if e != nil {
+ return nil, VOID, NewTProtocolException(e)
+ }
+ b, e := p.reader.Peek(1)
+ if len(b) > 0 {
+ c := b[0]
+ switch c {
+ case JSON_NULL[0]:
+ buf := make([]byte, len(JSON_NULL))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return nil, VOID, NewTProtocolException(e)
+ }
+ if string(JSON_NULL) != string(buf) {
+ e = mismatch(string(JSON_NULL), string(buf))
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return nil, VOID, nil
+ case JSON_QUOTE:
+ p.reader.ReadByte()
+ v, e := p.ParseStringBody()
+ if e != nil {
+ return v, UTF8, NewTProtocolException(e)
+ }
+ if v == JSON_INFINITY {
+ return INFINITY, DOUBLE, nil
+ } else if v == JSON_NEGATIVE_INFINITY {
+ return NEGATIVE_INFINITY, DOUBLE, nil
+ } else if v == JSON_NAN {
+ return NAN, DOUBLE, nil
+ }
+ return v, UTF8, nil
+ case JSON_TRUE[0]:
+ buf := make([]byte, len(JSON_TRUE))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return true, BOOL, NewTProtocolException(e)
+ }
+ if string(JSON_TRUE) != string(buf) {
+ e := mismatch(string(JSON_TRUE), string(buf))
+ return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return true, BOOL, nil
+ case JSON_FALSE[0]:
+ buf := make([]byte, len(JSON_FALSE))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return false, BOOL, NewTProtocolException(e)
+ }
+ if string(JSON_FALSE) != string(buf) {
+ e := mismatch(string(JSON_FALSE), string(buf))
+ return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return false, BOOL, nil
+ case JSON_LBRACKET[0]:
+ _, e := p.reader.ReadByte()
+ return make([]interface{}, 0), LIST, NewTProtocolException(e)
+ case JSON_LBRACE[0]:
+ _, e := p.reader.ReadByte()
+ return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+ // assume numeric
+ v, e := p.readNumeric()
+ return v, DOUBLE, e
+ default:
+ e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+ cont := true
+ for cont {
+ b, _ := p.reader.Peek(1)
+ if len(b) < 1 {
+ return false, nil
+ }
+ switch b[0] {
+ default:
+ return false, nil
+ case JSON_NULL[0]:
+ cont = false
+ break
+ case ' ', '\n', '\r', '\t':
+ p.reader.ReadByte()
+ break
+ }
+ }
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ return true, nil
+ }
+ return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+ b, _ := p.reader.Peek(1)
+ if len(b) > 0 && b[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ }
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+ isNull, err := p.readIfNull()
+ if isNull || err != nil {
+ return NUMERIC_NULL, err
+ }
+ hasDecimalPoint := false
+ nextCanBeSign := true
+ hasE := false
+ MAX_LEN := 40
+ buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+ continueFor := true
+ inQuotes := false
+ for continueFor {
+ c, err := p.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return NUMERIC_NULL, NewTProtocolException(err)
+ }
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ buf.WriteByte(c)
+ nextCanBeSign = false
+ case '.':
+ if hasDecimalPoint {
+ e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if hasE {
+ e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ hasDecimalPoint, nextCanBeSign = true, false
+ case 'e', 'E':
+ if hasE {
+ e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ hasE, nextCanBeSign = true, true
+ case '-', '+':
+ if !nextCanBeSign {
+ e := fmt.Errorf("Negative sign within number")
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ nextCanBeSign = false
+ case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+ p.reader.UnreadByte()
+ continueFor = false
+ case JSON_NAN[0]:
+ if buf.Len() == 0 {
+ buffer := make([]byte, len(JSON_NAN))
+ buffer[0] = c
+ _, e := p.reader.Read(buffer[1:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_NAN != string(buffer) {
+ e := mismatch(JSON_NAN, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return NAN, nil
+ } else {
+ e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_INFINITY[0]:
+ if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+ buffer := make([]byte, len(JSON_INFINITY))
+ buffer[0] = c
+ _, e := p.reader.Read(buffer[1:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_INFINITY != string(buffer) {
+ e := mismatch(JSON_INFINITY, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return INFINITY, nil
+ } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+ buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+ buffer[0] = JSON_NEGATIVE_INFINITY[0]
+ buffer[1] = c
+ _, e := p.reader.Read(buffer[2:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_NEGATIVE_INFINITY != string(buffer) {
+ e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return NEGATIVE_INFINITY, nil
+ } else {
+ e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_QUOTE:
+ if !inQuotes {
+ inQuotes = true
+ } else {
+ break
+ }
+ default:
+ e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ if buf.Len() == 0 {
+ e := fmt.Errorf("Unable to parse number from empty string ''")
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return NewNumericFromJSONString(buf.String(), false), nil
+}
+
+// Safely peeks into the buffer, reading only what is necessary
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+ for i := 0; i < len(b); i++ {
+ a, _ := p.reader.Peek(i + 1)
+ if len(a) == 0 || a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Reset the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+ p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)}
+ p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+ n, err := p.writer.Write(b)
+ if err != nil {
+ p.writer.Reset(p.trans) // THRIFT-3735
+ }
+ return n, err
+}
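
A sketch of the output format: writing a struct with two fields through TSimpleJSONProtocol over a memory buffer yields plain JSON that scripting languages can parse, which is the point of this protocol (thrift.NewTMemoryBufferLen is again assumed from the vendored memory_buffer.go).

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	buf := thrift.NewTMemoryBufferLen(64)
	p := thrift.NewTSimpleJSONProtocol(buf)

	_ = p.WriteStructBegin("Example") // "{"
	_ = p.WriteFieldBegin("id", thrift.I64, 1)
	_ = p.WriteI64(42)
	_ = p.WriteFieldEnd()
	_ = p.WriteFieldBegin("name", thrift.STRING, 2)
	_ = p.WriteString("podman")
	_ = p.WriteFieldEnd()
	_ = p.WriteStructEnd() // "}"
	_ = p.Flush()

	fmt.Println(buf.String()) // {"id":42,"name":"podman"}
}
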
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
new file mode 100644
index 000000000..453899651
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+ Flush() (err error)
+}
+
+type ReadSizeProvider interface {
+ RemainingBytes() (num_bytes uint64)
+}
+
+
+// Encapsulates the I/O layer
+type TTransport interface {
+ io.ReadWriteCloser
+ Flusher
+ ReadSizeProvider
+
+ // Opens the transport for communication
+ Open() error
+
+ // Returns true if the transport is open
+ IsOpen() bool
+}
+
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+
+// This is "enchanced" transport with extra capabilities. You need to use one of these
+// to construct protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake to use
+// TSocket directly in protocol.
+type TRichTransport interface {
+ io.ReadWriter
+ io.ByteReader
+ io.ByteWriter
+ stringWriter
+ Flusher
+ ReadSizeProvider
+}
+
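
Because TTransport is just io.ReadWriteCloser plus Flusher, ReadSizeProvider, and Open/IsOpen, decorators compose naturally: embed an existing transport and override only the methods you care about. A sketch that counts written bytes, using the vendored TMemoryBuffer as the base transport:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

// countingTransport wraps another TTransport and tracks how many bytes were
// written through it; all other methods are promoted from the embedded
// transport, so the wrapper still satisfies TTransport.
type countingTransport struct {
	thrift.TTransport
	written int
}

func (c *countingTransport) Write(p []byte) (int, error) {
	n, err := c.TTransport.Write(p)
	c.written += n
	return n, err
}

func main() {
	ct := &countingTransport{TTransport: thrift.NewTMemoryBufferLen(32)}
	_, _ = ct.Write([]byte("hello"))
	fmt.Println(ct.written) // 5
}
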
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
new file mode 100644
index 000000000..9505b4461
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "io"
+)
+
+type timeoutable interface {
+ Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+ TException
+ TypeId() int
+ Err() error
+}
+
+const (
+ UNKNOWN_TRANSPORT_EXCEPTION = 0
+ NOT_OPEN = 1
+ ALREADY_OPEN = 2
+ TIMED_OUT = 3
+ END_OF_FILE = 4
+)
+
+type tTransportException struct {
+ typeId int
+ err error
+}
+
+func (p *tTransportException) TypeId() int {
+ return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+ return p.err.Error()
+}
+
+func (p *tTransportException) Err() error {
+ return p.err
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+ return &tTransportException{typeId: t, err: errors.New(e)}
+}
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+ if e == nil {
+ return nil
+ }
+
+ if t, ok := e.(TTransportException); ok {
+ return t
+ }
+
+ switch v := e.(type) {
+ case TTransportException:
+ return v
+ case timeoutable:
+ if v.Timeout() {
+ return &tTransportException{typeId: TIMED_OUT, err: e}
+ }
+ }
+
+ if e == io.EOF {
+ return &tTransportException{typeId: END_OF_FILE, err: e}
+ }
+
+ return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
+}
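
In practice NewTTransportExceptionFromError is the interesting entry point: io.EOF becomes END_OF_FILE, errors exposing Timeout() bool become TIMED_OUT, and everything else falls back to UNKNOWN_TRANSPORT_EXCEPTION. A quick sketch:

package main

import (
	"fmt"
	"io"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	te := thrift.NewTTransportExceptionFromError(io.EOF)
	fmt.Println(te.TypeId() == thrift.END_OF_FILE) // true
	fmt.Println(te.Err() == io.EOF)                // true, the original error is preserved
}
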
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
new file mode 100644
index 000000000..533d1b437
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory class used to create wrapped instances of Transports.
+// This is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to mutate them (e.g. create
+// a BufferedTransport from the underlying base transport).
+type TTransportFactory interface {
+ GetTransport(trans TTransport) TTransport
+}
+
+type tTransportFactory struct{}
+
+// Return a wrapped instance of the base Transport.
+func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
+ return trans
+}
+
+func NewTTransportFactory() TTransportFactory {
+ return &tTransportFactory{}
+}
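
A custom factory follows the same shape as the identity factory above; for example, one that upgrades every base transport to a RichTransport (which still satisfies TTransport because it embeds one). The memory buffer is assumed from the vendored memory_buffer.go.

package main

import "github.com/uber/jaeger-client-go/thrift"

type richFactory struct{}

// GetTransport wraps the base transport instead of returning it unchanged.
func (richFactory) GetTransport(trans thrift.TTransport) thrift.TTransport {
	return thrift.NewTRichTransport(trans)
}

func main() {
	var f thrift.TTransportFactory = richFactory{}
	wrapped := f.GetTransport(thrift.NewTMemoryBufferLen(16))
	_ = wrapped
}
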
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
new file mode 100644
index 000000000..4292ffcad
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+ STOP = 0
+ VOID = 1
+ BOOL = 2
+ BYTE = 3
+ I08 = 3
+ DOUBLE = 4
+ I16 = 6
+ I32 = 8
+ I64 = 10
+ STRING = 11
+ UTF7 = 11
+ STRUCT = 12
+ MAP = 13
+ SET = 14
+ LIST = 15
+ UTF8 = 16
+ UTF16 = 17
+	//BINARY = 18 wrong and unused
+)
+
+var typeNames = map[int]string{
+ STOP: "STOP",
+ VOID: "VOID",
+ BOOL: "BOOL",
+ BYTE: "BYTE",
+ DOUBLE: "DOUBLE",
+ I16: "I16",
+ I32: "I32",
+ I64: "I64",
+ STRING: "STRING",
+ STRUCT: "STRUCT",
+ MAP: "MAP",
+ SET: "SET",
+ LIST: "LIST",
+ UTF8: "UTF8",
+ UTF16: "UTF16",
+}
+
+func (p TType) String() string {
+ if s, ok := typeNames[int(p)]; ok {
+ return s
+ }
+ return "Unknown"
+}
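
TType's String method maps the numeric constants back to their names; values outside the table print as "Unknown". A tiny sketch:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	fmt.Println(thrift.TType(thrift.STRING)) // STRING
	fmt.Println(thrift.TType(200))           // Unknown
}
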
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
new file mode 100644
index 000000000..2c29ae8c9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -0,0 +1,441 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ "github.com/uber/jaeger-client-go/internal/throttler"
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// Tracer implements opentracing.Tracer.
+type Tracer struct {
+ serviceName string
+ hostIPv4 uint32 // this is for zipkin endpoint conversion
+
+ sampler Sampler
+ reporter Reporter
+ metrics Metrics
+ logger log.Logger
+
+ timeNow func() time.Time
+ randomNumber func() uint64
+
+ options struct {
+ poolSpans bool
+ gen128Bit bool // whether to generate 128bit trace IDs
+ zipkinSharedRPCSpan bool
+ highTraceIDGenerator func() uint64 // custom high trace ID generator
+ maxTagValueLength int
+ // more options to come
+ }
+ // pool for Span objects
+ spanPool sync.Pool
+
+ injectors map[interface{}]Injector
+ extractors map[interface{}]Extractor
+
+ observer compositeObserver
+
+ tags []Tag
+ process Process
+
+ baggageRestrictionManager baggage.RestrictionManager
+ baggageSetter *baggageSetter
+
+ debugThrottler throttler.Throttler
+}
+
+// NewTracer creates Tracer implementation that reports tracing to Jaeger.
+// The returned io.Closer can be used in shutdown hooks to ensure that the internal
+// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+func NewTracer(
+ serviceName string,
+ sampler Sampler,
+ reporter Reporter,
+ options ...TracerOption,
+) (opentracing.Tracer, io.Closer) {
+ t := &Tracer{
+ serviceName: serviceName,
+ sampler: sampler,
+ reporter: reporter,
+ injectors: make(map[interface{}]Injector),
+ extractors: make(map[interface{}]Extractor),
+ metrics: *NewNullMetrics(),
+ spanPool: sync.Pool{New: func() interface{} {
+ return &Span{}
+ }},
+ }
+
+ for _, option := range options {
+ option(t)
+ }
+
+ // register default injectors/extractors unless they are already provided via options
+ textPropagator := newTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
+ t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+ httpHeaderPropagator := newHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
+ t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+
+ binaryPropagator := newBinaryPropagator(t)
+ t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
+
+ // TODO remove after TChannel supports OpenTracing
+ interopPropagator := &jaegerTraceContextPropagator{tracer: t}
+ t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
+
+ zipkinPropagator := &zipkinPropagator{tracer: t}
+ t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
+
+ if t.baggageRestrictionManager != nil {
+ t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
+ } else {
+ t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
+ }
+ if t.debugThrottler == nil {
+ t.debugThrottler = throttler.DefaultThrottler{}
+ }
+
+ if t.randomNumber == nil {
+ seedGenerator := utils.NewRand(time.Now().UnixNano())
+ pool := sync.Pool{
+ New: func() interface{} {
+ return rand.NewSource(seedGenerator.Int63())
+ },
+ }
+
+ t.randomNumber = func() uint64 {
+ generator := pool.Get().(rand.Source)
+ number := uint64(generator.Int63())
+ pool.Put(generator)
+ return number
+ }
+ }
+ if t.timeNow == nil {
+ t.timeNow = time.Now
+ }
+ if t.logger == nil {
+ t.logger = log.NullLogger
+ }
+ // Set tracer-level tags
+ t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
+ if hostname, err := os.Hostname(); err == nil {
+ t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
+ }
+ if ip, err := utils.HostIP(); err == nil {
+ t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
+ t.hostIPv4 = utils.PackIPAsUint32(ip)
+ } else {
+ t.logger.Error("Unable to determine this host's IP address: " + err.Error())
+ }
+
+ if t.options.gen128Bit {
+ if t.options.highTraceIDGenerator == nil {
+ t.options.highTraceIDGenerator = t.randomNumber
+ }
+ } else if t.options.highTraceIDGenerator != nil {
+ t.logger.Error("Overriding high trace ID generator but not generating " +
+ "128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
+ }
+ if t.options.maxTagValueLength == 0 {
+ t.options.maxTagValueLength = DefaultMaxTagValueLength
+ }
+ t.process = Process{
+ Service: serviceName,
+ UUID: strconv.FormatUint(t.randomNumber(), 16),
+ Tags: t.tags,
+ }
+ if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
+ throttler.SetProcess(t.process)
+ }
+
+ return t, t
+}
+
+// addCodec registers an injector and an extractor for the given propagation format if not already defined.
+func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
+ if _, ok := t.injectors[format]; !ok {
+ t.injectors[format] = injector
+ }
+ if _, ok := t.extractors[format]; !ok {
+ t.extractors[format] = extractor
+ }
+}
+
+// StartSpan implements StartSpan() method of opentracing.Tracer.
+func (t *Tracer) StartSpan(
+ operationName string,
+ options ...opentracing.StartSpanOption,
+) opentracing.Span {
+ sso := opentracing.StartSpanOptions{}
+ for _, o := range options {
+ o.Apply(&sso)
+ }
+ return t.startSpanWithOptions(operationName, sso)
+}
+
+func (t *Tracer) startSpanWithOptions(
+ operationName string,
+ options opentracing.StartSpanOptions,
+) opentracing.Span {
+ if options.StartTime.IsZero() {
+ options.StartTime = t.timeNow()
+ }
+
+ // Predicate whether the given span context is a valid reference
+ // which may be used as parent / debug ID / baggage items source
+ isValidReference := func(ctx SpanContext) bool {
+ return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0
+ }
+
+ var references []Reference
+ var parent SpanContext
+ var hasParent bool // need this because `parent` is a value, not reference
+ for _, ref := range options.References {
+ ctx, ok := ref.ReferencedContext.(SpanContext)
+ if !ok {
+ t.logger.Error(fmt.Sprintf(
+ "Reference contains invalid type of SpanReference: %s",
+ reflect.ValueOf(ref.ReferencedContext)))
+ continue
+ }
+ if !isValidReference(ctx) {
+ continue
+ }
+ references = append(references, Reference{Type: ref.Type, Context: ctx})
+ if !hasParent {
+ parent = ctx
+ hasParent = ref.Type == opentracing.ChildOfRef
+ }
+ }
+ if !hasParent && isValidReference(parent) {
+ // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
+ // the FollowFromRef as the parent
+ hasParent = true
+ }
+
+ rpcServer := false
+ if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
+ rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
+ }
+
+ var samplerTags []Tag
+ var ctx SpanContext
+ newTrace := false
+ if !hasParent || !parent.IsValid() {
+ newTrace = true
+ ctx.traceID.Low = t.randomID()
+ if t.options.gen128Bit {
+ ctx.traceID.High = t.options.highTraceIDGenerator()
+ }
+ ctx.spanID = SpanID(ctx.traceID.Low)
+ ctx.parentID = 0
+ ctx.flags = byte(0)
+ if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
+ ctx.flags |= (flagSampled | flagDebug)
+ samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}}
+ } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled {
+ ctx.flags |= flagSampled
+ samplerTags = tags
+ }
+ } else {
+ ctx.traceID = parent.traceID
+ if rpcServer && t.options.zipkinSharedRPCSpan {
+ // Support Zipkin's one-span-per-RPC model
+ ctx.spanID = parent.spanID
+ ctx.parentID = parent.parentID
+ } else {
+ ctx.spanID = SpanID(t.randomID())
+ ctx.parentID = parent.spanID
+ }
+ ctx.flags = parent.flags
+ }
+ if hasParent {
+ // copy baggage items
+ if l := len(parent.baggage); l > 0 {
+ ctx.baggage = make(map[string]string, len(parent.baggage))
+ for k, v := range parent.baggage {
+ ctx.baggage[k] = v
+ }
+ }
+ }
+
+ sp := t.newSpan()
+ sp.context = ctx
+ sp.observer = t.observer.OnStartSpan(sp, operationName, options)
+ return t.startSpanInternal(
+ sp,
+ operationName,
+ options.StartTime,
+ samplerTags,
+ options.Tags,
+ newTrace,
+ rpcServer,
+ references,
+ )
+}
+
+// Inject implements Inject() method of opentracing.Tracer
+func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
+ c, ok := ctx.(SpanContext)
+ if !ok {
+ return opentracing.ErrInvalidSpanContext
+ }
+ if injector, ok := t.injectors[format]; ok {
+ return injector.Inject(c, carrier)
+ }
+ return opentracing.ErrUnsupportedFormat
+}
+
+// Extract implements Extract() method of opentracing.Tracer
+func (t *Tracer) Extract(
+ format interface{},
+ carrier interface{},
+) (opentracing.SpanContext, error) {
+ if extractor, ok := t.extractors[format]; ok {
+ return extractor.Extract(carrier)
+ }
+ return nil, opentracing.ErrUnsupportedFormat
+}
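+
+// Example usage (an illustrative sketch, not part of the vendored source):
+// propagating a span context across an HTTP request using the standard
+// opentracing HTTPHeaders format, for which this tracer registers a codec
+// elsewhere in this package. The span and httpReq variables are assumptions.
+//
+//   carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//   if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
+//       // handle the error
+//   }
+//   // ... and on the receiving side:
+//   spanCtx, err := tracer.Extract(opentracing.HTTPHeaders,
+//       opentracing.HTTPHeadersCarrier(httpReq.Header))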
+
+// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
+func (t *Tracer) Close() error {
+ t.reporter.Close()
+ t.sampler.Close()
+ if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
+ mgr.Close()
+ }
+ if throttler, ok := t.debugThrottler.(io.Closer); ok {
+ throttler.Close()
+ }
+ return nil
+}
+
+// Tags returns a slice of tracer-level tags.
+func (t *Tracer) Tags() []opentracing.Tag {
+ tags := make([]opentracing.Tag, len(t.tags))
+ for i, tag := range t.tags {
+ tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
+ }
+ return tags
+}
+
+// newSpan returns an instance of a clean Span object.
+// If options.PoolSpans is true, the spans are retrieved from an object pool.
+func (t *Tracer) newSpan() *Span {
+ if !t.options.poolSpans {
+ return &Span{}
+ }
+ sp := t.spanPool.Get().(*Span)
+ sp.context = emptyContext
+ sp.tracer = nil
+ sp.tags = nil
+ sp.logs = nil
+ return sp
+}
+
+func (t *Tracer) startSpanInternal(
+ sp *Span,
+ operationName string,
+ startTime time.Time,
+ internalTags []Tag,
+ tags opentracing.Tags,
+ newTrace bool,
+ rpcServer bool,
+ references []Reference,
+) *Span {
+ sp.tracer = t
+ sp.operationName = operationName
+ sp.startTime = startTime
+ sp.duration = 0
+ sp.references = references
+ sp.firstInProcess = rpcServer || sp.context.parentID == 0
+ if len(tags) > 0 || len(internalTags) > 0 {
+ sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags))
+ copy(sp.tags, internalTags)
+ for k, v := range tags {
+ sp.observer.OnSetTag(k, v)
+ if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) {
+ continue
+ }
+ sp.setTagNoLocking(k, v)
+ }
+ }
+ // emit metrics
+ if sp.context.IsSampled() {
+ t.metrics.SpansStartedSampled.Inc(1)
+ if newTrace {
+ // We cannot simply check for parentID==0 because in Zipkin model the
+ // server-side RPC span has the exact same trace/span/parent IDs as the
+ // calling client-side span, but obviously the server side span is
+ // no longer a root span of the trace.
+ t.metrics.TracesStartedSampled.Inc(1)
+ } else if sp.firstInProcess {
+ t.metrics.TracesJoinedSampled.Inc(1)
+ }
+ } else {
+ t.metrics.SpansStartedNotSampled.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedNotSampled.Inc(1)
+ } else if sp.firstInProcess {
+ t.metrics.TracesJoinedNotSampled.Inc(1)
+ }
+ }
+ return sp
+}
+
+func (t *Tracer) reportSpan(sp *Span) {
+ t.metrics.SpansFinished.Inc(1)
+ if sp.context.IsSampled() {
+ t.reporter.Report(sp)
+ }
+ if t.options.poolSpans {
+ t.spanPool.Put(sp)
+ }
+}
+
+// randomID generates a random trace/span ID using the tracer's randomNumber() generator.
+// It never returns 0.
+func (t *Tracer) randomID() uint64 {
+ val := t.randomNumber()
+ for val == 0 {
+ val = t.randomNumber()
+ }
+ return val
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) setBaggage(sp *Span, key, value string) {
+ t.baggageSetter.setBaggage(sp, key, value)
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) isDebugAllowed(operation string) bool {
+ return t.debugThrottler.IsAllowed(operation)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
new file mode 100644
index 000000000..a90265f03
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ "github.com/uber/jaeger-client-go/internal/throttler"
+)
+
+// TracerOption is a function that sets some option on the tracer
+type TracerOption func(tracer *Tracer)
+
+// TracerOptions is a factory for all available TracerOption's
+var TracerOptions tracerOptions
+
+type tracerOptions struct{}
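+
+// Example usage (an illustrative sketch, not part of the vendored source):
+// TracerOption values are passed to the tracer constructor. NewTracer,
+// NewConstSampler and NewNullReporter are assumed from other files of this
+// package vendored in this change.
+//
+//   tracer, closer := jaeger.NewTracer(
+//       "my-service",
+//       jaeger.NewConstSampler(true),
+//       jaeger.NewNullReporter(),
+//       jaeger.TracerOptions.Gen128Bit(true),
+//       jaeger.TracerOptions.Tag("hostname", "example-host"),
+//   )
+//   defer closer.Close()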
+
+// Metrics creates a TracerOption that initializes Metrics on the tracer,
+// which is used to emit statistics.
+func (tracerOptions) Metrics(m *Metrics) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.metrics = *m
+ }
+}
+
+// Logger creates a TracerOption that gives the tracer a Logger.
+func (tracerOptions) Logger(logger Logger) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.logger = logger
+ }
+}
+
+func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+ return func(tracer *Tracer) {
+ if headerKeys == nil {
+ return
+ }
+ textPropagator := newTextMapPropagator(headerKeys.applyDefaults(), tracer.metrics)
+ tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+ httpHeaderPropagator := newHTTPHeaderPropagator(headerKeys.applyDefaults(), tracer.metrics)
+ tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+ }
+}
+
+// TimeNow creates a TracerOption that gives the tracer a function
+// used to generate timestamps for spans.
+func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.timeNow = timeNow
+ }
+}
+
+// RandomNumber creates a TracerOption that gives the tracer
+// a thread-safe random number generator function for generating trace IDs.
+func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.randomNumber = randomNumber
+ }
+}
+
+// PoolSpans creates a TracerOption that tells the tracer whether it should use
+// an object pool to minimize span allocations.
+// This should be used with care, only if the service is not running any async tasks
+// that can access parent spans after those spans have been finished.
+func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.poolSpans = poolSpans
+ }
+}
+
+// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// If not set, the factory method will obtain the current IP address.
+// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
+func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.hostIPv4 = hostIPv4
+ }
+}
+
+func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.injectors[format] = injector
+ }
+}
+
+func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.extractors[format] = extractor
+ }
+}
+
+func (t tracerOptions) Observer(observer Observer) TracerOption {
+ return t.ContribObserver(&oldObserver{obs: observer})
+}
+
+func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.observer.append(observer)
+ }
+}
+
+func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.gen128Bit = gen128Bit
+ }
+}
+
+func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.highTraceIDGenerator = highTraceIDGenerator
+ }
+}
+
+func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.maxTagValueLength = maxTagValueLength
+ }
+}
+
+func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.tags = append(tracer.tags, Tag{key: key, value: value})
+ }
+}
+
+func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.baggageRestrictionManager = mgr
+ }
+}
+
+func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.debugThrottler = throttler
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
new file mode 100644
index 000000000..c5f5b1955
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "io"
+)
+
+// Transport abstracts the method of sending spans out of process.
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same go-routine.
+type Transport interface {
+ // Append converts the span to the wire representation and adds it
+ // to sender's internal buffer. If the buffer exceeds its designated
+ // size, the transport should call Flush() and return the number of spans
+ // flushed; otherwise it should return 0. If an error is returned, the returned number
+ // of spans is treated as the number of failed spans and reported to metrics accordingly.
+ Append(span *Span) (int, error)
+
+ // Flush submits the internal buffer to the remote server. It returns the
+ // number of spans flushed. If an error is returned, the returned number of
+ // spans is treated as the number of failed spans and reported to metrics accordingly.
+ Flush() (int, error)
+
+ io.Closer
+}
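+
+// A minimal sketch (not part of the vendored source) of a custom Transport that
+// only counts spans, e.g. for tests. It relies solely on the interface above and
+// flushes every 10 spans, an arbitrary threshold chosen for the example.
+//
+//   type countingTransport struct{ buffered, sent int }
+//
+//   func (t *countingTransport) Append(span *Span) (int, error) {
+//       t.buffered++
+//       if t.buffered >= 10 {
+//           return t.Flush()
+//       }
+//       return 0, nil
+//   }
+//
+//   func (t *countingTransport) Flush() (int, error) {
+//       n := t.buffered
+//       t.sent += n
+//       t.buffered = 0
+//       return n, nil
+//   }
+//
+//   func (t *countingTransport) Close() error { return nil }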
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
new file mode 100644
index 000000000..6b961fb63
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport defines various transports that can be used with
+// RemoteReporter to send spans out of process. Transport is responsible
+// for serializing the spans into a specific format suitable for sending
+// to the tracing backend. Examples may include Thrift over UDP, Thrift
+// or JSON over HTTP, Thrift over Kafka, etc.
+//
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same go-routine.
+package transport
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
new file mode 100644
index 000000000..bc1b3e6b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/http.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ "github.com/uber/jaeger-client-go"
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+// defaultHTTPTimeout is the default timeout for HTTP requests sent to the collector.
+const defaultHTTPTimeout = time.Second * 5
+
+// HTTPTransport implements Transport by forwarding spans to a http server.
+type HTTPTransport struct {
+ url string
+ client *http.Client
+ batchSize int
+ spans []*j.Span
+ process *j.Process
+ httpCredentials *HTTPBasicAuthCredentials
+}
+
+// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
+type HTTPBasicAuthCredentials struct {
+ username string
+ password string
+}
+
+// HTTPOption sets a parameter on the HTTPTransport.
+type HTTPOption func(c *HTTPTransport)
+
+// HTTPTimeout sets the maximum timeout for an HTTP request.
+func HTTPTimeout(duration time.Duration) HTTPOption {
+ return func(c *HTTPTransport) { c.client.Timeout = duration }
+}
+
+// HTTPBatchSize sets the maximum batch size, after which a collect will be
+// triggered. The default batch size is 100 spans.
+func HTTPBatchSize(n int) HTTPOption {
+ return func(c *HTTPTransport) { c.batchSize = n }
+}
+
+// HTTPBasicAuth sets the credentials required to perform HTTP basic auth
+func HTTPBasicAuth(username string, password string) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password}
+ }
+}
+
+// HTTPRoundTripper configures the underlying Transport on the *http.Client
+// that is used
+func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.client.Transport = transport
+ }
+}
+
+// NewHTTPTransport returns a new HTTP-backed transport. url should be the HTTP
+// URL of the collector endpoint that accepts POST requests, typically something like:
+// http://hostname:14268/api/traces?format=jaeger.thrift
+func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport {
+ c := &HTTPTransport{
+ url: url,
+ client: &http.Client{Timeout: defaultHTTPTimeout},
+ batchSize: 100,
+ spans: []*j.Span{},
+ }
+
+ for _, option := range options {
+ option(c)
+ }
+ return c
+}
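+
+// Example usage (an illustrative sketch, not part of the vendored source): the
+// collector URL and credentials are placeholders, and NewRemoteReporter is
+// assumed from the vendored jaeger package's reporter.go.
+//
+//   sender := transport.NewHTTPTransport(
+//       "http://jaeger-collector:14268/api/traces?format=jaeger.thrift",
+//       transport.HTTPBatchSize(10),
+//       transport.HTTPBasicAuth("user", "secret"),
+//   )
+//   reporter := jaeger.NewRemoteReporter(sender)
+//   defer reporter.Close()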
+
+// Append implements Transport.
+func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) {
+ if c.process == nil {
+ c.process = jaeger.BuildJaegerProcessThrift(span)
+ }
+ jSpan := jaeger.BuildJaegerThrift(span)
+ c.spans = append(c.spans, jSpan)
+ if len(c.spans) >= c.batchSize {
+ return c.Flush()
+ }
+ return 0, nil
+}
+
+// Flush implements Transport.
+func (c *HTTPTransport) Flush() (int, error) {
+ count := len(c.spans)
+ if count == 0 {
+ return 0, nil
+ }
+ err := c.send(c.spans)
+ c.spans = c.spans[:0]
+ return count, err
+}
+
+// Close implements Transport.
+func (c *HTTPTransport) Close() error {
+ return nil
+}
+
+func (c *HTTPTransport) send(spans []*j.Span) error {
+ batch := &j.Batch{
+ Spans: spans,
+ Process: c.process,
+ }
+ body, err := serializeThrift(batch)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("POST", c.url, body)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/x-thrift")
+
+ if c.httpCredentials != nil {
+ req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return err
+ }
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ if resp.StatusCode >= http.StatusBadRequest {
+ return fmt.Errorf("error from collector: %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
+ t := thrift.NewTMemoryBuffer()
+ p := thrift.NewTBinaryProtocolTransport(t)
+ if err := obj.Write(p); err != nil {
+ return nil, err
+ }
+ return t.Buffer, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
new file mode 100644
index 000000000..7b9ccf937
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -0,0 +1,131 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// Empirically obtained constant for how many bytes in the message are used for envelope.
+// The total datagram size is:
+// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
+// There is a unit test `TestEmitBatchOverhead` that validates this number.
+// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans
+// in the batch, because the length of the list is encoded as varint32, as well as SeqId.
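+//
+// As a rough, illustrative calculation (the sizes below are assumptions, not
+// measured values): with the default maxPacketSize of 65000 bytes, maxSpanBytes
+// is 65000 - 30 = 64970; if the Process blob serializes to about 100 bytes and
+// an average span to about 500 bytes, then roughly (64970 - 100) / 500 ~= 129
+// spans fit into a single datagram before a flush is forced.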
+const emitBatchOverhead = 30
+
+var errSpanTooLarge = errors.New("Span is too large")
+
+type udpSender struct {
+ client *utils.AgentClientUDP
+ maxPacketSize int // max size of datagram in bytes
+ maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram
+ byteBufferSize int // current number of span bytes accumulated in the buffer
+ spanBuffer []*j.Span // spans buffered before a flush
+ thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+ thriftProtocol thrift.TProtocol
+ process *j.Process
+ processByteSize int
+}
+
+// NewUDPTransport creates a Transport that submits spans to jaeger-agent over UDP.
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
+ if len(hostPort) == 0 {
+ hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
+ }
+ if maxPacketSize == 0 {
+ maxPacketSize = utils.UDPPacketMaxLength
+ }
+
+ protocolFactory := thrift.NewTCompactProtocolFactory()
+
+ // Each span is first written to thriftBuffer to determine its size in bytes.
+ thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+ thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
+
+ client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize)
+ if err != nil {
+ return nil, err
+ }
+
+ sender := &udpSender{
+ client: client,
+ maxSpanBytes: maxPacketSize - emitBatchOverhead,
+ thriftBuffer: thriftBuffer,
+ thriftProtocol: thriftProtocol}
+ return sender, nil
+}
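+
+// Example usage (an illustrative sketch, not part of the vendored source):
+// passing empty/zero arguments selects the defaults noted above, and
+// NewRemoteReporter is assumed from the vendored reporter.go.
+//
+//   sender, err := jaeger.NewUDPTransport("", 0)
+//   if err != nil {
+//       // handle the error
+//   }
+//   reporter := jaeger.NewRemoteReporter(sender)
+//   defer reporter.Close()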
+
+func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
+ s.thriftBuffer.Reset()
+ thriftStruct.Write(s.thriftProtocol)
+ return s.thriftBuffer.Len()
+}
+
+func (s *udpSender) Append(span *Span) (int, error) {
+ if s.process == nil {
+ s.process = BuildJaegerProcessThrift(span)
+ s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
+ s.byteBufferSize += s.processByteSize
+ }
+ jSpan := BuildJaegerThrift(span)
+ spanSize := s.calcSizeOfSerializedThrift(jSpan)
+ if spanSize > s.maxSpanBytes {
+ return 1, errSpanTooLarge
+ }
+
+ s.byteBufferSize += spanSize
+ if s.byteBufferSize <= s.maxSpanBytes {
+ s.spanBuffer = append(s.spanBuffer, jSpan)
+ if s.byteBufferSize < s.maxSpanBytes {
+ return 0, nil
+ }
+ return s.Flush()
+ }
+ // the latest span did not fit in the buffer
+ n, err := s.Flush()
+ s.spanBuffer = append(s.spanBuffer, jSpan)
+ s.byteBufferSize = spanSize + s.processByteSize
+ return n, err
+}
+
+func (s *udpSender) Flush() (int, error) {
+ n := len(s.spanBuffer)
+ if n == 0 {
+ return 0, nil
+ }
+ err := s.client.EmitBatch(&j.Batch{Process: s.process, Spans: s.spanBuffer})
+ s.resetBuffers()
+
+ return n, err
+}
+
+func (s *udpSender) Close() error {
+ return s.client.Close()
+}
+
+func (s *udpSender) resetBuffers() {
+ for i := range s.spanBuffer {
+ s.spanBuffer[i] = nil
+ }
+ s.spanBuffer = s.spanBuffer[:0]
+ s.byteBufferSize = s.processByteSize
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
new file mode 100644
index 000000000..237211f82
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`.
+func GetJSON(url string, out interface{}) error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ return ReadJSON(resp, out)
+}
+
+// ReadJSON reads JSON from http.Response and parses it into `out`
+func ReadJSON(resp *http.Response, out interface{}) error {
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+ }
+
+ if out == nil {
+ io.Copy(ioutil.Discard, resp.Body)
+ return nil
+ }
+
+ decoder := json.NewDecoder(resp.Body)
+ return decoder.Decode(out)
+}
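+
+// Example usage (an illustrative sketch, not part of the vendored source): the
+// URL and the shape of the decoded struct are assumptions made for the example.
+//
+//   var health struct {
+//       Status string `json:"status"`
+//   }
+//   if err := utils.GetJSON("http://localhost:14269/health", &health); err != nil {
+//       // handle the error
+//   }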
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
new file mode 100644
index 000000000..b51af7713
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+ "net"
+)
+
+// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go
+
+// scoreAddr scores how likely the given addr is to be a remote address and returns the
+// IP to use when listening. Any address which receives a negative score should not be used.
+// Scores are calculated as:
+// -1 for any unknown IP addresses.
+// +300 for IPv4 addresses
+// +100 for non-local addresses, extra +100 for "up" interfaces.
+func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) {
+ var ip net.IP
+ if netAddr, ok := addr.(*net.IPNet); ok {
+ ip = netAddr.IP
+ } else if netIP, ok := addr.(*net.IPAddr); ok {
+ ip = netIP.IP
+ } else {
+ return -1, nil
+ }
+
+ var score int
+ if ip.To4() != nil {
+ score += 300
+ }
+ if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() {
+ score += 100
+ if iface.Flags&net.FlagUp != 0 {
+ score += 100
+ }
+ }
+ return score, ip
+}
+
+// HostIP tries to find an IP that can be used by other machines to reach this machine.
+func HostIP() (net.IP, error) {
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ bestScore := -1
+ var bestIP net.IP
+ // Select the highest scoring IP as the best IP.
+ for _, iface := range interfaces {
+ addrs, err := iface.Addrs()
+ if err != nil {
+ // Skip this interface if there is an error.
+ continue
+ }
+
+ for _, addr := range addrs {
+ score, ip := scoreAddr(iface, addr)
+ if score > bestScore {
+ bestScore = score
+ bestIP = ip
+ }
+ }
+ }
+
+ if bestScore == -1 {
+ return nil, errors.New("no addresses to listen on")
+ }
+
+ return bestIP, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
new file mode 100644
index 000000000..9875f7f55
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// lockedSource allows a random number generator to be used by multiple goroutines concurrently.
+// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed.
+type lockedSource struct {
+ mut sync.Mutex
+ src rand.Source
+}
+
+// NewRand returns a rand.Rand that is threadsafe.
+func NewRand(seed int64) *rand.Rand {
+ return rand.New(&lockedSource{src: rand.NewSource(seed)})
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.mut.Lock()
+ n = r.src.Int63()
+ r.mut.Unlock()
+ return
+}
+
+// Seed implements Seed() of Source
+func (r *lockedSource) Seed(seed int64) {
+ r.mut.Lock()
+ r.src.Seed(seed)
+ r.mut.Unlock()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
new file mode 100644
index 000000000..1b8db9758
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "sync"
+ "time"
+)
+
+// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
+type RateLimiter interface {
+ CheckCredit(itemCost float64) bool
+}
+
+type rateLimiter struct {
+ sync.Mutex
+
+ creditsPerSecond float64
+ balance float64
+ maxBalance float64
+ lastTick time.Time
+
+ timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new rate limiter based on the leaky bucket algorithm, formulated in terms of a
+// credit balance that is replenished every time CheckCredit() is called (a tick) by an amount proportional
+// to the time elapsed since the last tick, up to a maximum of maxBalance. A call to CheckCredit() takes the cost
+// of an item we want to pay for with the balance. If the balance exceeds the cost of the item, the item is "purchased"
+// and the balance is reduced, indicated by a returned value of true. Otherwise the balance is unchanged and false is returned.
+//
+// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the
+// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message
+// to determine if the message is within the rate limit.
+//
+// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
+// as bytes/second, and calling CheckCredit() with the actual message size.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter {
+ return &rateLimiter{
+ creditsPerSecond: creditsPerSecond,
+ balance: maxBalance,
+ maxBalance: maxBalance,
+ lastTick: time.Now(),
+ timeNow: time.Now}
+}
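+
+// Example usage (an illustrative sketch, not part of the vendored source):
+// limiting a service to roughly two messages per second, with bursts of up to
+// two messages.
+//
+//   limiter := utils.NewRateLimiter(2.0, 2.0)
+//   if limiter.CheckCredit(1.0) {
+//       // within the rate limit: emit the message
+//   } else {
+//       // over the limit: drop or delay the message
+//   }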
+
+func (b *rateLimiter) CheckCredit(itemCost float64) bool {
+ b.Lock()
+ defer b.Unlock()
+ // calculate how much time passed since the last tick, and update current tick
+ currentTime := b.timeNow()
+ elapsedTime := currentTime.Sub(b.lastTick)
+ b.lastTick = currentTime
+ // calculate how much credit we have accumulated since the last tick
+ b.balance += elapsedTime.Seconds() * b.creditsPerSecond
+ if b.balance > b.maxBalance {
+ b.balance = b.maxBalance
+ }
+ // if we have enough credits to pay for current item, then reduce balance and allow
+ if b.balance >= itemCost {
+ b.balance -= itemCost
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
new file mode 100644
index 000000000..6f042073d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ "github.com/uber/jaeger-client-go/thrift-gen/agent"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent
+const UDPPacketMaxLength = 65000
+
+// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface.
+type AgentClientUDP struct {
+ agent.Agent
+ io.Closer
+
+ connUDP *net.UDPConn
+ client *agent.AgentClient
+ maxPacketSize int // max size of datagram in bytes
+ thriftBuffer *thrift.TMemoryBuffer // buffer into which batches are serialized before being written to UDP
+}
+
+// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
+ if maxPacketSize == 0 {
+ maxPacketSize = UDPPacketMaxLength
+ }
+
+ thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+ protocolFactory := thrift.NewTCompactProtocolFactory()
+ client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
+
+ destAddr, err := net.ResolveUDPAddr("udp", hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr)
+ if err != nil {
+ return nil, err
+ }
+ if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil {
+ return nil, err
+ }
+
+ clientUDP := &AgentClientUDP{
+ connUDP: connUDP,
+ client: client,
+ maxPacketSize: maxPacketSize,
+ thriftBuffer: thriftBuffer}
+ return clientUDP, nil
+}
+
+// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
+func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error {
+ return errors.New("Not implemented")
+}
+
+// EmitBatch implements EmitBatch() of Agent interface
+func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error {
+ a.thriftBuffer.Reset()
+ a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages
+ if err := a.client.EmitBatch(batch); err != nil {
+ return err
+ }
+ if a.thriftBuffer.Len() > a.maxPacketSize {
+ return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d",
+ a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+ }
+ _, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+ return err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *AgentClientUDP) Close() error {
+ return a.connUDP.Close()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
new file mode 100644
index 000000000..ac3c325d1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // ErrEmptyIP an error for empty ip strings
+ ErrEmptyIP = errors.New("empty string given for ip")
+
+ // ErrNotHostColonPort an error for invalid host port string
+ ErrNotHostColonPort = errors.New("expecting host:port")
+
+ // ErrNotFourOctets an error for the wrong number of octets after splitting a string
+ ErrNotFourOctets = errors.New("Wrong number of octets")
+)
+
+// ParseIPToUint32 converts a string ip (e.g. "x.y.z.w") to a uint32
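+//
+// For example (illustrative): ParseIPToUint32("10.1.2.3") returns
+// 10<<24 | 1<<16 | 2<<8 | 3 = 0x0A010203.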
+func ParseIPToUint32(ip string) (uint32, error) {
+ if ip == "" {
+ return 0, ErrEmptyIP
+ }
+
+ if ip == "localhost" {
+ return 127<<24 | 1, nil
+ }
+
+ octets := strings.Split(ip, ".")
+ if len(octets) != 4 {
+ return 0, ErrNotFourOctets
+ }
+
+ var intIP uint32
+ for i := 0; i < 4; i++ {
+ octet, err := strconv.Atoi(octets[i])
+ if err != nil {
+ return 0, err
+ }
+ intIP = (intIP << 8) | uint32(octet)
+ }
+
+ return intIP, nil
+}
+
+// ParsePort converts a port number from string to uint16.
+func ParsePort(portString string) (uint16, error) {
+ port, err := strconv.ParseUint(portString, 10, 16)
+ return uint16(port), err
+}
+
+// PackIPAsUint32 packs an IPv4 as uint32
+func PackIPAsUint32(ip net.IP) uint32 {
+ if ipv4 := ip.To4(); ipv4 != nil {
+ return binary.BigEndian.Uint32(ipv4)
+ }
+ return 0
+}
+
+// TimeToMicrosecondsSinceEpochInt64 converts a Go time.Time to an int64
+// representing the time since epoch in microseconds, which is the format
+// expected in Jaeger spans encoded as Thrift.
+func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 {
+ // ^^^ Passing time.Time by value is faster than passing a pointer!
+ // BenchmarkTimeByValue-8 2000000000 1.37 ns/op
+ // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op
+
+ return t.UnixNano() / 1000
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
new file mode 100644
index 000000000..636952b7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go"
+)
+
+// ZipkinSpanFormat is an OpenTracing carrier format constant
+const ZipkinSpanFormat = "zipkin-span-format"
+
+// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type ExtractableZipkinSpan interface {
+ TraceID() uint64
+ SpanID() uint64
+ ParentID() uint64
+ Flags() byte
+}
+
+// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type InjectableZipkinSpan interface {
+ SetTraceID(traceID uint64)
+ SetSpanID(spanID uint64)
+ SetParentID(parentID uint64)
+ SetFlags(flags byte)
+}
+
+type zipkinPropagator struct {
+ tracer *Tracer
+}
+
+func (p *zipkinPropagator) Inject(
+ ctx SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(InjectableZipkinSpan)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
+ carrier.SetSpanID(uint64(ctx.SpanID()))
+ carrier.SetParentID(uint64(ctx.ParentID()))
+ carrier.SetFlags(ctx.flags)
+ return nil
+}
+
+func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ if carrier.TraceID() == 0 {
+ return emptyContext, opentracing.ErrSpanContextNotFound
+ }
+ var ctx SpanContext
+ ctx.traceID.Low = carrier.TraceID()
+ ctx.spanID = SpanID(carrier.SpanID())
+ ctx.parentID = SpanID(carrier.ParentID())
+ ctx.flags = carrier.Flags()
+ return ctx, nil
+}
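+
+// Example usage (an illustrative sketch, not part of the vendored source):
+// this assumes the tracer has the propagator above registered under
+// ZipkinSpanFormat (the tracer constructor elsewhere in this package does so),
+// and that carrier is some value implementing InjectableZipkinSpan and
+// ExtractableZipkinSpan, e.g. a TChannel call frame.
+//
+//   if err := tracer.Inject(span.Context(), jaeger.ZipkinSpanFormat, carrier); err != nil {
+//       // handle the error
+//   }
+//   spanCtx, err := tracer.Extract(jaeger.ZipkinSpanFormat, carrier)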
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
new file mode 100644
index 000000000..dce58b433
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -0,0 +1,322 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ "github.com/opentracing/opentracing-go/ext"
+
+ "github.com/uber/jaeger-client-go/internal/spanlog"
+ z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ // Zipkin UI does not work well with non-string tag values
+ allowPackedNumbers = false
+)
+
+var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
+ string(ext.SpanKind): setSpanKind,
+ string(ext.PeerHostIPv4): setPeerIPv4,
+ string(ext.PeerPort): setPeerPort,
+ string(ext.PeerService): setPeerService,
+ TracerIPTagKey: removeTag,
+}
+
+// BuildZipkinThrift builds thrift span based on internal span.
+func BuildZipkinThrift(s *Span) *z.Span {
+ span := &zipkinSpan{Span: s}
+ span.handleSpecialTags()
+ parentID := int64(span.context.parentID)
+ var ptrParentID *int64
+ if parentID != 0 {
+ ptrParentID = &parentID
+ }
+ timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+ duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+ endpoint := &z.Endpoint{
+ ServiceName: span.tracer.serviceName,
+ Ipv4: int32(span.tracer.hostIPv4)}
+ thriftSpan := &z.Span{
+ TraceID: int64(span.context.traceID.Low), // TODO upgrade zipkin thrift and use TraceIdHigh
+ ID: int64(span.context.spanID),
+ ParentID: ptrParentID,
+ Name: span.operationName,
+ Timestamp: &timestamp,
+ Duration: &duration,
+ Debug: span.context.IsDebug(),
+ Annotations: buildAnnotations(span, endpoint),
+ BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
+ return thriftSpan
+}
+
+func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
+ // automatically adding 2 Zipkin CoreAnnotations
+ annotations := make([]*z.Annotation, 0, 2+len(span.logs))
+ var startLabel, endLabel string
+ if span.spanKind == string(ext.SpanKindRPCClientEnum) {
+ startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
+ } else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
+ startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
+ }
+ if !span.startTime.IsZero() && startLabel != "" {
+ start := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
+ Value: startLabel,
+ Host: endpoint}
+ annotations = append(annotations, start)
+ if span.duration != 0 {
+ endTs := span.startTime.Add(span.duration)
+ end := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
+ Value: endLabel,
+ Host: endpoint}
+ annotations = append(annotations, end)
+ }
+ }
+ for _, log := range span.logs {
+ anno := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+ Host: endpoint}
+ if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
+ anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
+ } else {
+ anno.Value = err.Error()
+ }
+ annotations = append(annotations, anno)
+ }
+ return annotations
+}
+
+func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
+ // automatically adding local component or server/client address tag, and client version
+ annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
+
+ if span.peerDefined() && span.isRPC() {
+ peer := z.Endpoint{
+ Ipv4: span.peer.Ipv4,
+ Port: span.peer.Port,
+ ServiceName: span.peer.ServiceName}
+ label := z.CLIENT_ADDR
+ if span.isRPCClient() {
+ label = z.SERVER_ADDR
+ }
+ anno := &z.BinaryAnnotation{
+ Key: label,
+ Value: []byte{1},
+ AnnotationType: z.AnnotationType_BOOL,
+ Host: &peer}
+ annotations = append(annotations, anno)
+ }
+ if !span.isRPC() {
+ componentName := endpoint.ServiceName
+ for _, tag := range span.tags {
+ if tag.key == string(ext.Component) {
+ componentName = stringify(tag.value)
+ break
+ }
+ }
+ local := &z.BinaryAnnotation{
+ Key: z.LOCAL_COMPONENT,
+ Value: []byte(componentName),
+ AnnotationType: z.AnnotationType_STRING,
+ Host: endpoint}
+ annotations = append(annotations, local)
+ }
+ for _, tag := range span.tags {
+ // "Special tags" are already handled by this point, we'd be double reporting the
+ // tags if we don't skip here
+ if _, ok := specialTagHandlers[tag.key]; ok {
+ continue
+ }
+ if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
+ annotations = append(annotations, anno)
+ }
+ }
+ return annotations
+}
+
+func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
+ bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
+ if value, ok := val.(string); ok {
+ bann.Value = []byte(truncateString(value, maxTagValueLength))
+ bann.AnnotationType = z.AnnotationType_STRING
+ } else if value, ok := val.([]byte); ok {
+ if len(value) > maxTagValueLength {
+ value = value[:maxTagValueLength]
+ }
+ bann.Value = value
+ bann.AnnotationType = z.AnnotationType_BYTES
+ } else if value, ok := val.(int32); ok && allowPackedNumbers {
+ bann.Value = int32ToBytes(value)
+ bann.AnnotationType = z.AnnotationType_I32
+ } else if value, ok := val.(int64); ok && allowPackedNumbers {
+ bann.Value = int64ToBytes(value)
+ bann.AnnotationType = z.AnnotationType_I64
+ } else if value, ok := val.(int); ok && allowPackedNumbers {
+ bann.Value = int64ToBytes(int64(value))
+ bann.AnnotationType = z.AnnotationType_I64
+ } else if value, ok := val.(bool); ok {
+ bann.Value = []byte{boolToByte(value)}
+ bann.AnnotationType = z.AnnotationType_BOOL
+ } else {
+ value := stringify(val)
+ bann.Value = []byte(truncateString(value, maxTagValueLength))
+ bann.AnnotationType = z.AnnotationType_STRING
+ }
+ return bann
+}
+
+func stringify(value interface{}) string {
+ if s, ok := value.(string); ok {
+ return s
+ }
+ return fmt.Sprintf("%+v", value)
+}
+
+func truncateString(value string, maxLength int) string {
+ // we ignore the problem of utf8 runes possibly being sliced in the middle,
+ // as it is rather expensive to iterate through each tag just to find rune
+ // boundaries.
+ if len(value) > maxLength {
+ return value[:maxLength]
+ }
+ return value
+}
+
+func boolToByte(b bool) byte {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// int32ToBytes converts int32 to bytes.
+func int32ToBytes(i int32) []byte {
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(i))
+ return buf
+}
+
+// int64ToBytes converts int64 to bytes.
+func int64ToBytes(i int64) []byte {
+ buf := make([]byte, 8)
+ binary.BigEndian.PutUint64(buf, uint64(i))
+ return buf
+}
+
+type zipkinSpan struct {
+ *Span
+
+ // peer points to the peer service participating in this span,
+ // e.g. the Client if this span is a server span,
+ // or Server if this span is a client span
+ peer struct {
+ Ipv4 int32
+ Port int16
+ ServiceName string
+ }
+
+ // used to distinguish local vs. RPC Server vs. RPC Client spans
+ spanKind string
+}
+
+func (s *zipkinSpan) handleSpecialTags() {
+ s.Lock()
+ defer s.Unlock()
+ if s.firstInProcess {
+ // append the process tags
+ s.tags = append(s.tags, s.tracer.tags...)
+ }
+ filteredTags := make([]Tag, 0, len(s.tags))
+ for _, tag := range s.tags {
+ if handler, ok := specialTagHandlers[tag.key]; ok {
+ handler(s, tag.value)
+ } else {
+ filteredTags = append(filteredTags, tag)
+ }
+ }
+ s.tags = filteredTags
+}
+
+func setSpanKind(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ s.spanKind = val
+ return
+ }
+ if val, ok := value.(ext.SpanKindEnum); ok {
+ s.spanKind = string(val)
+ }
+}
+
+func setPeerIPv4(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ if ip, err := utils.ParseIPToUint32(val); err == nil {
+ s.peer.Ipv4 = int32(ip)
+ return
+ }
+ }
+ if val, ok := value.(uint32); ok {
+ s.peer.Ipv4 = int32(val)
+ return
+ }
+ if val, ok := value.(int32); ok {
+ s.peer.Ipv4 = val
+ }
+}
+
+func setPeerPort(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ if port, err := utils.ParsePort(val); err == nil {
+ s.peer.Port = int16(port)
+ return
+ }
+ }
+ if val, ok := value.(uint16); ok {
+ s.peer.Port = int16(val)
+ return
+ }
+ if val, ok := value.(int); ok {
+ s.peer.Port = int16(val)
+ }
+}
+
+func setPeerService(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ s.peer.ServiceName = val
+ }
+}
+
+func removeTag(s *zipkinSpan, value interface{}) {}
+
+func (s *zipkinSpan) peerDefined() bool {
+ return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
+}
+
+func (s *zipkinSpan) isRPC() bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
+}
+
+func (s *zipkinSpan) isRPCClient() bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.spanKind == string(ext.SpanKindRPCClientEnum)
+}
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-lib/README.md b/vendor/github.com/uber/jaeger-lib/README.md
new file mode 100644
index 000000000..144ed1fb8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/README.md
@@ -0,0 +1,27 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+# jaeger-lib
+
+A collection of shared infrastructure libraries used by different
+components of the [Jaeger](https://github.com/uber/jaeger) backend and of [jaeger-client-go](https://github.com/uber/jaeger-client-go).
+This library is *not intended to be used standalone*, and provides *no guarantees of backwards compatibility*.
+
+The library's import path is `github.com/uber/jaeger-lib`.
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## License
+
+[Apache 2.0 License](./LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-lib?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-lib
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-lib.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-lib
+[cov-img]: https://coveralls.io/repos/jaegertracing/jaeger-lib/badge.svg?branch=master&service=github
+[cov]: https://coveralls.io/github/jaegertracing/jaeger-lib?branch=master
+
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
new file mode 100644
index 000000000..2a6a43efd
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Counter tracks the number of times an event has occurred.
+type Counter interface {
+ // Inc adds the given value to the counter.
+ Inc(int64)
+}
+
+// NullCounter is a Counter that does nothing.
+var NullCounter Counter = nullCounter{}
+
+type nullCounter struct{}
+
+func (nullCounter) Inc(int64) {}
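
The Counter interface above is the smallest contract a metrics backend has to satisfy: a single Inc method. A minimal sketch of how a caller might implement and use it follows; the inMemCounter type is hypothetical and not part of the vendored package.

```go
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/uber/jaeger-lib/metrics"
)

// inMemCounter is a hypothetical Counter backed by an atomic int64.
type inMemCounter struct {
	value int64
}

// Inc adds delta to the stored value, satisfying metrics.Counter.
func (c *inMemCounter) Inc(delta int64) {
	atomic.AddInt64(&c.value, delta)
}

func main() {
	c := &inMemCounter{}
	var counter metrics.Counter = c
	counter.Inc(1)
	counter.Inc(2)

	// NullCounter is the ready-made no-op for code paths where metrics are disabled.
	metrics.NullCounter.Inc(42)

	fmt.Println(atomic.LoadInt64(&c.value)) // prints 3
}
```
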
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
new file mode 100644
index 000000000..0ead061eb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// NSOptions defines the name and tags map associated with a factory namespace
+type NSOptions struct {
+ Name string
+ Tags map[string]string
+}
+
+// Options defines the information associated with a metric
+type Options struct {
+ Name string
+ Tags map[string]string
+ Help string
+}
+
+// TimerOptions defines the information associated with a metric
+type TimerOptions struct {
+ Name string
+ Tags map[string]string
+ Help string
+ Buckets []time.Duration
+}
+
+// HistogramOptions defines the information associated with a metric
+type HistogramOptions struct {
+ Name string
+ Tags map[string]string
+ Help string
+ Buckets []float64
+}
+
+// Factory creates new metrics
+type Factory interface {
+ Counter(metric Options) Counter
+ Timer(metric TimerOptions) Timer
+ Gauge(metric Options) Gauge
+ Histogram(metric HistogramOptions) Histogram
+
+ // Namespace returns a nested metrics factory.
+ Namespace(scope NSOptions) Factory
+}
+
+// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
+var NullFactory Factory = nullFactory{}
+
+type nullFactory struct{}
+
+func (nullFactory) Counter(options Options) Counter {
+ return NullCounter
+}
+func (nullFactory) Timer(options TimerOptions) Timer {
+ return NullTimer
+}
+func (nullFactory) Gauge(options Options) Gauge {
+ return NullGauge
+}
+func (nullFactory) Histogram(options HistogramOptions) Histogram {
+ return NullHistogram
+}
+func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
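
Factory is the only way client code constructs metrics, so substituting NullFactory is enough to disable reporting everywhere. A short usage sketch, assuming only the API defined above:

```go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// NullFactory hands back no-op counters, timers, gauges, and histograms.
	var f metrics.Factory = metrics.NullFactory

	// Namespace nests a name/tag scope; with the null factory it is still a no-op.
	scoped := f.Namespace(metrics.NSOptions{
		Name: "reporter",
		Tags: map[string]string{"lib": "jaeger"},
	})

	spans := scoped.Counter(metrics.Options{Name: "spans", Tags: map[string]string{"state": "started"}})
	latency := scoped.Timer(metrics.TimerOptions{Name: "flush_latency"})

	spans.Inc(1)
	latency.Record(5 * time.Millisecond)
}
```
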
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
new file mode 100644
index 000000000..3c606391a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Gauge records instantaneous measurements of something as an int64 value.
+type Gauge interface {
+ // Update the gauge to the value passed in.
+ Update(int64)
+}
+
+// NullGauge is a Gauge that does nothing.
+var NullGauge Gauge = nullGauge{}
+
+type nullGauge struct{}
+
+func (nullGauge) Update(int64) {}
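
Like Counter, Gauge is a one-method interface, so a backend adapter can be very small. A hypothetical atomic-backed implementation (not part of the vendored code):

```go
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/uber/jaeger-lib/metrics"
)

// atomicGauge is a hypothetical Gauge that keeps only the latest value.
type atomicGauge struct {
	value int64
}

// Update overwrites the stored value, satisfying metrics.Gauge.
func (g *atomicGauge) Update(v int64) {
	atomic.StoreInt64(&g.value, v)
}

func main() {
	g := &atomicGauge{}
	var gauge metrics.Gauge = g
	gauge.Update(7)
	gauge.Update(3) // a gauge retains only the most recent measurement

	fmt.Println(atomic.LoadInt64(&g.value)) // prints 3
}
```
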
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
new file mode 100644
index 000000000..d3bd6174f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Jaeger Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Histogram keeps track of a distribution of values.
+type Histogram interface {
+ // Records the value passed in.
+ Record(float64)
+}
+
+// NullHistogram is a Histogram that does nothing.
+var NullHistogram Histogram = nullHistogram{}
+
+type nullHistogram struct{}
+
+func (nullHistogram) Record(float64) {}
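
In tests it is often enough to capture every observation rather than aggregate it; a hypothetical recording Histogram (not part of the vendored code) might look like this:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

// recordingHistogram is a hypothetical Histogram that keeps every sample,
// which makes assertions in unit tests straightforward.
type recordingHistogram struct {
	samples []float64
}

// Record appends the observation, satisfying metrics.Histogram.
func (h *recordingHistogram) Record(v float64) {
	h.samples = append(h.samples, v)
}

func main() {
	h := &recordingHistogram{}
	var hist metrics.Histogram = h
	hist.Record(0.25)
	hist.Record(1.5)

	// NullHistogram silently discards observations when no backend is configured.
	metrics.NullHistogram.Record(3.0)

	fmt.Println(h.samples) // prints [0.25 1.5]
}
```
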
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
new file mode 100644
index 000000000..c24445a10
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "sort"
+)
+
+// GetKey converts name+tags into a single string of the form
+// "name|tag1=value1|...|tagN=valueN", where tag names are
+// sorted alphabetically.
+func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
+ keys := make([]string, 0, len(tags))
+ for k := range tags {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ key := name
+ for _, k := range keys {
+ key = key + tagsSep + k + tagKVSep + tags[k]
+ }
+ return key
+}
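
GetKey gives a stable string identity for a name plus tag set (tag names are sorted, so map iteration order does not matter), which is useful for caching metric instances by key. A quick illustration; the separator choices here are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	key := metrics.GetKey(
		"requests",
		map[string]string{"status": "ok", "method": "GET"},
		"|", // tagsSep
		"=", // tagKVSep
	)
	fmt.Println(key) // prints requests|method=GET|status=ok
}
```
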
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
new file mode 100644
index 000000000..0c6396888
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// MustInit initializes the fields of the passed-in metrics struct using the passed-in factory.
+//
+// It uses reflection to initialize a struct containing metrics fields
+// by assigning new Counter/Gauge/Timer/Histogram values with the metric name retrieved
+// from the `metric` tag and stats tags retrieved from the `tags` tag.
+//
+// Note: all fields of the struct must be exported, have a `metric` tag, and be
+// of type Counter, Gauge, Timer, or Histogram.
+//
+// Errors during Init lead to a panic.
+func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
+ if err := Init(metrics, factory, globalTags); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Init does the same as MustInit, but returns an error instead of
+// panicking.
+func Init(m interface{}, factory Factory, globalTags map[string]string) error {
+ // Allow user to opt out of reporting metrics by passing in nil.
+ if factory == nil {
+ factory = NullFactory
+ }
+
+ counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
+ gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
+ timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
+ histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
+
+ v := reflect.ValueOf(m).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ tags := make(map[string]string)
+ for k, v := range globalTags {
+ tags[k] = v
+ }
+ var buckets []float64
+ field := t.Field(i)
+ metric := field.Tag.Get("metric")
+ if metric == "" {
+ return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
+ }
+ if tagString := field.Tag.Get("tags"); tagString != "" {
+ tagPairs := strings.Split(tagString, ",")
+ for _, tagPair := range tagPairs {
+ tag := strings.Split(tagPair, "=")
+ if len(tag) != 2 {
+ return fmt.Errorf(
+ "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
+ field.Name, tagPair, tagString)
+ }
+ tags[tag[0]] = tag[1]
+ }
+ }
+ if bucketString := field.Tag.Get("buckets"); bucketString != "" {
+ if field.Type.AssignableTo(timerPtrType) {
+ // TODO: Parse timer duration buckets
+ return fmt.Errorf(
+ "Field [%s]: Buckets are not currently initialized for timer metrics",
+ field.Name)
+ } else if field.Type.AssignableTo(histogramPtrType) {
+ bucketValues := strings.Split(bucketString, ",")
+ for _, bucket := range bucketValues {
+ b, err := strconv.ParseFloat(bucket, 64)
+ if err != nil {
+ return fmt.Errorf(
+ "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
+ field.Name, bucket, bucketString)
+ }
+ buckets = append(buckets, b)
+ }
+ } else {
+ return fmt.Errorf(
+ "Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
+ field.Name)
+ }
+ }
+ help := field.Tag.Get("help")
+ var obj interface{}
+ if field.Type.AssignableTo(counterPtrType) {
+ obj = factory.Counter(Options{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(gaugePtrType) {
+ obj = factory.Gauge(Options{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(timerPtrType) {
+ // TODO: Add buckets once parsed (see TODO above)
+ obj = factory.Timer(TimerOptions{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(histogramPtrType) {
+ obj = factory.Histogram(HistogramOptions{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ Buckets: buckets,
+ })
+ } else {
+ return fmt.Errorf(
+ "Field %s is not of type Counter, Gauge, Timer, or Histogram",
+ field.Name)
+ }
+ v.Field(i).Set(reflect.ValueOf(obj))
+ }
+ return nil
+}
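
The reflection-based Init/MustInit above is typically driven by struct tags: declare a bundle of related metrics once and fill every field in a single call. A minimal sketch, with invented struct and metric names, using NullFactory so it runs without a backend:

```go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

// reporterMetrics is a hypothetical bundle of metrics; the struct tags drive Init.
type reporterMetrics struct {
	SpansStarted metrics.Counter   `metric:"spans" tags:"state=started"`
	QueueLength  metrics.Gauge     `metric:"queue_length"`
	FlushLatency metrics.Timer     `metric:"flush_latency"`
	BatchSize    metrics.Histogram `metric:"batch_size" buckets:"1,10,100"`
}

func main() {
	var m reporterMetrics
	// A real Factory would come from a metrics backend adapter; NullFactory
	// keeps the example self-contained.
	metrics.MustInit(&m, metrics.NullFactory, map[string]string{"lib": "jaeger"})

	m.SpansStarted.Inc(1)
	m.QueueLength.Update(3)
	m.FlushLatency.Record(2 * time.Millisecond)
	m.BatchSize.Record(42)
}
```
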
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
new file mode 100644
index 000000000..4a8abdb53
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// StartStopwatch begins recording the execution time of an event, returning
+// a Stopwatch that should be used to stop recording the time for
+// that event. Multiple events can occur simultaneously, each
+// represented by a different active Stopwatch.
+func StartStopwatch(timer Timer) Stopwatch {
+ return Stopwatch{t: timer, start: time.Now()}
+}
+
+// A Stopwatch tracks the execution time of a specific event
+type Stopwatch struct {
+ t Timer
+ start time.Time
+}
+
+// Stop stops the stopwatch and records the elapsed time on the underlying Timer.
+func (s Stopwatch) Stop() {
+ s.t.Record(s.ElapsedTime())
+}
+
+// ElapsedTime returns the time elapsed since the stopwatch was started, as a time.Duration.
+func (s Stopwatch) ElapsedTime() time.Duration {
+ return time.Since(s.start)
+}
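
A common pattern with the stopwatch is to start it at the top of an operation and defer the Stop, so the duration is recorded on every return path. A sketch using NullTimer so it runs without a backend; the flush function is hypothetical:

```go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

// flush is a hypothetical operation whose duration we want to record.
func flush(timer metrics.Timer) {
	sw := metrics.StartStopwatch(timer)
	defer sw.Stop() // records the elapsed time on the timer when flush returns

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	// NullTimer keeps the example runnable without a metrics backend.
	flush(metrics.NullTimer)
}
```
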
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
new file mode 100644
index 000000000..e18d222ab
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// Timer accumulates observations about how long some operation took,
+// and also maintains a histogram of percentiles.
+type Timer interface {
+ // Records the time passed in.
+ Record(time.Duration)
+}
+
+// NullTimer is a Timer that does nothing.
+var NullTimer Timer = nullTimer{}
+
+type nullTimer struct{}
+
+func (nullTimer) Record(time.Duration) {}
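
Backends decide how Record is aggregated; for unit tests, a timer that simply remembers every duration is usually enough. A hypothetical sketch (the capturingTimer type is not part of the vendored code):

```go
package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

// capturingTimer is a hypothetical Timer that remembers every observed duration.
type capturingTimer struct {
	observed []time.Duration
}

// Record appends the duration, satisfying metrics.Timer.
func (t *capturingTimer) Record(d time.Duration) {
	t.observed = append(t.observed, d)
}

func main() {
	ct := &capturingTimer{}
	var timer metrics.Timer = ct
	timer.Record(15 * time.Millisecond)
	timer.Record(40 * time.Millisecond)

	fmt.Println(ct.observed) // prints [15ms 40ms]
}
```
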