author     Daniel J Walsh <dwalsh@redhat.com>  2022-07-11 10:03:44 -0400
committer  Matthew Heon <matthew.heon@pm.me>  2022-07-26 13:34:38 -0400
commit     9c1de040b36483fed1c331c438d8bce5fd8fab58 (patch)
tree       7423c50dd57336eb045fea31665f4a1fb808acab /vendor/github.com/prometheus/client_golang
parent     03eaea8bbe4dc7791c2129d64321988d3ec12bb0 (diff)
Vendor in containers/(storage,image, common, buildah)
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor/github.com/prometheus/client_golang')
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/README.md                     |   2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go       |  38
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/collector.go                  |   8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter.go                    |   8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go               | 494
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go         | 107
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go         | 408
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go                  |  28
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go | 142
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go    |   1
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/value.go                      |   6
11 files changed, 948 insertions, 294 deletions
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
index 44986bff0..c67ff1b7f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -1 +1 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 000000000..450189f35
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
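
The new file above only keeps the old entry point alive; per its Deprecated comment, collectors.NewBuildInfoCollector is the intended API. A minimal, hedged sketch of registering and exposing the go_build_info metric (the promhttp handler and port are illustrative assumptions, not part of this commit):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry so that only the build-info metric is exported.
	reg := prometheus.NewRegistry()
	// Non-deprecated replacement for the NewBuildInfoCollector added above.
	reg.MustRegister(collectors.NewBuildInfoCollector())

	// Serves go_build_info{checksum=...,path=...,version=...} 1 on /metrics.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}
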
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 1e839650d..ac1ca3cf5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+ Metric
+ Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 3f8fd790d..00d70f09b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -133,10 +133,14 @@ func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
- val := fval + float64(ival)
+ return fval + float64(ival)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ val := c.get()
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
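
The small refactor above factors the counter's current value out into get(), which this commit uses later (in go_collector_go117.go) to add only the positive delta when mirroring a cumulative runtime/metrics sample, because Counter.Add panics on negative arguments. A hedged sketch of that guard pattern using only the public API (names are illustrative, not from the diff):

package metricsync

import "github.com/prometheus/client_golang/prometheus"

// syncCounter mirrors an externally observed, cumulative total into a
// prometheus.Counter by adding only the positive delta. Decreases are
// ignored because Counter.Add panics on negative values.
func syncCounter(c prometheus.Counter, prev *float64, observed float64) {
	if observed > *prev {
		c.Add(observed - *prev)
		*prev = observed
	}
}
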
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index a96ed1cee..08195b410 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -16,32 +16,209 @@ package prometheus
import (
"runtime"
"runtime/debug"
- "sync"
"time"
)
-type goCollector struct {
+func goRuntimeMemStats() memStatsMetrics {
+ return memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ },
+ }
+}
+
+type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
+ gcLastTimeDesc *Desc
goInfoDesc *Desc
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
}
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- return &goCollector{
+func newBaseGoCollector() baseGoCollector {
+ return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
@@ -54,243 +231,28 @@ func NewGoCollector() Collector {
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
+ gcLastTimeDesc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- },
- },
}
}
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
+ ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
}
// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -305,63 +267,19 @@ func (c *goCollector) Collect(ch chan<- Metric) {
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+ ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
}
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
+func memstatNamespace(s string) string {
+ return "go_memstats_" + s
}
-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
- path, version, sum := "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
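
This hunk splits the old monolithic goCollector: the memstats descriptions move into goRuntimeMemStats(), the goroutine/thread/GC/go_info metrics move into baseGoCollector, and the Go-version-specific files added below embed that base. A hedged sketch of exercising the resulting collector from user code (registry plus Gather only; everything above stays unexported):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	// NewGoCollector is deprecated in favour of collectors.NewGoCollector,
	// but both build on the baseGoCollector/goRuntimeMemStats split above.
	reg.MustRegister(prometheus.NewGoCollector())

	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range mfs {
		// Expect families such as go_goroutines, go_gc_duration_seconds,
		// go_info and the go_memstats_* series defined above.
		fmt.Println(mf.GetName())
	}
}
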
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 000000000..24526131e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,107 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+ "runtime"
+ "sync"
+ "time"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ return &goCollector{
+ base: newBaseGoCollector(),
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: goRuntimeMemStats(),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
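
Collect above keeps the pre-Go-1.17 strategy: runtime.ReadMemStats can stall while a GC is in progress, so it runs in a goroutine, and the collector falls back to the cached snapshot if the read misses msMaxWait and the cache is younger than msMaxAge. A standalone, hedged sketch of that timeout-with-stale-fallback pattern (simplified: single caller assumed, so the mutex is omitted):

package memstatsdemo

import (
	"runtime"
	"time"
)

// readMemStatsWithTimeout tries a fresh runtime.ReadMemStats but returns the
// cached snapshot if the read takes longer than maxWait and the cache is
// younger than maxAge. It mirrors goCollector.Collect above in reduced form.
func readMemStatsWithTimeout(cache *runtime.MemStats, cachedAt *time.Time, maxWait, maxAge time.Duration) *runtime.MemStats {
	fresh := &runtime.MemStats{}
	done := make(chan struct{})
	go func() {
		runtime.ReadMemStats(fresh) // may block during a garbage collection
		close(done)
	}()

	timer := time.NewTimer(maxWait)
	defer timer.Stop()
	select {
	case <-done: // fresh read finished in time
		*cache, *cachedAt = *fresh, time.Now()
		return cache
	case <-timer.C: // timed out; choose between the stale cache and waiting
		if time.Since(*cachedAt) < maxAge {
			return cache // stale but recent enough
		}
		<-done // cache too old: wait for the slow read after all
		*cache, *cachedAt = *fresh, time.Now()
		return cache
	}
}
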
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go
new file mode 100644
index 000000000..d43bdcdda
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go
@@ -0,0 +1,408 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+ "math"
+ "runtime"
+ "runtime/metrics"
+ "strings"
+ "sync"
+
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // mu protects updates to all fields ensuring a consistent
+ // snapshot is always produced by Collect.
+ mu sync.Mutex
+
+ // rm... fields all pertain to the runtime/metrics package.
+ rmSampleBuf []metrics.Sample
+ rmSampleMap map[string]*metrics.Sample
+ rmMetrics []collectorMetric
+
+ // With Go 1.17, the runtime/metrics package was introduced.
+ // From that point on, metric names produced by the runtime/metrics
+ // package could be generated from runtime/metrics names. However,
+ // these differ from the old names for the same values.
+ //
+ // This field exists to export the same values under the old names
+ // as well.
+ msMetrics memStatsMetrics
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ descriptions := metrics.All()
+
+ // Collect all histogram samples so that we can get their buckets.
+ // The API guarantees that the buckets are always fixed for the lifetime
+ // of the process.
+ var histograms []metrics.Sample
+ for _, d := range descriptions {
+ if d.Kind == metrics.KindFloat64Histogram {
+ histograms = append(histograms, metrics.Sample{Name: d.Name})
+ }
+ }
+ metrics.Read(histograms)
+ bucketsMap := make(map[string][]float64)
+ for i := range histograms {
+ bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+ }
+
+ // Generate a Desc and ValueType for each runtime/metrics metric.
+ metricSet := make([]collectorMetric, 0, len(descriptions))
+ sampleBuf := make([]metrics.Sample, 0, len(descriptions))
+ sampleMap := make(map[string]*metrics.Sample, len(descriptions))
+ for i := range descriptions {
+ d := &descriptions[i]
+ namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
+ if !ok {
+ // Just ignore this metric; we can't do anything with it here.
+ // If a user decides to use the latest version of Go, we don't want
+ // to fail here. This condition is tested elsewhere.
+ continue
+ }
+
+ // Set up sample buffer for reading, and a map
+ // for quick lookup of sample values.
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+ sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+ var m collectorMetric
+ if d.Kind == metrics.KindFloat64Histogram {
+ _, hasSum := rmExactSumMap[d.Name]
+ unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+ m = newBatchHistogram(
+ NewDesc(
+ BuildFQName(namespace, subsystem, name),
+ d.Description,
+ nil,
+ nil,
+ ),
+ internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+ hasSum,
+ )
+ } else if d.Cumulative {
+ m = NewCounter(CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: d.Description,
+ })
+ } else {
+ m = NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: d.Description,
+ })
+ }
+ metricSet = append(metricSet, m)
+ }
+ return &goCollector{
+ base: newBaseGoCollector(),
+ rmSampleBuf: sampleBuf,
+ rmSampleMap: sampleMap,
+ rmMetrics: metricSet,
+ msMetrics: goRuntimeMemStats(),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+ for _, m := range c.rmMetrics {
+ ch <- m.Desc()
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ // Collect must be thread-safe, so prevent concurrent use of
+ // rmSampleBuf. Just read into rmSampleBuf but write all the data
+ // we get into our Metrics or MemStats.
+ //
+ // This lock also ensures that the Metrics we send out are all from
+ // the same updates, ensuring their mutual consistency insofar as
+ // is guaranteed by the runtime/metrics package.
+ //
+ // N.B. This locking is heavy-handed, but Collect is expected to be called
+ // relatively infrequently. Also the core operation here, metrics.Read,
+ // is fast (O(tens of microseconds)) so contention should certainly be
+ // low, though channel operations and any allocations may add to that.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Populate runtime/metrics sample buffer.
+ metrics.Read(c.rmSampleBuf)
+
+ // Update all our metrics from rmSampleBuf.
+ for i, sample := range c.rmSampleBuf {
+ // N.B. switch on concrete type because it's significantly more efficient
+ // than checking for the Counter and Gauge interface implementations. In
+ // this case, we control all the types here.
+ switch m := c.rmMetrics[i].(type) {
+ case *counter:
+ // Guard against decreases. This should never happen, but a failure
+ // to do so will result in a panic, which is a harsh consequence for
+ // a metrics collection bug.
+ v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+ if v1 > v0 {
+ m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+ }
+ m.Collect(ch)
+ case *gauge:
+ m.Set(unwrapScalarRMValue(sample.Value))
+ m.Collect(ch)
+ case *batchHistogram:
+ m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+ m.Collect(ch)
+ default:
+ panic("unexpected metric type")
+ }
+ }
+ // ms is a dummy MemStats that we populate ourselves so that we can
+ // populate the old metrics from it.
+ var ms runtime.MemStats
+ memStatsFromRM(&ms, c.rmSampleMap)
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+ }
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+ switch v.Kind() {
+ case metrics.KindUint64:
+ return float64(v.Uint64())
+ case metrics.KindFloat64:
+ return v.Float64()
+ case metrics.KindBad:
+ // Unsupported metric.
+ //
+ // This should never happen because we always populate our metric
+ // set from the runtime/metrics package.
+ panic("unexpected unsupported metric")
+ default:
+ // Unsupported metric kind.
+ //
+ // This should never happen because we check for this during initialization
+ // and flag and filter metrics whose kinds we don't understand.
+ panic("unexpected unsupported metric kind")
+ }
+}
+
+var rmExactSumMap = map[string]string{
+ "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
+ "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum and whether
+// its exact sum exists.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+ sumName, ok := rmExactSumMap[rmName]
+ if !ok {
+ return 0
+ }
+ s, ok := c.rmSampleMap[sumName]
+ if !ok {
+ return 0
+ }
+ return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+ lookupOrZero := func(name string) uint64 {
+ if s, ok := rm[name]; ok {
+ return s.Value.Uint64()
+ }
+ return 0
+ }
+
+ // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+ // The reason for this is because MemStats couldn't be extended at the time
+ // but there was a desire to have Mallocs at least be a little more representative,
+ // while having Mallocs - Frees still represent a live object count.
+ // Unfortunately, MemStats doesn't actually export a large allocation count,
+ // so it's impossible to pull this number out directly.
+ tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
+ ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
+ ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
+
+ ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
+ ms.Sys = lookupOrZero("/memory/classes/total:bytes")
+ ms.Lookups = 0 // Already always zero.
+ ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
+ ms.Alloc = ms.HeapAlloc
+ ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
+ ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
+ ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
+ ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+ ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
+ ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
+ ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
+ ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
+ ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
+ ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
+ ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
+ ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
+ ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
+ ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
+ ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
+
+ // N.B. LastGC is omitted because runtime.GCStats already has this.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.LastGC = 0
+
+ // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+ // and often misleading due to the fact that it's an average over the lifetime
+ // of the process.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+ selfCollector
+
+ // Static fields updated only once.
+ desc *Desc
+ hasSum bool
+
+ // Because this histogram operates in batches, it just uses a
+ // single mutex for everything. updates are always serialized
+ // but Write calls may operate concurrently with updates.
+ // Contention between these two sources should be rare.
+ mu sync.Mutex
+ buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+ counts []uint64
+ sum float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+ h := &batchHistogram{
+ desc: desc,
+ buckets: buckets,
+ // Because buckets follows runtime/metrics conventions, there's
+ // 1 more value in the buckets list than there are buckets represented,
+ // because in runtime/metrics, the bucket values represent *boundaries*,
+ // and non-Inf boundaries are inclusive lower bounds for that bucket.
+ counts: make([]uint64, len(buckets)-1),
+ hasSum: hasSum,
+ }
+ h.init(h)
+ return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+ counts, buckets := his.Counts, his.Buckets
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ // Clear buckets.
+ for i := range h.counts {
+ h.counts[i] = 0
+ }
+ // Copy and reduce buckets.
+ var j int
+ for i, count := range counts {
+ h.counts[j] += count
+ if buckets[i+1] == h.buckets[j+1] {
+ j++
+ }
+ }
+ if h.hasSum {
+ h.sum = sum
+ }
+}
+
+func (h *batchHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ sum := float64(0)
+ if h.hasSum {
+ sum = h.sum
+ }
+ dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+ totalCount := uint64(0)
+ for i, count := range h.counts {
+ totalCount += count
+ if !h.hasSum {
+ // N.B. This computed sum is an underestimate.
+ sum += h.buckets[i] * float64(count)
+ }
+
+ // Skip the +Inf bucket, but only for the bucket list.
+ // It must still count for sum and totalCount.
+ if math.IsInf(h.buckets[i+1], 1) {
+ break
+ }
+ // Float64Histogram's upper bound is exclusive, so make it inclusive
+ // by obtaining the next float64 value down, in order.
+ upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+ dtoBuckets = append(dtoBuckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(totalCount),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+ out.Histogram = &dto.Histogram{
+ Bucket: dtoBuckets,
+ SampleCount: proto.Uint64(totalCount),
+ SampleSum: proto.Float64(sum),
+ }
+ return nil
+}
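
The Go 1.17 collector above is driven by the runtime/metrics package: every supported metric is sampled once per Collect under c.mu and fanned out to counters, gauges and batch histograms. A hedged, standard-library-only sketch of the underlying sampling loop it builds on:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Discover every metric the runtime exposes, as NewGoCollector does.
	descs := metrics.All()
	samples := make([]metrics.Sample, len(descs))
	for i, d := range descs {
		samples[i].Name = d.Name
	}

	// A single Read fills every sample; this is the call guarded by c.mu above.
	metrics.Read(samples)

	for _, s := range samples {
		switch s.Value.Kind() {
		case metrics.KindUint64:
			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
		case metrics.KindFloat64:
			fmt.Printf("%s = %f\n", s.Name, s.Value.Float64())
		case metrics.KindFloat64Histogram:
			h := s.Value.Float64Histogram()
			fmt.Printf("%s: %d buckets\n", s.Name, len(h.Counts))
		}
	}
}
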
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8425640b3..893802fd6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -116,6 +116,34 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
return buckets
}
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBucketsRange count needs a positive count")
+ }
+ if min <= 0 {
+ panic("ExponentialBucketsRange min needs to be greater than 0")
+ }
+
+ // Formula for exponential buckets.
+ // max = min*growthFactor^(bucketCount-1)
+
+ // We know max/min and highest bucket. Solve for growthFactor.
+ growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+
+ // Now that we know growthFactor, solve for each bucket.
+ buckets := make([]float64, count)
+ for i := 1; i <= count; i++ {
+ buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+ }
+ return buckets
+}
+
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
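
The growth factor in ExponentialBucketsRange above follows from max = min * growthFactor^(count-1). As a worked example (not from this commit), ExponentialBucketsRange(1, 100, 5) solves 100 = 1 * g^4, so g = 100^(1/4) ≈ 3.162 and the buckets come out as roughly 1, 3.16, 10, 31.6, 100. A hedged usage sketch:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// 100 = 1 * g^(5-1)  =>  g = 100^(1/4) ≈ 3.162
	buckets := prometheus.ExponentialBucketsRange(1, 100, 5)
	fmt.Println(buckets) // ≈ [1 3.162 10 31.62 100]

	// Typical use: the Buckets field of HistogramOpts.
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency.",
		Buckets: buckets,
	})
	hist.Observe(2.5)
}
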
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 000000000..fe0a52180
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,142 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+ "math"
+ "path"
+ "runtime/metrics"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+ namespace := "go"
+
+ comp := strings.SplitN(d.Name, ":", 2)
+ key := comp[0]
+ unit := comp[1]
+
+ // The last path element in the key is the name,
+ // the rest is the subsystem.
+ subsystem := path.Dir(key[1:] /* remove leading / */)
+ name := path.Base(key)
+
+ // subsystem is translated by replacing all / and - with _.
+ subsystem = strings.ReplaceAll(subsystem, "/", "_")
+ subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+ // unit is translated assuming that the unit contains no
+ // non-ASCII characters.
+ unit = strings.ReplaceAll(unit, "-", "_")
+ unit = strings.ReplaceAll(unit, "*", "_")
+ unit = strings.ReplaceAll(unit, "/", "_per_")
+
+ // name has - replaced with _ and is concatenated with the unit and
+ // other data.
+ name = strings.ReplaceAll(name, "-", "_")
+ name = name + "_" + unit
+ if d.Cumulative {
+ name = name + "_total"
+ }
+
+ valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+ switch d.Kind {
+ case metrics.KindUint64:
+ case metrics.KindFloat64:
+ case metrics.KindFloat64Histogram:
+ default:
+ valid = false
+ }
+ return namespace, subsystem, name, valid
+}
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+ switch unit {
+ case "bytes":
+ // Rebucket as powers of 2.
+ return rebucketExp(buckets, 2)
+ case "seconds":
+ // Rebucket as powers of 10 and then merge all buckets greater
+ // than 1 second into the +Inf bucket.
+ b := rebucketExp(buckets, 10)
+ for i := range b {
+ if b[i] <= 1 {
+ continue
+ }
+ b[i] = math.Inf(1)
+ b = b[:i+1]
+ break
+ }
+ return b
+ }
+ return buckets
+}
+
+// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func rebucketExp(buckets []float64, base float64) []float64 {
+ bucket := buckets[0]
+ var newBuckets []float64
+ // We may see a -Inf here, in which case, add it and skip it
+ // since we risk producing NaNs otherwise.
+ //
+ // We need to preserve -Inf values to maintain runtime/metrics
+ // conventions. We'll strip it out later.
+ if bucket == math.Inf(-1) {
+ newBuckets = append(newBuckets, bucket)
+ buckets = buckets[1:]
+ bucket = buckets[0]
+ }
+ // From now on, bucket should always have a non-Inf value because
+ // Infs are only ever at the ends of the bucket lists, so
+ // arithmetic operations on it are non-NaN.
+ for i := 1; i < len(buckets); i++ {
+ if bucket >= 0 && buckets[i] < bucket*base {
+ // The next bucket we want to include is at least bucket*base.
+ continue
+ } else if bucket < 0 && buckets[i] < bucket/base {
+ // In this case the bucket we're targeting is negative, and since
+ // we're ascending through buckets here, we need to divide to get
+ // closer to zero exponentially.
+ continue
+ }
+ // The +Inf bucket will always be the last one, and we'll always
+ // end up including it here, because +Inf is never smaller than bucket*base and so is never skipped above.
+ newBuckets = append(newBuckets, bucket)
+ bucket = buckets[i]
+ }
+ return append(newBuckets, bucket)
+}
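
RuntimeMetricsToProm above is internal, but its translation is mechanical: split the runtime/metrics name on ':', turn the key's path into subsystem and name with '/' and '-' mapped to '_', rewrite the unit ('/' becomes '_per_'), and append '_total' for cumulative metrics, so '/gc/heap/allocs:bytes' (cumulative) becomes go_gc_heap_allocs_bytes_total. A hedged re-implementation of just the name mapping, for illustration only (the real function also validates the result and rejects unknown metric kinds):

package main

import (
	"fmt"
	"path"
	"strings"
)

// promNameFor loosely mirrors internal.RuntimeMetricsToProm: it maps a
// runtime/metrics name such as "/gc/heap/allocs:bytes" to a Prometheus
// metric name. Validation and kind checks are omitted.
func promNameFor(rmName string, cumulative bool) string {
	comp := strings.SplitN(rmName, ":", 2)
	key, unit := comp[0], comp[1]

	subsystem := strings.NewReplacer("/", "_", "-", "_").Replace(path.Dir(key[1:]))
	name := strings.ReplaceAll(path.Base(key), "-", "_")
	unit = strings.NewReplacer("-", "_", "*", "_", "/", "_per_").Replace(unit)

	name = name + "_" + unit
	if cumulative {
		name += "_total"
	}
	return "go_" + subsystem + "_" + name
}

func main() {
	fmt.Println(promNameFor("/gc/heap/allocs:bytes", true))     // go_gc_heap_allocs_bytes_total
	fmt.Println(promNameFor("/sched/latencies:seconds", false)) // go_sched_latencies_seconds
}
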
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 3117461cd..2dc3660da 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package prometheus
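
The only change to this file is adding the //go:build line next to the legacy // +build comment, the paired form gofmt maintains for code that must still build with pre-1.17 toolchains (the new go_collector_go116.go and go_collector_go117.go files above use the same pairing). A hedged illustration of how the two syntaxes correspond for a compound constraint (hypothetical file, not from this commit):

//go:build linux && (amd64 || arm64)
// +build linux
// +build amd64 arm64

// The //go:build form uses boolean operators; the legacy // +build form
// ANDs separate lines and ORs space-separated terms, so the three lines
// above express the same constraint. gofmt keeps the pair in sync.
package example
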
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index c778711b8..b4e0ae11c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -21,7 +21,7 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
+ "google.golang.org/protobuf/types/known/timestamppb"
dto "github.com/prometheus/client_model/go"
)
@@ -183,8 +183,8 @@ const ExemplarMaxRunes = 64
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
- tsProto, err := ptypes.TimestampProto(ts)
- if err != nil {
+ tsProto := timestamppb.New(ts)
+ if err := tsProto.CheckValid(); err != nil {
return nil, err
}
e.Timestamp = tsProto
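
The last hunk replaces the deprecated github.com/golang/protobuf/ptypes helper with google.golang.org/protobuf/types/known/timestamppb: timestamppb.New cannot fail, so validity is now a separate CheckValid call. A hedged standalone sketch of the same migration outside this file:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Old style (removed above): tsProto, err := ptypes.TimestampProto(ts)
	// New style: construction cannot fail; validity is checked separately.
	ts := time.Now()
	tsProto := timestamppb.New(ts)
	if err := tsProto.CheckValid(); err != nil {
		fmt.Println("timestamp out of range:", err)
		return
	}
	fmt.Println(tsProto.AsTime()) // round-trips back to a time.Time in UTC
}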