author    dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>  2021-02-09 09:17:50 +0000
committer Daniel J Walsh <dwalsh@redhat.com>  2021-02-09 10:49:43 -0500
commit    08d8290f1d65a254b6794f7fe87a6f769b2ca792 (patch)
tree      1cb56c15d412d1d20226d1486bbd05656a3106e5 /vendor/golang.org/x/net
parent    19507d0ffe8cda0a69f056838556f471fd9e61fa (diff)
Bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0
Bumps [github.com/containers/ocicrypt](https://github.com/containers/ocicrypt) from 1.0.3 to 1.1.0.
- [Release notes](https://github.com/containers/ocicrypt/releases)
- [Commits](https://github.com/containers/ocicrypt/compare/v1.0.3...v1.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor/golang.org/x/net')
-rw-r--r--  vendor/golang.org/x/net/internal/timeseries/timeseries.go |  525
-rw-r--r--  vendor/golang.org/x/net/trace/events.go                   |  532
-rw-r--r--  vendor/golang.org/x/net/trace/histogram.go                |  365
-rw-r--r--  vendor/golang.org/x/net/trace/trace.go                    | 1130
4 files changed, 2552 insertions, 0 deletions
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 000000000..dc5225b6d
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+ "fmt"
+ "log"
+ "time"
+)
+
+const (
+ timeSeriesNumBuckets = 64
+ minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 10 * time.Second,
+ 1 * time.Minute,
+ 10 * time.Minute,
+ 1 * time.Hour,
+ 6 * time.Hour,
+ 24 * time.Hour, // 1 day
+ 7 * 24 * time.Hour, // 1 week
+ 4 * 7 * 24 * time.Hour, // 4 weeks
+ 16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+ Multiply(ratio float64) // Multiplies the data in self by a given ratio
+ Add(other Observable) // Adds the data from a different observation to self
+ Clear() // Clears the observation so it can be reused.
+ CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+ f := Float(0)
+ return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+ o := other.(*Float)
+ *f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+ o := other.(*Float)
+ *f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+ Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// tsLevel holds the information kept per level. Each level consists of a
+// circular list of observations. The start of the level may be derived from
+// end minus len(buckets) * size.
+type tsLevel struct {
+ oldest int // index to oldest bucketed Observable
+ newest int // index to newest bucketed Observable
+ end time.Time // end timestamp for this level
+ size time.Duration // duration of the bucketed Observable
+ buckets []Observable // collections of observations
+ provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+ l.oldest = 0
+ l.newest = len(l.buckets) - 1
+ l.end = time.Time{}
+ for i := range l.buckets {
+ if l.buckets[i] != nil {
+ l.buckets[i].Clear()
+ l.buckets[i] = nil
+ }
+ }
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+ l.size = size
+ l.provider = f
+ l.buckets = make([]Observable, numBuckets)
+}
+
+// timeSeries keeps a sequence of levels. Each level is responsible for
+// storing data at a given resolution. For example, the first level stores
+// data at a one minute resolution while the second level stores data at a
+// one hour resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+ provider func() Observable // make more Observable
+ numBuckets int // number of buckets in each level
+ levels []*tsLevel // levels of bucketed Observable
+ lastAdd time.Time // time of last Observable tracked
+ total Observable // convenient aggregation of all Observable
+ clock Clock // Clock for getting current time
+ pending Observable // observations not yet bucketed
+ pendingTime time.Time // what time are we keeping in pending
+ dirty bool // if there are pending observations
+}
+
+// init initializes the time series, creating one level per supplied resolution.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+ ts.provider = f
+ ts.numBuckets = numBuckets
+ ts.clock = clock
+ ts.levels = make([]*tsLevel, len(resolutions))
+
+ for i := range resolutions {
+ if i > 0 && resolutions[i-1] >= resolutions[i] {
+ log.Print("timeseries: resolutions must be monotonically increasing")
+ break
+ }
+ newLevel := new(tsLevel)
+ newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+ ts.levels[i] = newLevel
+ }
+
+ ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+ ts.lastAdd = time.Time{}
+ ts.total = ts.resetObservation(ts.total)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.pendingTime = time.Time{}
+ ts.dirty = false
+
+ for i := range ts.levels {
+ ts.levels[i].Clear()
+ }
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+ ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+ smallBucketDuration := ts.levels[0].size
+
+ if t.After(ts.lastAdd) {
+ ts.lastAdd = t
+ }
+
+ if t.After(ts.pendingTime) {
+ ts.advance(t)
+ ts.mergePendingUpdates()
+ ts.pendingTime = ts.levels[0].end
+ ts.pending.CopyFrom(observation)
+ ts.dirty = true
+ } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+ // The observation is close enough to go into the pending bucket.
+ // This compensates for clock skewing and small scheduling delays
+ // by letting the update stay in the fast path.
+ ts.pending.Add(observation)
+ ts.dirty = true
+ } else {
+ ts.mergeValue(observation, t)
+ }
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+ for _, level := range ts.levels {
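+		// Map t to a bucket index: level.end corresponds to the newest
+		// bucket (index numBuckets-1), and each step of level.size back
+		// from end moves one bucket toward the oldest.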
+ index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+ if 0 <= index && index < ts.numBuckets {
+ bucketNumber := (level.oldest + index) % ts.numBuckets
+ if level.buckets[bucketNumber] == nil {
+ level.buckets[bucketNumber] = level.provider()
+ }
+ level.buckets[bucketNumber].Add(observation)
+ }
+ }
+ ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+ if ts.dirty {
+ ts.mergeValue(ts.pending, ts.pendingTime)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.dirty = false
+ }
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+ if !t.After(ts.levels[0].end) {
+ return
+ }
+ for i := 0; i < len(ts.levels); i++ {
+ level := ts.levels[i]
+ if !level.end.Before(t) {
+ break
+ }
+
+ // If the time is sufficiently far, just clear the level and advance
+ // directly.
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+ for _, b := range level.buckets {
+ ts.resetObservation(b)
+ }
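+			// Align the level's end to a whole multiple of the bucket size.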
+ level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+ }
+
+ for t.After(level.end) {
+ level.end = level.end.Add(level.size)
+ level.newest = level.oldest
+ level.oldest = (level.oldest + 1) % ts.numBuckets
+ ts.resetObservation(level.buckets[level.newest])
+ }
+
+ t = level.end
+ }
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ result := ts.provider()
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ if l.buckets[index] != nil {
+ result.Add(l.buckets[index])
+ }
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+
+ return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+	if level < 0 || level >= len(ts.levels) {
+ log.Print("timeseries: bad level argument: ", level)
+ return nil
+ }
+ if num < 0 || num >= ts.numBuckets {
+ log.Print("timeseries: bad num argument: ", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ result := ts.provider()
+ results[i] = result
+ if l.buckets[index] != nil {
+ result.CopyFrom(l.buckets[index])
+ }
+
+ if index == 0 {
+ index = ts.numBuckets
+ }
+		index--
+ }
+ return results
+}
+
+// ScaleBy scales every observation, including the total and any pending
+// observation, by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+	for _, l := range ts.levels {
+		for i := 0; i < ts.numBuckets; i++ {
+			if l.buckets[i] != nil {
+				l.buckets[i].Multiply(factor)
+			}
+		}
+ }
+
+ ts.total.Multiply(factor)
+ ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish don't fall on bucket boundaries of the same level,
+// the returned value is approximate.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+ return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+ now := ts.clock.Time()
+ return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+ ts.mergePendingUpdates()
+ return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+ if start.After(finish) {
+ log.Printf("timeseries: start > finish, %v>%v", start, finish)
+ return nil
+ }
+
+ if num < 0 {
+ log.Printf("timeseries: num < 0, %v", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+
+ for _, l := range ts.levels {
+ if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+ ts.extract(l, start, finish, num, results)
+ return results
+ }
+ }
+
+ // Failed to find a level that covers the desired range. So just
+ // extract from the last level, even if it doesn't cover the entire
+ // desired range.
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+ return results
+}
+
+// RecentList returns num values in a slice covering the most recent period
+// of length delta.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+ if delta < 0 {
+ return nil
+ }
+ now := ts.clock.Time()
+ return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract fills results with num observations taken from the given level
+// over the given range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+ ts.mergePendingUpdates()
+
+ srcInterval := l.size
+ dstInterval := finish.Sub(start) / time.Duration(num)
+ dstStart := start
+ srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+ srcIndex := 0
+
+ // Where should scanning start?
+ if dstStart.After(srcStart) {
+ advance := int(dstStart.Sub(srcStart) / srcInterval)
+ srcIndex += advance
+ srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
+ }
+
+	// The i'th value is computed as shown below.
+	//   interval = (finish - start) / num
+	//   i'th value = sum of observations in range
+	//     [start + i*interval, start + (i+1)*interval)
+ for i := 0; i < num; i++ {
+ results[i] = ts.resetObservation(results[i])
+ dstEnd := dstStart.Add(dstInterval)
+ for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+ srcEnd := srcStart.Add(srcInterval)
+ if srcEnd.After(ts.lastAdd) {
+ srcEnd = ts.lastAdd
+ }
+
+ if !srcEnd.Before(dstStart) {
+ srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+ if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+ // dst completely contains src.
+ if srcValue != nil {
+ results[i].Add(srcValue)
+ }
+ } else {
+ // dst partially overlaps src.
+ overlapStart := maxTime(srcStart, dstStart)
+ overlapEnd := minTime(srcEnd, dstEnd)
+ base := srcEnd.Sub(srcStart)
+ fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+ used := ts.provider()
+ if srcValue != nil {
+ used.CopyFrom(srcValue)
+ }
+ used.Multiply(fraction)
+ results[i].Add(used)
+ }
+
+ if srcEnd.After(dstEnd) {
+ break
+ }
+ }
+ srcIndex++
+ srcStart = srcStart.Add(srcInterval)
+ }
+ dstStart = dstStart.Add(dstInterval)
+ }
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+ if observation == nil {
+ observation = ts.provider()
+ } else {
+ observation.Clear()
+ }
+ return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+ timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the provided function for
+// creating new Observables.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+ return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the provided function
+// for creating new Observables and the clock for assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+ ts := new(TimeSeries)
+ ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+ return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+ timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the provided
+// function for creating new Observables.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+ return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the
+// provided function for creating new Observables and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+ ts := new(MinuteHourSeries)
+ ts.timeSeries.init(minuteHourSeriesResolutions, f,
+ minuteHourSeriesNumBuckets, clock)
+ return ts
+}
+
+func (ts *MinuteHourSeries) Minute() Observable {
+ return ts.timeSeries.Latest(0, 60)
+}
+
+func (ts *MinuteHourSeries) Hour() Observable {
+ return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
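
For orientation, a minimal usage sketch of the package above. The import path is internal to golang.org/x/net, so this compiles only from inside that module, and everything in main is hypothetical:

	package main

	import (
		"fmt"
		"time"

		"golang.org/x/net/internal/timeseries"
	)

	func main() {
		// A TimeSeries of Float observations at the standard resolutions
		// (1 second up to 16 weeks).
		ts := timeseries.NewTimeSeries(timeseries.NewFloat)

		for i := 0; i < 5; i++ {
			obs := timeseries.Float(1)
			ts.Add(&obs)
		}

		// Sum of observations over the last minute; approximate when the
		// window does not align with bucket boundaries.
		recent := ts.Recent(time.Minute)
		fmt.Println(recent.(*timeseries.Float).Value()) // ~5
	}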
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 000000000..c646a6952
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,532 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/tabwriter"
+ "time"
+)
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+ MaxErrAge time.Duration
+ String string
+}
+
+var buckets = []bucket{
+ {0, "total"},
+ {10 * time.Second, "errs<10s"},
+ {1 * time.Minute, "errs<1m"},
+ {10 * time.Minute, "errs<10m"},
+ {1 * time.Hour, "errs<1h"},
+ {10 * time.Hour, "errs<10h"},
+ {24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking. The request may be nil.
+//
+// Most users will use the Events handler.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+ now := time.Now()
+ data := &struct {
+ Families []string // family names
+ Buckets []bucket
+ Counts [][]int // eventLog count per family/bucket
+
+ // Set when a bucket has been selected.
+ Family string
+ Bucket int
+ EventLogs eventLogs
+ Expanded bool
+ }{
+ Buckets: buckets,
+ }
+
+ data.Families = make([]string, 0, len(families))
+ famMu.RLock()
+ for name := range families {
+ data.Families = append(data.Families, name)
+ }
+ famMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // Count the number of eventLogs in each family for each error age.
+ data.Counts = make([][]int, len(data.Families))
+ for i, name := range data.Families {
+ // TODO(sameer): move this loop under the family lock.
+ f := getEventFamily(name)
+ data.Counts[i] = make([]int, len(data.Buckets))
+ for j, b := range data.Buckets {
+ data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+ }
+ }
+
+ if req != nil {
+		var ok bool
+		data.Family, data.Bucket, ok = parseEventsArgs(req)
+		if ok {
+			data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+		}
+ if data.EventLogs != nil {
+ defer data.EventLogs.Free()
+ sort.Sort(data.EventLogs)
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ }
+
+ famMu.RLock()
+ defer famMu.RUnlock()
+ if err := eventsTmpl().Execute(w, data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < 0 || b >= len(buckets) {
+ return "", 0, false
+ }
+ return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+ // Printf formats its arguments with fmt.Sprintf and adds the
+ // result to the event log.
+ Printf(format string, a ...interface{})
+
+ // Errorf is like Printf, but it marks this event as an error.
+ Errorf(format string, a ...interface{})
+
+ // Finish declares that this event log is complete.
+ // The event log should not be used after calling this method.
+ Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+ el := newEventLog()
+ el.ref()
+ el.Family, el.Title = family, title
+ el.Start = time.Now()
+ el.events = make([]logEntry, 0, maxEventsPerLog)
+ el.stack = make([]uintptr, 32)
+ n := runtime.Callers(2, el.stack)
+ el.stack = el.stack[:n]
+
+ getEventFamily(family).add(el)
+ return el
+}
+
+func (el *eventLog) Finish() {
+ getEventFamily(el.Family).remove(el)
+ el.unref() // matches ref in New
+}
+
+var (
+ famMu sync.RWMutex
+ families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+ famMu.Lock()
+ defer famMu.Unlock()
+ f := families[fam]
+ if f == nil {
+ f = &eventFamily{}
+ families[fam] = f
+ }
+ return f
+}
+
+type eventFamily struct {
+ mu sync.RWMutex
+ eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+ f.mu.Lock()
+ f.eventLogs = append(f.eventLogs, el)
+ f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for i, el0 := range f.eventLogs {
+ if el == el0 {
+ copy(f.eventLogs[i:], f.eventLogs[i+1:])
+ f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+ return
+ }
+ }
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ n++
+ }
+ }
+ return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ els = make(eventLogs, 0, len(f.eventLogs))
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ el.ref()
+ els = append(els, el)
+ }
+ }
+ return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+ for _, el := range els {
+ el.unref()
+ }
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in log
+ NewDay bool // whether this event is on a different day to the previous event
+ What string
+ IsErr bool
+}
+
+// WhenString returns a string representation of the elapsed time of the event.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+ // Family is the top-level grouping of event logs to which this belongs.
+ Family string
+
+ // Title is the title of this event log.
+ Title string
+
+ // Timing information.
+ Start time.Time
+
+ // Call stack where this event log was created.
+ stack []uintptr
+
+ // Append-only sequence of events.
+ //
+ // TODO(sameer): change this to a ring buffer to avoid the array copy
+ // when we hit maxEventsPerLog.
+ mu sync.RWMutex
+ events []logEntry
+ LastErrorTime time.Time
+ discarded int
+
+ refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ el.Family = ""
+ el.Title = ""
+ el.Start = time.Time{}
+ el.stack = nil
+ el.events = nil
+ el.LastErrorTime = time.Time{}
+ el.discarded = 0
+ el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+ if maxErrAge == 0 {
+ return true
+ }
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+ if len(el.events) == 0 {
+ return t.Sub(el.Start), false
+ }
+ prev := el.events[len(el.events)-1].When
+	return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+ el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+ el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+ e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+ el.mu.Lock()
+ e.Elapsed, e.NewDay = el.delta(e.When)
+ if len(el.events) < maxEventsPerLog {
+ el.events = append(el.events, e)
+ } else {
+ // Discard the oldest event.
+ if el.discarded == 0 {
+			// el.discarded starts at two to account for the event it
+ // is replacing, plus the next one that we are about to
+ // drop.
+ el.discarded = 2
+ } else {
+ el.discarded++
+ }
+ // TODO(sameer): if this causes allocations on a critical path,
+ // change eventLog.What to be a fmt.Stringer, as in trace.go.
+ el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ el.events[0].When = el.events[1].When
+ copy(el.events[1:], el.events[2:])
+ el.events[maxEventsPerLog-1] = e
+ }
+ if e.IsErr {
+ el.LastErrorTime = e.When
+ }
+ el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+ atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+ if atomic.AddInt32(&el.refs, -1) == 0 {
+ freeEventLog(el)
+ }
+}
+
+func (el *eventLog) When() string {
+ return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+ elapsed := time.Since(el.Start)
+ return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+ buf := new(bytes.Buffer)
+ tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+ printStackRecord(tw, el.stack)
+ tw.Flush()
+ return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ if strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+ }
+}
+
+func (el *eventLog) Events() []logEntry {
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+ select {
+ case el := <-freeEventLogs:
+ return el
+ default:
+ return new(eventLog)
+ }
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+ el.reset()
+ select {
+ case freeEventLogs <- el:
+ default:
+ }
+}
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+ eventsTmplOnce.Do(func() {
+ eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "trimSpace": strings.TrimSpace,
+ }).Parse(eventsHTML))
+ })
+ return eventsTmplCache
+}
+
+const eventsHTML = `
+<html>
+ <head>
+ <title>events</title>
+ </head>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#req-status td.family {
+ padding-right: 2em;
+ }
+ table#req-status td.active {
+ padding-right: 1em;
+ }
+ table#req-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ <body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+ {{range $i, $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{range $j, $bucket := $.Buckets}}
+ {{$n := index $.Counts $i $j}}
+ <td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} {{$bucket.String}}]
+ {{if $n}}</a>{{end}}
+ </td>
+ {{end}}
+
+ </tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+ <tr><th>When</th><th>Elapsed</th></tr>
+ {{range $el := $.EventLogs}}
+ <tr class="first">
+ <td class="when">{{$el.When}}</td>
+ <td class="elapsed">{{$el.ElapsedTime}}</td>
+ <td>{{$el.Title}}
+ </tr>
+ {{if $.Expanded}}
+ <tr>
+ <td class="when"></td>
+ <td class="elapsed"></td>
+ <td><pre>{{$el.Stack|trimSpace}}</pre></td>
+ </tr>
+ {{range $el.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}}
+ </body>
+</html>
+`
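
For orientation, a minimal sketch of the EventLog API that this file backs, in the spirit of the Fetcher example in trace.go's package comment (the worker and doConnect names here are hypothetical):

	package main

	import (
		"errors"

		"golang.org/x/net/trace"
	)

	// doConnect stands in for real work; it always fails here.
	func doConnect() error { return errors.New("dial timeout") }

	func main() {
		// One EventLog per long-lived object; the family name groups logs
		// together on the /debug/events page.
		el := trace.NewEventLog("example.Worker", "worker-1")
		defer el.Finish()

		el.Printf("starting up")
		if err := doConnect(); err != nil {
			// Errorf marks the log as having a recent error, moving it
			// into the errs<10s, errs<1m, ... buckets.
			el.Errorf("connect failed: %v", err)
		}
	}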
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 000000000..9bf4286c7
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+ "sync"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+	sum int64 // running total of measurements
+	sumOfSquares float64 // running total of squared measurements
+	buckets []int64 // bucketed values for histogram
+	value int // bucket index of the single value held as an optimization
+	valueCount int64 // number of values recorded for the single value; -1 once buckets are in use
+}
+
+// addMeasurement records a measurement in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
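+	// valueCount == -1 (set by allocateBuckets) means the buckets slice is
+	// in use; otherwise try to stay on the single-value fast path.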
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+		n++
+ }
+ return n
+}
+
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
+
+// total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+		total += val
+ }
+ return
+}
+
+// average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// standardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// percentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
+
+// median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument.
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl().Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+var distTmplCache *template.Template
+var distTmplOnce sync.Once
+
+func distTmpl() *template.Template {
+ distTmplOnce.Do(func() {
+ // Input: data
+ distTmplCache = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+ <td style="padding:0.25em">Count: {{.Count}}</td>
+ <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+ <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+ <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+ <tr>
+ <td style="padding:0 0 0 0.25em">[</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+ <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+ <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+ </tr>
+{{end}}
+{{end}}
+</table>
+`))
+ })
+ return distTmplCache
+}
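
The power-of-two bucketing above is the crux of the file; since histogram is unexported, the sketch below mirrors log2/getBucket rather than importing them:

	package main

	import "fmt"

	// bucketFor mirrors getBucket above: floor(log2(v)) clamped to [0, 37],
	// so values land in buckets [0,1], [2,3], [4,7], [8,15], ...
	func bucketFor(v int64) int {
		n := 0
		for ; v > 0; v >>= 1 {
			n++
		}
		idx := n - 1
		if idx < 0 {
			idx = 0
		}
		if idx > 37 {
			idx = 37
		}
		return idx
	}

	func main() {
		for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
			fmt.Printf("%4d -> bucket %d\n", v, bucketFor(v))
		}
	}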
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 000000000..3ebf6f2da
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1130 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request
+durations for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// HTTP ServeMux paths.
+const (
+ debugRequestsPath = "/debug/requests"
+ debugEventsPath = "/debug/events"
+)
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customize its authorization requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ // RemoteAddr is commonly in the form "IP" or "IP:port".
+ // If it is in the form "IP:port", split off the port.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+ switch host {
+ case "localhost", "127.0.0.1", "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
+
+func init() {
+ _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}})
+ if pat == debugRequestsPath {
+ panic("/debug/requests is already registered. You may have two independent copies of " +
+ "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " +
+ "involve a vendored copy of golang.org/x/net/trace.")
+ }
+
+ // TODO(jbd): Serve Traces from /debug/traces in the future?
+ // There is no requirement for a request to be present to have traces.
+ http.HandleFunc(debugRequestsPath, Traces)
+ http.HandleFunc(debugEventsPath, Events)
+}
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
+
+// Traces responds with traces from the program.
+// The package initialization registers it in http.DefaultServeMux
+// at /debug/requests.
+//
+// It performs authorization by running AuthRequest.
+func Traces(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ Render(w, req, sensitive)
+}
+
+// Events responds with a page of events collected by EventLogs.
+// The package initialization registers it in http.DefaultServeMux
+// at /debug/events.
+//
+// It performs authorization by running AuthRequest.
+func Events(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ RenderEvents(w, req, sensitive)
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking. The request may be nil.
+//
+// Most users will use the Traces handler.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.maxEvents = maxEventsPerTrace
+ tr.events = tr.eventsBuf[:0]
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+	elapsed := time.Since(tr.Start)
+ tr.mu.Lock()
+ tr.Elapsed = elapsed
+ tr.mu.Unlock()
+
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ tr.mu.RLock() // protects tr fields in Cond.match calls
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ tr.mu.RUnlock()
+
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
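+		// Wrapping at b.length is safe: b.start is non-zero only when the
+		// buffer is full, in which case b.length == tracesPerBucket.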
+ if x == b.length {
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day from the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ Sensitive bool // whether this event contains sensitive information
+ What interface{} // string or fmt.Stringer
+}
+
+// WhenString returns a string representation of the time at which
+// the event occurred.
+// It includes the date if midnight was crossed since the previous event.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Start time of this trace.
+ Start time.Time
+
+ mu sync.RWMutex
+ events []event // Append-only sequence of events (modulo discards).
+ maxEvents int
+ recycler func(interface{})
+ IsError bool // Whether this trace resulted in an error.
+ Elapsed time.Duration // Elapsed time for this trace, zero while active.
+ traceID uint64 // Trace information if non-zero.
+ spanID uint64
+
+ refs int32 // how many buckets this is in
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+
+ eventsBuf [4]event // preallocated buffer in case we only log a few events
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+
+ tr.mu.Lock()
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.maxEvents = 0
+ tr.events = nil
+ tr.recycler = nil
+ tr.mu.Unlock()
+
+ tr.refs = 0
+ tr.disc = 0
+ tr.finishStack = nil
+ for i := range tr.eventsBuf {
+ tr.eventsBuf[i] = event{}
+ }
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu (the caller must already hold tr.mu).
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+ a trace.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < tr.maxEvents {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := int((tr.maxEvents - 1) / 2)
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+ // disc starts at two to account for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[tr.maxEvents-1] = e
+ }
+ tr.mu.Unlock()
+}
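+
+// Worked example (illustrative): with maxEvents = 5, once e0..e4 have
+// been logged, adding e5 replaces the middle event e2 with a *discarded
+// counter set to 2 (covering e2 and the dropped e3), stamps it with
+// e3's timestamp, shifts e4 left, and stores e5 in the last slot:
+//
+//	[e0 e1 e2 e3 e4] -> [e0 e1 (2 events discarded) e4 e5]
+//
+// Each subsequent overflow increments the counter and drops the event
+// immediately after it.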
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
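+
+// LazyLog marks its event Recyclable, so a recycler installed with
+// SetRecycler is handed the value back once the event is discarded or
+// the trace is freed; LazyPrintf's arguments are never recycled.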
+
+func (tr *trace) SetError() {
+ tr.mu.Lock()
+ tr.IsError = true
+ tr.mu.Unlock()
+}
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.mu.Lock()
+ tr.recycler = f
+ tr.mu.Unlock()
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.mu.Lock()
+ tr.traceID, tr.spanID = traceID, spanID
+ tr.mu.Unlock()
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ tr.mu.Lock()
+ // Always keep at least three events: first, discarded count, last.
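+ // The limit can only be set before the first event is logged.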
+ if len(tr.events) == 0 && m > 3 {
+ tr.maxEvents = m
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ tr.mu.RLock()
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+ tr.mu.RUnlock()
+
+ freeTrace(tr)
+ }
+}
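+
+// refs counts the buckets holding the trace plus any traceLists that
+// have copied it (Copy and FirstN call ref; traceList.Free and bucket
+// eviction call unref), so a trace is recycled only after its last
+// holder releases it.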
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ tr.mu.RLock()
+ t := tr.Elapsed
+ tr.mu.RUnlock()
+
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
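+
+// traceFreeList is the classic buffered-channel object pool: both sides
+// select with a default case, so an empty or full pool never blocks.
+// A standalone sketch of the same non-blocking pattern (illustrative,
+// hypothetical helpers):
+//
+//	var pool = make(chan *bytes.Buffer, 16)
+//
+//	func getBuf() *bytes.Buffer {
+//		select {
+//		case b := <-pool:
+//			return b
+//		default:
+//			return new(bytes.Buffer)
+//		}
+//	}
+//
+//	func putBuf(b *bytes.Buffer) {
+//		b.Reset()
+//		select {
+//		case pool <- b:
+//		default: // pool full; let the GC take it
+//		}
+//	}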
+
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
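+
+// For example (illustrative): elapsed(1500*time.Microsecond) starts from
+// "0.001500" and blanks the leading zero and the zeros just after the
+// decimal point, yielding " .  1500". Durations of a second or more,
+// such as elapsed(2*time.Second) == "2.000000", are returned unchanged.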
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+ pageTmplOnce.Do(func() {
+ pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+ }).Parse(pageHTML))
+ })
+ return pageTmplCache
+}
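+
+// The template is parsed once, on first use. Of its helper functions,
+// "add" computes the extra latency-column bucket indices in the
+// StatusTable section below, and "elapsed" formats per-event durations
+// in the Epilog section.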
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+ <head>
+ <title>/debug/requests</title>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#tr-status td.family {
+ padding-right: 2em;
+ }
+ table#tr-status td.active {
+ padding-right: 1em;
+ }
+ table#tr-status td.latency-first {
+ padding-left: 1em;
+ }
+ table#tr-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ </head>
+ <body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+ {{range $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{$n := index $.ActiveTraceCount $fam}}
+ <td class="active {{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} active]
+ {{if $n}}</a>{{end}}
+ </td>
+
+ {{$f := index $.CompletedTraces $fam}}
+ {{range $i, $b := $f.Buckets}}
+ {{$empty := $b.Empty}}
+ <td {{if $empty}}class="empty"{{end}}>
+ {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{.Cond}}]
+ {{if not $empty}}</a>{{end}}
+ </td>
+ {{end}}
+
+ {{$nb := len $f.Buckets}}
+ <td class="latency-first">
+ <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+ </td>
+
+ </tr>
+ {{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+ [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+ [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+ {{if or $.Expanded (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+ {{else}}
+ [Traced/Summary]
+ {{end}}
+ {{if or (not $.Expanded) (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+ {{else}}
+ [Traced/Expanded]
+ {{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+ <caption>
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests
+ </caption>
+ <tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+ {{range $tr := $.Traces}}
+ <tr class="first">
+ <td class="when">{{$tr.When}}</td>
+ <td class="elapsed">{{$tr.ElapsedTime}}</td>
+ <td>{{$tr.Title}}</td>
+ {{/* TODO: include traceID/spanID */}}
+ </tr>
+ {{if $.Expanded}}
+ {{range $tr.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+ </body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`