1
0
Fork 0

Vendor main dependencies.

This commit is contained in:
Timo Reimann 2017-02-07 22:33:23 +01:00
parent 49a09ab7dd
commit dd5e3fba01
2738 changed files with 1045689 additions and 0 deletions

99
vendor/github.com/vulcand/oxy/memmetrics/anomaly.go generated vendored Normal file
View file

@ -0,0 +1,99 @@
package memmetrics
import (
"math"
"sort"
"time"
)
// SplitLatencies provides simple anomaly detection for request latencies.
// (The original doc comment misnamed this function "SplitRatios".)
// It splits values into good or bad categories based on the threshold and the median value.
// If all values are not far from the median, it will return all values in the 'good' set.
// Precision is the smallest value to consider, e.g. if set to millisecond, microseconds will be ignored.
func SplitLatencies(values []time.Duration, precision time.Duration) (good map[time.Duration]bool, bad map[time.Duration]bool) {
	// Find the max latency M, map each latency L to the ratio L/M, and delegate to SplitFloat64.
	ratioToLatency := map[float64]time.Duration{}
	ratios := make([]float64, len(values))
	m := maxTime(values)
	for i, v := range values {
		// +1 avoids division by 0 when a value truncates to zero precision units.
		ratio := float64(v/precision+1) / float64(m/precision+1)
		ratioToLatency[ratio] = v
		ratios[i] = ratio
	}
	good, bad = make(map[time.Duration]bool), make(map[time.Duration]bool)
	// Threshold 2 makes this detector less sensitive than the plain ratios detector, to avoid noise.
	vgood, vbad := SplitFloat64(2, 0, ratios)
	for r := range vgood {
		good[ratioToLatency[r]] = true
	}
	for r := range vbad {
		bad[ratioToLatency[r]] = true
	}
	return good, bad
}
// SplitRatios provides simple anomaly detection for ratio values that all lie in the range [0, 1].
// It splits values into good or bad categories based on the threshold and the median value.
// If all values are not far from the median, it will return all values in the 'good' set.
func SplitRatios(values []float64) (good map[float64]bool, bad map[float64]bool) {
	const ratioThreshold = 1.5
	good, bad = SplitFloat64(ratioThreshold, 0, values)
	return good, bad
}
// SplitFloat64 provides simple anomaly detection for skewed data sets with no particular distribution.
// In essence a value is flagged as an anomaly when v > (median(values) + medianAbsoluteDeviation(values)) * threshold.
// There's a corner case where there are just 2 values, so by definition there's no value that exceeds the threshold.
// This case is solved by introducing an additional value that we know is good, e.g. 0; that helps to improve the
// detection results on such data sets.
func SplitFloat64(threshold, sentinel float64, values []float64) (good map[float64]bool, bad map[float64]bool) {
	good, bad = make(map[float64]bool), make(map[float64]bool)
	sample := values
	if len(values)%2 == 0 {
		// Pad with the known-good sentinel so the sample size is odd and outliers stand out.
		sample = make([]float64, len(values)+1)
		copy(sample, values)
		sample[len(sample)-1] = sentinel
	}
	m := median(sample)
	mAbs := medianAbsoluteDeviation(sample)
	cutoff := (m + mAbs) * threshold
	for _, v := range values {
		if v > cutoff {
			bad[v] = true
		} else {
			good[v] = true
		}
	}
	return good, bad
}
// median returns the statistical median of values without mutating the input slice.
func median(values []float64) float64 {
	sorted := append([]float64(nil), values...)
	sort.Float64s(sorted)
	n := len(sorted)
	if n%2 == 1 {
		return sorted[n/2]
	}
	// Even count: average the two middle elements.
	return (sorted[n/2-1] + sorted[n/2]) / 2.0
}
// medianAbsoluteDeviation returns the median of the absolute distances of values from their median.
func medianAbsoluteDeviation(values []float64) float64 {
	center := median(values)
	deviations := make([]float64, len(values))
	for i := range values {
		deviations[i] = math.Abs(values[i] - center)
	}
	return median(deviations)
}
func maxTime(vals []time.Duration) time.Duration {
val := vals[0]
for _, v := range vals {
if v > val {
val = v
}
}
return val
}

155
vendor/github.com/vulcand/oxy/memmetrics/counter.go generated vendored Normal file
View file

@ -0,0 +1,155 @@
package memmetrics
import (
"fmt"
"time"
"github.com/mailgun/timetools"
)
// rcOptSetter is a functional option applied to a RollingCounter at construction time.
type rcOptSetter func(*RollingCounter) error

// CounterClock sets the counter's time provider; primarily useful for tests.
func CounterClock(c timetools.TimeProvider) rcOptSetter {
	return func(r *RollingCounter) error {
		r.clock = c
		return nil
	}
}
// RollingCounter calculates an in-memory failure rate of an endpoint using a rolling window of a predefined size.
type RollingCounter struct {
	clock          timetools.TimeProvider
	resolution     time.Duration // width of a single bucket
	values         []int         // one counter per bucket, addressed circularly by getBucket
	countedBuckets int           // how many samples in different buckets have we collected so far
	lastBucket     int           // last recorded bucket (-1 until first increment)
	lastUpdated    time.Time     // time of the most recent increment; drives bucket expiry in cleanup
}
// NewCounter creates a counter with a fixed amount of buckets that are rotated every resolution period.
// E.g. 10 buckets with 1 second means that every new second the bucket is refreshed, so it maintains a 10 second rolling window.
// By default creates a bucket with 10 buckets and 1 second resolution.
func NewCounter(buckets int, resolution time.Duration, options ...rcOptSetter) (*RollingCounter, error) {
	if buckets <= 0 {
		// The check requires a strictly positive count; the original message wrongly claimed ">= 0".
		// Error strings are lowercased per Go convention.
		return nil, fmt.Errorf("buckets should be > 0")
	}
	if resolution < time.Second {
		return nil, fmt.Errorf("resolution should be larger than or equal to a second")
	}
	rc := &RollingCounter{
		lastBucket: -1,
		resolution: resolution,
		values:     make([]int, buckets),
	}
	for _, o := range options {
		if err := o(rc); err != nil {
			return nil, err
		}
	}
	// Fall back to the real wall clock when no CounterClock option was supplied.
	if rc.clock == nil {
		rc.clock = &timetools.RealTime{}
	}
	return rc, nil
}
// Append folds o's whole-window total into c's current bucket.
// Note: it does not merge bucket by bucket; o's distribution over time is lost.
func (c *RollingCounter) Append(o *RollingCounter) error {
	c.Inc(int(o.Count()))
	return nil
}
// Clone returns a copy of the counter with its own backing bucket slice,
// expiring stale buckets first so the snapshot is current.
// NOTE(review): countedBuckets is not copied (preserved from the original), so the
// clone reports 0 counted buckets until it accumulates data — confirm this is intended.
func (c *RollingCounter) Clone() *RollingCounter {
	c.cleanup()
	other := &RollingCounter{
		resolution:  c.resolution,
		values:      make([]int, len(c.values)),
		clock:       c.clock,
		lastBucket:  c.lastBucket,
		lastUpdated: c.lastUpdated,
	}
	// copy is the idiomatic replacement for the original element-by-element loop.
	copy(other.values, c.values)
	return other
}
// Reset returns the counter to its pristine zero state.
func (c *RollingCounter) Reset() {
	c.lastBucket = -1
	c.countedBuckets = 0
	c.lastUpdated = time.Time{}
	// A fresh zeroed slice of the same length is externally indistinguishable
	// from zeroing each element in place.
	c.values = make([]int, len(c.values))
}
// CountedBuckets reports how many distinct buckets have received data so far.
func (c *RollingCounter) CountedBuckets() int {
	return c.countedBuckets
}
// Count returns the total over the rolling window, expiring stale buckets first.
func (c *RollingCounter) Count() int64 {
	c.cleanup()
	return c.sum()
}
// Resolution returns the width of a single bucket.
func (c *RollingCounter) Resolution() time.Duration {
	return c.resolution
}
// Buckets returns the number of buckets in the rolling window.
func (c *RollingCounter) Buckets() int {
	return len(c.values)
}
// WindowSize returns the total time span covered by the window (buckets * resolution).
func (c *RollingCounter) WindowSize() time.Duration {
	return time.Duration(len(c.values)) * c.resolution
}
// Inc adds v to the bucket for the current time, expiring stale buckets first.
func (c *RollingCounter) Inc(v int) {
	c.cleanup()
	c.incBucketValue(v)
}
// incBucketValue adds v to the bucket corresponding to the current time and
// tracks how many distinct buckets have been touched so far.
func (c *RollingCounter) incBucketValue(v int) {
	now := c.clock.UtcNow()
	bucket := c.getBucket(now)
	c.values[bucket] += v
	c.lastUpdated = now
	// Update usage stats if we haven't collected enough data
	if c.countedBuckets < len(c.values) {
		// Only update if we have advanced to the next bucket and not incremented the value
		// in the current bucket.
		if c.lastBucket != bucket {
			c.lastBucket = bucket
			c.countedBuckets++
		}
	}
}
// getBucket maps a point in time onto a bucket index in the circular window:
// Unix seconds truncated to the resolution, modulo the bucket count.
// NOTE(review): for resolutions longer than 1s, consecutive periods advance the
// index by more than one slot — confirm that is acceptable for all callers.
func (c *RollingCounter) getBucket(t time.Time) int {
	return int(t.Truncate(c.resolution).Unix() % int64(len(c.values)))
}
// cleanup zeroes buckets that have not been updated within the current window.
// It steps back one resolution period at a time from now and clears every bucket
// whose (truncated) time is newer than the last recorded update.
func (c *RollingCounter) cleanup() {
	now := c.clock.UtcNow()
	for i := 0; i < len(c.values); i++ {
		// Bug fix: the original reassigned `now` each iteration
		// (now = now.Add(-i * resolution)), so the offsets accumulated as
		// 0, -1, -3, -6, ... periods instead of stepping back one period per
		// iteration. Compute a fresh checkpoint from the fixed `now` instead.
		checkPoint := now.Add(time.Duration(-i) * c.resolution)
		if checkPoint.Truncate(c.resolution).After(c.lastUpdated.Truncate(c.resolution)) {
			c.values[c.getBucket(checkPoint)] = 0
		} else {
			break
		}
	}
}
// sum totals all bucket values without expiring stale buckets.
func (c *RollingCounter) sum() int64 {
	var total int64
	for i := range c.values {
		total += int64(c.values[i])
	}
	return total
}

174
vendor/github.com/vulcand/oxy/memmetrics/histogram.go generated vendored Normal file
View file

@ -0,0 +1,174 @@
package memmetrics
import (
"fmt"
"time"
"github.com/codahale/hdrhistogram"
"github.com/mailgun/timetools"
)
// HDRHistogram is a tiny wrapper around github.com/codahale/hdrhistogram that provides convenience functions for measuring http latencies
type HDRHistogram struct {
	// lowest trackable value
	low int64
	// highest trackable value
	high int64
	// significant figures
	sigfigs int
	// underlying histogram holding the recorded values
	h *hdrhistogram.Histogram
}
// NewHDRHistogram builds a histogram tracking values in [low, high] with the given
// number of significant figures. The deferred recover converts a panic raised by
// hdrhistogram.New on invalid parameters into a returned error (h stays nil).
func NewHDRHistogram(low, high int64, sigfigs int) (h *HDRHistogram, err error) {
	defer func() {
		if msg := recover(); msg != nil {
			err = fmt.Errorf("%s", msg)
		}
	}()
	return &HDRHistogram{
		low:     low,
		high:    high,
		sigfigs: sigfigs,
		h:       hdrhistogram.New(low, high, sigfigs),
	}, nil
}
// LatencyAtQuantile returns the latency at quantile q with microsecond precision
// (recorded values are interpreted as microseconds).
func (h *HDRHistogram) LatencyAtQuantile(q float64) time.Duration {
	return time.Duration(h.ValueAtQuantile(q)) * time.Microsecond
}
// RecordLatencies records n occurrences of latency d with microsecond precision.
func (h *HDRHistogram) RecordLatencies(d time.Duration, n int64) error {
	return h.RecordValues(int64(d/time.Microsecond), n)
}
// Reset clears all recorded values from the underlying histogram.
func (h *HDRHistogram) Reset() {
	h.h.Reset()
}
// ValueAtQuantile delegates to the underlying histogram's quantile lookup.
func (h *HDRHistogram) ValueAtQuantile(q float64) int64 {
	return h.h.ValueAtQuantile(q)
}
// RecordValues records n occurrences of value v, returning the underlying histogram's error, if any.
func (h *HDRHistogram) RecordValues(v, n int64) error {
	return h.h.RecordValues(v, n)
}
// Merge folds other's recorded values into h; errors when other is nil.
// NOTE(review): the return value of the underlying Merge call is discarded —
// confirm no drop/error information is lost here.
func (h *HDRHistogram) Merge(other *HDRHistogram) error {
	if other == nil {
		return fmt.Errorf("other is nil")
	}
	h.h.Merge(other.h)
	return nil
}
// rhOptSetter is a functional option applied to a RollingHDRHistogram at construction time.
type rhOptSetter func(r *RollingHDRHistogram) error

// RollingClock sets the time provider used to decide when to rotate buckets; useful for tests.
func RollingClock(clock timetools.TimeProvider) rhOptSetter {
	return func(r *RollingHDRHistogram) error {
		r.clock = clock
		return nil
	}
}
// RollingHDRHistogram holds multiple histograms and rotates to the next one every period.
// It provides the resulting histogram as a result of a call of 'Merged' function.
type RollingHDRHistogram struct {
	idx         int       // index of the currently active bucket
	lastRoll    time.Time // when the active bucket last rotated
	period      time.Duration
	bucketCount int
	low         int64 // bounds/precision passed through to each sub-histogram
	high        int64
	sigfigs     int
	buckets     []*HDRHistogram
	clock       timetools.TimeProvider
}
// NewRollingHDRHistogram creates a rolling histogram of bucketCount sub-histograms,
// each configured with the given bounds and precision, rotating every period.
func NewRollingHDRHistogram(low, high int64, sigfigs int, period time.Duration, bucketCount int, options ...rhOptSetter) (*RollingHDRHistogram, error) {
	rh := &RollingHDRHistogram{
		bucketCount: bucketCount,
		period:      period,
		low:         low,
		high:        high,
		sigfigs:     sigfigs,
	}
	for _, setter := range options {
		if err := setter(rh); err != nil {
			return nil, err
		}
	}
	if rh.clock == nil {
		// Fall back to the real wall clock when no RollingClock option was given.
		rh.clock = &timetools.RealTime{}
	}
	rh.buckets = make([]*HDRHistogram, 0, rh.bucketCount)
	for i := 0; i < rh.bucketCount; i++ {
		hist, err := NewHDRHistogram(low, high, sigfigs)
		if err != nil {
			return nil, err
		}
		rh.buckets = append(rh.buckets, hist)
	}
	return rh, nil
}
// Append merges every bucket of o into the corresponding bucket of r.
// Both rolling histograms must share an identical configuration.
func (r *RollingHDRHistogram) Append(o *RollingHDRHistogram) error {
	sameConfig := r.bucketCount == o.bucketCount &&
		r.period == o.period &&
		r.low == o.low &&
		r.high == o.high &&
		r.sigfigs == o.sigfigs
	if !sameConfig {
		return fmt.Errorf("can't merge")
	}
	for i, bucket := range r.buckets {
		if err := bucket.Merge(o.buckets[i]); err != nil {
			return err
		}
	}
	return nil
}
// Reset clears all buckets and restarts the rotation schedule from now.
func (r *RollingHDRHistogram) Reset() {
	r.idx = 0
	r.lastRoll = r.clock.UtcNow()
	for _, b := range r.buckets {
		b.Reset()
	}
}
// rotate advances to the next bucket in the ring and clears it for reuse.
func (r *RollingHDRHistogram) rotate() {
	r.idx = (r.idx + 1) % len(r.buckets)
	r.buckets[r.idx].Reset()
}
// Merged returns a fresh histogram containing the merged contents of all buckets.
func (r *RollingHDRHistogram) Merged() (*HDRHistogram, error) {
	m, err := NewHDRHistogram(r.low, r.high, r.sigfigs)
	if err != nil {
		return m, err
	}
	for _, h := range r.buckets {
		// Bug fix: the original wrote `if m.Merge(h); err != nil`, which discarded
		// Merge's result and re-tested the stale (nil) err from NewHDRHistogram,
		// so merge failures were silently ignored.
		if err := m.Merge(h); err != nil {
			return nil, err
		}
	}
	return m, nil
}
// getHist returns the active bucket, rotating first when the current period has elapsed.
// NOTE(review): only one rotation happens per call even if several periods elapsed,
// and UtcNow is read twice (the stored lastRoll is slightly later than the instant
// used for the elapsed check) — confirm both are acceptable.
func (r *RollingHDRHistogram) getHist() *HDRHistogram {
	if r.clock.UtcNow().Sub(r.lastRoll) >= r.period {
		r.rotate()
		r.lastRoll = r.clock.UtcNow()
	}
	return r.buckets[r.idx]
}
// RecordLatencies records n occurrences of latency v into the active bucket.
func (r *RollingHDRHistogram) RecordLatencies(v time.Duration, n int64) error {
	return r.getHist().RecordLatencies(v, n)
}
// RecordValues records n occurrences of value v into the active bucket.
func (r *RollingHDRHistogram) RecordValues(v, n int64) error {
	return r.getHist().RecordValues(v, n)
}

120
vendor/github.com/vulcand/oxy/memmetrics/ratio.go generated vendored Normal file
View file

@ -0,0 +1,120 @@
package memmetrics
import (
"time"
"github.com/mailgun/timetools"
)
// ratioOptSetter is a functional option applied to a RatioCounter at construction time.
type ratioOptSetter func(r *RatioCounter) error

// RatioClock sets the time provider shared by both underlying counters; useful for tests.
func RatioClock(clock timetools.TimeProvider) ratioOptSetter {
	return func(r *RatioCounter) error {
		r.clock = clock
		return nil
	}
}
// RatioCounter calculates a ratio of a/(a+b) over a rolling window of predefined buckets.
// The meaning of a and b is up to the caller (via IncA/IncB).
type RatioCounter struct {
	clock timetools.TimeProvider
	a     *RollingCounter // numerator counter
	b     *RollingCounter // the rest of the denominator
}
// NewRatioCounter creates a ratio counter backed by two rolling counters that share
// the same bucket count, resolution and clock.
func NewRatioCounter(buckets int, resolution time.Duration, options ...ratioOptSetter) (*RatioCounter, error) {
	rc := &RatioCounter{}
	for _, setter := range options {
		if err := setter(rc); err != nil {
			return nil, err
		}
	}
	if rc.clock == nil {
		rc.clock = &timetools.RealTime{}
	}
	// Build both underlying counters with identical settings.
	for _, target := range []**RollingCounter{&rc.a, &rc.b} {
		counter, err := NewCounter(buckets, resolution, CounterClock(rc.clock))
		if err != nil {
			return nil, err
		}
		*target = counter
	}
	return rc, nil
}
// Reset clears both underlying counters.
func (r *RatioCounter) Reset() {
	r.a.Reset()
	r.b.Reset()
}
// IsReady reports whether enough distinct buckets (summed across both counters)
// have been filled to cover a full rolling window, making the ratio meaningful.
func (r *RatioCounter) IsReady() bool {
	return r.a.countedBuckets+r.b.countedBuckets >= len(r.a.values)
}
// CountA returns the windowed total of the numerator counter.
func (r *RatioCounter) CountA() int64 {
	return r.a.Count()
}
// CountB returns the windowed total of the b counter.
func (r *RatioCounter) CountB() int64 {
	return r.b.Count()
}
// Resolution returns the bucket width shared by both counters.
func (r *RatioCounter) Resolution() time.Duration {
	return r.a.Resolution()
}
// Buckets returns the bucket count shared by both counters.
func (r *RatioCounter) Buckets() int {
	return r.a.Buckets()
}
// WindowSize returns the rolling-window span shared by both counters.
func (r *RatioCounter) WindowSize() time.Duration {
	return r.a.WindowSize()
}
// ProcessedCount returns the combined total of both counters over the window.
func (r *RatioCounter) ProcessedCount() int64 {
	return r.CountA() + r.CountB()
}
// Ratio returns a/(a+b) over the rolling window, or 0 when no data has been recorded.
func (r *RatioCounter) Ratio() float64 {
	a, b := r.a.Count(), r.b.Count()
	total := a + b
	// No data yet: report a zero ("ok") ratio rather than dividing by zero.
	if total == 0 {
		return 0
	}
	return float64(a) / float64(total)
}
// IncA adds v to the numerator counter.
func (r *RatioCounter) IncA(v int) {
	r.a.Inc(v)
}
// IncB adds v to the b counter.
func (r *RatioCounter) IncB(v int) {
	r.b.Inc(v)
}
type TestMeter struct {
Rate float64
NotReady bool
WindowSize time.Duration
}
func (tm *TestMeter) GetWindowSize() time.Duration {
return tm.WindowSize
}
func (tm *TestMeter) IsReady() bool {
return !tm.NotReady
}
func (tm *TestMeter) GetRate() float64 {
return tm.Rate
}

259
vendor/github.com/vulcand/oxy/memmetrics/roundtrip.go generated vendored Normal file
View file

@ -0,0 +1,259 @@
package memmetrics
import (
"errors"
"net/http"
"sync"
"time"
"github.com/mailgun/timetools"
)
// RTMetrics provides aggregated performance metrics for HTTP requests processing
// such as round trip latency, response codes counters network error and total requests.
// all counters are collected as rolling window counters with defined precision, histograms
// are a rolling window histograms with defined precision as well.
// See RTOptions for more detail on parameters.
type RTMetrics struct {
	total           *RollingCounter         // all recorded requests
	netErrors       *RollingCounter         // requests counted as network errors (502/504 in Record)
	statusCodes     map[int]*RollingCounter // per-status-code counters, lazily created
	statusCodesLock sync.RWMutex            // guards statusCodes
	histogram       *RollingHDRHistogram    // latency distribution
	newCounter      NewCounterFn            // factory for rolling counters
	newHist         NewRollingHistogramFn   // factory for the latency histogram
	clock           timetools.TimeProvider
}
// rrOptSetter is a functional option applied to RTMetrics at construction time.
type rrOptSetter func(r *RTMetrics) error

// NewRTMetricsFn builds a fresh RTMetrics instance.
type NewRTMetricsFn func() (*RTMetrics, error)

// NewCounterFn builds a rolling counter with the collector's settings.
type NewCounterFn func() (*RollingCounter, error)

// NewRollingHistogramFn builds a rolling histogram with the collector's settings.
type NewRollingHistogramFn func() (*RollingHDRHistogram, error)
// RTCounter overrides the factory used to create rolling counters.
// The parameter was renamed from `new`, which shadowed the builtin of the same name.
func RTCounter(newCounter NewCounterFn) rrOptSetter {
	return func(r *RTMetrics) error {
		r.newCounter = newCounter
		return nil
	}
}
// RTHistogram overrides the factory used to create the rolling latency histogram.
// The parameter was renamed from `new`, which shadowed the builtin of the same name.
func RTHistogram(newHist NewRollingHistogramFn) rrOptSetter {
	return func(r *RTMetrics) error {
		r.newHist = newHist
		return nil
	}
}
// RTClock overrides the collector's time provider; useful for tests.
func RTClock(clock timetools.TimeProvider) rrOptSetter {
	return func(r *RTMetrics) error {
		r.clock = clock
		return nil
	}
}
// NewRTMetrics returns new instance of metrics collector.
func NewRTMetrics(settings ...rrOptSetter) (*RTMetrics, error) {
	m := &RTMetrics{
		statusCodes:     make(map[int]*RollingCounter),
		statusCodesLock: sync.RWMutex{},
	}
	// Apply caller overrides first so the defaults below only fill the gaps.
	for _, s := range settings {
		if err := s(m); err != nil {
			return nil, err
		}
	}
	if m.clock == nil {
		m.clock = &timetools.RealTime{}
	}
	// Default factories use the package-level counter/histogram constants and
	// share the collector's clock.
	if m.newCounter == nil {
		m.newCounter = func() (*RollingCounter, error) {
			return NewCounter(counterBuckets, counterResolution, CounterClock(m.clock))
		}
	}
	if m.newHist == nil {
		m.newHist = func() (*RollingHDRHistogram, error) {
			return NewRollingHDRHistogram(histMin, histMax, histSignificantFigures, histPeriod, histBuckets, RollingClock(m.clock))
		}
	}
	h, err := m.newHist()
	if err != nil {
		return nil, err
	}
	netErrors, err := m.newCounter()
	if err != nil {
		return nil, err
	}
	total, err := m.newCounter()
	if err != nil {
		return nil, err
	}
	m.histogram = h
	m.netErrors = netErrors
	m.total = total
	return m, nil
}
// CounterWindowSize returns the rolling-window span of the underlying counters.
func (m *RTMetrics) CounterWindowSize() time.Duration {
	return m.total.WindowSize()
}
// NetworkErrorRatio calculates the amount of network errors such as time outs and
// dropped connections that occurred in the given time window compared to the total
// requests count. (Fixes the original comment, which misnamed the function and had
// typos.)
func (m *RTMetrics) NetworkErrorRatio() float64 {
	// Evaluate Count() once: each call expires stale buckets and re-sums, and the
	// original's second call could disagree with the first if the window rolled
	// between them.
	total := m.total.Count()
	if total == 0 {
		return 0
	}
	return float64(m.netErrors.Count()) / float64(total)
}
// ResponseCodeRatio calculates the ratio of count(startA..endA) / count(startB..endB).
// Both ranges are half-open: [start, end). Returns 0 when the denominator range is empty.
func (m *RTMetrics) ResponseCodeRatio(startA, endA, startB, endB int) float64 {
	a := int64(0)
	b := int64(0)
	m.statusCodesLock.RLock()
	defer m.statusCodesLock.RUnlock()
	for code, v := range m.statusCodes {
		if code < endA && code >= startA {
			a += v.Count()
		}
		if code < endB && code >= startB {
			b += v.Count()
		}
	}
	if b != 0 {
		return float64(a) / float64(b)
	}
	return 0
}
// Append merges other's totals, network errors, status-code counters and latency
// histogram into m. Appending a collector to itself is explicitly rejected.
// NOTE(review): two goroutines appending each other's collectors take the two
// statusCodesLocks in opposite orders — confirm callers never do that concurrently.
func (m *RTMetrics) Append(other *RTMetrics) error {
	if m == other {
		return errors.New("RTMetrics cannot append to self")
	}
	if err := m.total.Append(other.total); err != nil {
		return err
	}
	if err := m.netErrors.Append(other.netErrors); err != nil {
		return err
	}
	// Hold m's write lock and other's read lock while merging the code maps.
	m.statusCodesLock.Lock()
	defer m.statusCodesLock.Unlock()
	other.statusCodesLock.RLock()
	defer other.statusCodesLock.RUnlock()
	for code, c := range other.statusCodes {
		o, ok := m.statusCodes[code]
		if ok {
			if err := o.Append(c); err != nil {
				return err
			}
		} else {
			// Clone so the two collectors don't share counter state.
			m.statusCodes[code] = c.Clone()
		}
	}
	return m.histogram.Append(other.histogram)
}
// Record registers a completed request: bumps the total counter, counts 502/504
// responses as network errors, and records the status code and latency.
// NOTE(review): errors from recordStatusCode/recordLatency are silently dropped.
func (m *RTMetrics) Record(code int, duration time.Duration) {
	m.total.Inc(1)
	if code == http.StatusGatewayTimeout || code == http.StatusBadGateway {
		m.netErrors.Inc(1)
	}
	m.recordStatusCode(code)
	m.recordLatency(duration)
}
// TotalCount returns the total count of processed requests collected.
func (m *RTMetrics) TotalCount() int64 {
	return m.total.Count()
}
// NetworkErrorCount returns the count of network errors observed (the original
// comment incorrectly described this as the count of processed requests).
func (m *RTMetrics) NetworkErrorCount() int64 {
	return m.netErrors.Count()
}
// StatusCodesCounts returns a map with the counts of response codes that have a
// non-zero total in the current window.
func (m *RTMetrics) StatusCodesCounts() map[int]int64 {
	sc := make(map[int]int64)
	m.statusCodesLock.RLock()
	defer m.statusCodesLock.RUnlock()
	for k, v := range m.statusCodes {
		// Count() expires stale buckets and re-sums on every call; evaluate it
		// once per entry instead of twice as the original did.
		if count := v.Count(); count != 0 {
			sc[k] = count
		}
	}
	return sc
}
// LatencyHistogram computes and returns the merged histogram of latencies observed.
func (m *RTMetrics) LatencyHistogram() (*HDRHistogram, error) {
	return m.histogram.Merged()
}
// Reset clears the histogram, both counters, and drops all per-status-code counters.
func (m *RTMetrics) Reset() {
	m.histogram.Reset()
	m.total.Reset()
	m.netErrors.Reset()
	m.statusCodesLock.Lock()
	defer m.statusCodesLock.Unlock()
	m.statusCodes = make(map[int]*RollingCounter)
}
// recordNetError increments the network error counter; always returns nil.
func (m *RTMetrics) recordNetError() error {
	m.netErrors.Inc(1)
	return nil
}
// recordLatency records a single latency observation into the rolling histogram.
func (m *RTMetrics) recordLatency(d time.Duration) error {
	return m.histogram.RecordLatencies(d, 1)
}
// recordStatusCode increments the counter for statusCode, lazily creating it.
// Fast path: increment under the read lock when the counter already exists.
// Slow path: take the write lock and re-check before creating, since another
// goroutine may have created the counter between the unlock and lock.
// NOTE(review): the fast path mutates the counter while holding only the read
// lock, so concurrent increments of the same code race on RollingCounter state —
// confirm callers serialize or this is acceptable.
func (m *RTMetrics) recordStatusCode(statusCode int) error {
	m.statusCodesLock.RLock()
	if c, ok := m.statusCodes[statusCode]; ok {
		c.Inc(1)
		m.statusCodesLock.RUnlock()
		return nil
	}
	m.statusCodesLock.RUnlock()
	m.statusCodesLock.Lock()
	defer m.statusCodesLock.Unlock()
	// Check if another goroutine has written our counter already
	if c, ok := m.statusCodes[statusCode]; ok {
		c.Inc(1)
		return nil
	}
	c, err := m.newCounter()
	if err != nil {
		return err
	}
	c.Inc(1)
	m.statusCodes[statusCode] = c
	return nil
}
// Defaults used by NewRTMetrics for its rolling counters and latency histogram.
const (
	counterBuckets         = 10               // rolling counter window: 10 buckets...
	counterResolution      = time.Second      // ...of 1 second each
	histMin                = 1                // lowest trackable histogram value (microseconds)
	histMax                = 3600000000       // 1 hour in microseconds
	histSignificantFigures = 2                // significant figures (1% precision)
	histBuckets            = 6                // number of sub-histograms in a rolling histogram
	histPeriod             = 10 * time.Second // roll time
)