DataDog and StatsD Metrics Support

* Added support for DataDog and StatsD monitoring
* Added documentation
Alex Antonov 2017-07-20 17:26:43 -05:00 committed by Ludovic Fernandez
parent cd28e7b24f
commit 69c628b626
39 changed files with 3921 additions and 13 deletions

vendor/github.com/go-kit/kit/log/doc.go generated vendored Normal file
@@ -0,0 +1,93 @@
// Package log provides a structured logger.
//
// Structured logging produces logs easily consumed later by humans or
// machines. Humans might be interested in debugging errors, or tracing
// specific requests. Machines might be interested in counting interesting
// events, or aggregating information for off-line processing. In both cases,
// it is important that the log messages are structured and actionable.
// Package log is designed to encourage both of these best practices.
//
// Basic Usage
//
// The fundamental interface is Logger. Loggers create log events from
// key/value data. The Logger interface has a single method, Log, which
// accepts a sequence of alternating key/value pairs, which this package names
// keyvals.
//
// type Logger interface {
// Log(keyvals ...interface{}) error
// }
//
// Here is an example of a function using a Logger to create log events.
//
// func RunTask(task Task, logger log.Logger) string {
// logger.Log("taskID", task.ID, "event", "starting task")
// ...
// logger.Log("taskID", task.ID, "event", "task complete")
// }
//
// The keys in the above example are "taskID" and "event". The values are
// task.ID, "starting task", and "task complete". Every key is followed
// immediately by its value.
//
// Keys are usually plain strings. Values may be any type that has a sensible
// encoding in the chosen log format. With structured logging it is a good
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Log Context
//
// A log context stores keyvals that it includes in all log events. Building
// appropriate log contexts reduces repetition and aids consistency in the
// resulting log output. We can use a context to improve the RunTask example.
//
// func RunTask(task Task, logger log.Logger) string {
// logger = log.NewContext(logger).With("taskID", task.ID)
// logger.Log("event", "starting task")
// ...
// taskHelper(task.Cmd, logger)
// ...
// logger.Log("event", "task complete")
// }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. The call to taskHelper highlights that a
// context may be passed as a logger to other functions. Each log event
// created by the called function will include the task.ID even though the
// function does not have access to that value. Using log contexts this way
// simplifies producing log output that enables tracing the life cycle of
// individual tasks. (See the Context example for the full code of the
// above snippet.)
//
// Dynamic Context Values
//
// A Valuer function stored in a log context generates a new value each time
// the context logs an event. The Valuer example demonstrates how this
// feature works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
// DefaultCaller. A common logger initialization sequence that ensures all log
// entries contain a timestamp and source location looks like this:
//
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
// Applications with multiple goroutines want each log event written to the
// same logger to remain separate from other log events. Package log provides
// two simple solutions for concurrent safe logging.
//
// NewSyncWriter wraps an io.Writer and serializes each call to its Write
// method. Using a SyncWriter has the benefit that the smallest practical
// portion of the logging logic is performed within a mutex, but it requires
// the formatting Logger to make only one call to Write per log event.
//
// NewSyncLogger wraps any Logger and serializes each call to its Log method.
// Using a SyncLogger has the benefit that it guarantees each log event is
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
package log
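
A minimal usage sketch of this package, separate from the vendored source, assuming a standalone main package. It follows the initialization sequence from the comment above and combines NewLogfmtLogger, NewSyncWriter, and a context with the default valuers:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	// Wrap os.Stdout so concurrent Log calls cannot interleave writes.
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
	logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	logger.Log("taskID", 42, "event", "starting task")
	// e.g. ts=2017-07-20T22:26:43Z caller=main.go:15 taskID=42 event="starting task"
}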

vendor/github.com/go-kit/kit/log/json_logger.go generated vendored Normal file
@@ -0,0 +1,92 @@
package log
import (
"encoding"
"encoding/json"
"fmt"
"io"
"reflect"
)
type jsonLogger struct {
io.Writer
}
// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
// single JSON object. Each log event produces no more than one call to
// w.Write. The passed Writer must be safe for concurrent use by multiple
// goroutines if the returned Logger will be used concurrently.
func NewJSONLogger(w io.Writer) Logger {
return &jsonLogger{w}
}
func (l *jsonLogger) Log(keyvals ...interface{}) error {
n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
m := make(map[string]interface{}, n)
for i := 0; i < len(keyvals); i += 2 {
k := keyvals[i]
var v interface{} = ErrMissingValue
if i+1 < len(keyvals) {
v = keyvals[i+1]
}
merge(m, k, v)
}
return json.NewEncoder(l.Writer).Encode(m)
}
func merge(dst map[string]interface{}, k, v interface{}) {
var key string
switch x := k.(type) {
case string:
key = x
case fmt.Stringer:
key = safeString(x)
default:
key = fmt.Sprint(x)
}
if x, ok := v.(error); ok {
v = safeError(x)
}
// We want json.Marshaler and encoding.TextMarshaler to take priority over
// err.Error() and v.String(). But json.Marshal (called later) does that by
// default, so we force a no-op if it's one of those two cases.
switch x := v.(type) {
case json.Marshaler:
case encoding.TextMarshaler:
case error:
v = safeError(x)
case fmt.Stringer:
v = safeString(x)
}
dst[key] = v
}
func safeString(str fmt.Stringer) (s string) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s = "NULL"
} else {
panic(panicVal)
}
}
}()
s = str.String()
return
}
func safeError(err error) (s interface{}) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s = nil
} else {
panic(panicVal)
}
}
}()
s = err.Error()
return
}
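
As a quick illustration, separate from the vendored file: each Log call on a JSON logger encodes the keyvals as one JSON object and one Write.

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	logger := log.NewJSONLogger(os.Stdout)
	logger.Log("taskID", 1, "event", "starting task")
	// Prints: {"event":"starting task","taskID":1}
}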

vendor/github.com/go-kit/kit/log/log.go generated vendored Normal file
@@ -0,0 +1,144 @@
package log
import "errors"
// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies any of its elements must make a copy first.
type Logger interface {
Log(keyvals ...interface{}) error
}
// ErrMissingValue is appended to keyvals slices with odd length to substitute
// the missing value.
var ErrMissingValue = errors.New("(MISSING)")
// NewContext returns a new Context that logs to logger.
func NewContext(logger Logger) *Context {
if c, ok := logger.(*Context); ok {
return c
}
return &Context{logger: logger}
}
// Context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to
// resolve application call site information for a log.Caller stored in the
// context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called.
//
// Three implementation details provide the needed stack depth consistency.
// The first two of these details also result in better amortized performance,
// and thus make sense even without the requirements regarding stack depth.
// The third detail, however, is subtle and tied to the implementation of the
// Go compiler.
//
// 1. NewContext avoids introducing an additional layer when asked to
// wrap another Context.
// 2. With avoids introducing an additional layer by returning a newly
// constructed Context with a merged keyvals rather than simply
// wrapping the existing Context.
// 3. All of Context's methods take pointer receivers even though they
// do not mutate the Context.
//
// Before explaining the last detail, first some background. The Go compiler
// generates wrapper methods to implement the auto dereferencing behavior when
// calling a value method through a pointer variable. These wrapper methods
// are also used when calling a value method through an interface variable
// because interfaces store a pointer to the underlying concrete value.
// Calling a pointer receiver through an interface does not require generating
// an additional function.
//
// If Context had value methods then calling Context.Log through a variable
// with type Logger would have an extra stack frame compared to calling
// Context.Log through a variable with type Context. Using pointer receivers
// avoids this problem.
// A Context wraps a Logger and holds keyvals that it includes in all log
// events. When logging, a Context replaces all value elements (odd indexes)
// containing a Valuer with their generated value for each call to its Log
// method.
type Context struct {
logger Logger
keyvals []interface{}
hasValuer bool
}
// Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger.
func (l *Context) Log(keyvals ...interface{}) error {
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
if l.hasValuer {
// If no keyvals were appended above then we must copy l.keyvals so
// that future log events will reevaluate the stored Valuers.
if len(keyvals) == 0 {
kvs = append([]interface{}{}, l.keyvals...)
}
bindValues(kvs[:len(l.keyvals)])
}
return l.logger.Log(kvs...)
}
// With returns a new Context with keyvals appended to those of the receiver.
func (l *Context) With(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &Context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// WithPrefix returns a new Context with keyvals prepended to those of the
// receiver.
func (l *Context) WithPrefix(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &Context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc func(...interface{}) error
// Log implements Logger by calling f(keyvals...).
func (f LoggerFunc) Log(keyvals ...interface{}) error {
return f(keyvals...)
}
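
A short sketch, separate from the vendored file, of how With and WithPrefix position the stored keyvals relative to those passed at the call site:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	base := log.NewContext(log.NewLogfmtLogger(os.Stdout)).With("app", "demo")

	// With appends to the stored keyvals; WithPrefix prepends to them.
	base.With("component", "worker").Log("event", "done")
	// app=demo component=worker event=done
	base.WithPrefix("component", "worker").Log("event", "done")
	// component=worker app=demo event=done
}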

vendor/github.com/go-kit/kit/log/logfmt_logger.go generated vendored Normal file
@@ -0,0 +1,62 @@
package log
import (
"bytes"
"io"
"sync"
"github.com/go-logfmt/logfmt"
)
type logfmtEncoder struct {
*logfmt.Encoder
buf bytes.Buffer
}
func (l *logfmtEncoder) Reset() {
l.Encoder.Reset()
l.buf.Reset()
}
var logfmtEncoderPool = sync.Pool{
New: func() interface{} {
var enc logfmtEncoder
enc.Encoder = logfmt.NewEncoder(&enc.buf)
return &enc
},
}
type logfmtLogger struct {
w io.Writer
}
// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
// logfmt format. Each log event produces no more than one call to w.Write.
// The passed Writer must be safe for concurrent use by multiple goroutines if
// the returned Logger will be used concurrently.
func NewLogfmtLogger(w io.Writer) Logger {
return &logfmtLogger{w}
}
func (l logfmtLogger) Log(keyvals ...interface{}) error {
enc := logfmtEncoderPool.Get().(*logfmtEncoder)
enc.Reset()
defer logfmtEncoderPool.Put(enc)
if err := enc.EncodeKeyvals(keyvals...); err != nil {
return err
}
// Add newline to the end of the buffer
if err := enc.EndRecord(); err != nil {
return err
}
// The Logger interface requires implementations to be safe for concurrent
// use by multiple goroutines. For this implementation that means making
// only one call to l.w.Write() for each call to Log.
if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
return err
}
return nil
}

vendor/github.com/go-kit/kit/log/nop_logger.go generated vendored Normal file
@@ -0,0 +1,8 @@
package log
type nopLogger struct{}
// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger { return nopLogger{} }
func (nopLogger) Log(...interface{}) error { return nil }

vendor/github.com/go-kit/kit/log/stdlib.go generated vendored Normal file
@@ -0,0 +1,116 @@
package log
import (
"io"
"log"
"regexp"
"strings"
)
// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
// designed to be passed to a Go kit logger as the writer, for cases where
// it's necessary to redirect all Go kit log output to the stdlib logger.
//
// If you have any choice in the matter, you shouldn't use this. Prefer to
// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
type StdlibWriter struct{}
// Write implements io.Writer.
func (w StdlibWriter) Write(p []byte) (int, error) {
log.Print(strings.TrimSpace(string(p)))
return len(p), nil
}
// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It will extract date/timestamps, filenames, and
// messages, and place them under relevant keys.
type StdlibAdapter struct {
Logger
timestampKey string
fileKey string
messageKey string
}
// StdlibAdapterOption sets a parameter for the StdlibAdapter.
type StdlibAdapterOption func(*StdlibAdapter)
// TimestampKey sets the key for the timestamp field. By default, it's "ts".
func TimestampKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.timestampKey = key }
}
// FileKey sets the key for the file and line field. By default, it's "file".
func FileKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.fileKey = key }
}
// MessageKey sets the key for the actual log message. By default, it's "msg".
func MessageKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.messageKey = key }
}
// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
// logger. It's designed to be passed to log.SetOutput.
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
a := StdlibAdapter{
Logger: logger,
timestampKey: "ts",
fileKey: "file",
messageKey: "msg",
}
for _, option := range options {
option(&a)
}
return a
}
func (a StdlibAdapter) Write(p []byte) (int, error) {
result := subexps(p)
keyvals := []interface{}{}
var timestamp string
if date, ok := result["date"]; ok && date != "" {
timestamp = date
}
if time, ok := result["time"]; ok && time != "" {
if timestamp != "" {
timestamp += " "
}
timestamp += time
}
if timestamp != "" {
keyvals = append(keyvals, a.timestampKey, timestamp)
}
if file, ok := result["file"]; ok && file != "" {
keyvals = append(keyvals, a.fileKey, file)
}
if msg, ok := result["msg"]; ok {
keyvals = append(keyvals, a.messageKey, msg)
}
if err := a.Logger.Log(keyvals...); err != nil {
return 0, err
}
return len(p), nil
}
const (
logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?`
logRegexpFile = `(?P<file>.+?:[0-9]+)?`
logRegexpMsg = `(: )?(?P<msg>.*)`
)
var (
logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg)
)
func subexps(line []byte) map[string]string {
m := logRegexp.FindSubmatch(line)
if len(m) < len(logRegexp.SubexpNames()) {
return map[string]string{}
}
result := map[string]string{}
for i, name := range logRegexp.SubexpNames() {
result[name] = string(m[i])
}
return result
}
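
A sketch, separate from the vendored file, of redirecting the stdlib logger through a Go kit logger; it assumes the default stdlib flags so the adapter can extract the date/time and message:

package main

import (
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	logger := kitlog.NewLogfmtLogger(kitlog.NewSyncWriter(os.Stdout))
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))

	stdlog.Print("something happened")
	// Emitted via the Go kit logger, roughly:
	// ts="2017/07/20 17:26:43" msg="something happened"
}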

vendor/github.com/go-kit/kit/log/sync.go generated vendored Normal file
@@ -0,0 +1,81 @@
package log
import (
"io"
"sync"
"sync/atomic"
)
// SwapLogger wraps another logger that may be safely replaced while other
// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
// will discard all log events without error.
//
// SwapLogger serves well as a package global logger that can be changed by
// importers.
type SwapLogger struct {
logger atomic.Value
}
type loggerStruct struct {
Logger
}
// Log implements the Logger interface by forwarding keyvals to the currently
// wrapped logger. It does not log anything if the wrapped logger is nil.
func (l *SwapLogger) Log(keyvals ...interface{}) error {
s, ok := l.logger.Load().(loggerStruct)
if !ok || s.Logger == nil {
return nil
}
return s.Log(keyvals...)
}
// Swap replaces the currently wrapped logger with logger. Swap may be called
// concurrently with calls to Log from other goroutines.
func (l *SwapLogger) Swap(logger Logger) {
l.logger.Store(loggerStruct{logger})
}
// SyncWriter synchronizes concurrent writes to an io.Writer.
type SyncWriter struct {
mu sync.Mutex
w io.Writer
}
// NewSyncWriter returns a new SyncWriter. The returned writer is safe for
// concurrent use by multiple goroutines.
func NewSyncWriter(w io.Writer) *SyncWriter {
return &SyncWriter{w: w}
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the SyncWriter is available.
func (w *SyncWriter) Write(p []byte) (n int, err error) {
w.mu.Lock()
n, err = w.w.Write(p)
w.mu.Unlock()
return n, err
}
// syncLogger provides concurrent safe logging for another Logger.
type syncLogger struct {
mu sync.Mutex
logger Logger
}
// NewSyncLogger returns a logger that synchronizes concurrent use of the
// wrapped logger. When multiple goroutines use the SyncLogger concurrently
// only one goroutine will be allowed to log to the wrapped logger at a time.
// The other goroutines will block until the logger is available.
func NewSyncLogger(logger Logger) Logger {
return &syncLogger{logger: logger}
}
// Log logs keyvals to the underlying Logger. If another log is already in
// progress, the calling goroutine blocks until the syncLogger is available.
func (l *syncLogger) Log(keyvals ...interface{}) error {
l.mu.Lock()
err := l.logger.Log(keyvals...)
l.mu.Unlock()
return err
}
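
A sketch, separate from the vendored file, of SwapLogger as a package-global logger that importers can replace at runtime:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

// Logger is a process-wide logger; its zero value discards all events.
var Logger log.SwapLogger

func main() {
	Logger.Log("event", "silently dropped") // wrapped logger is still nil

	Logger.Swap(log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)))
	Logger.Log("event", "delivered")
}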

vendor/github.com/go-kit/kit/log/value.go generated vendored Normal file
@@ -0,0 +1,62 @@
package log
import (
"time"
"github.com/go-stack/stack"
)
// A Valuer generates a log value. When passed to Context.With in a value
// element (odd indexes), it represents a dynamic value which is re-evaluated
// with each log event.
type Valuer func() interface{}
// bindValues replaces all value elements (odd indexes) containing a Valuer
// with their generated value.
func bindValues(keyvals []interface{}) {
for i := 1; i < len(keyvals); i += 2 {
if v, ok := keyvals[i].(Valuer); ok {
keyvals[i] = v()
}
}
}
// containsValuer returns true if any of the value elements (odd indexes)
// contain a Valuer.
func containsValuer(keyvals []interface{}) bool {
for i := 1; i < len(keyvals); i += 2 {
if _, ok := keyvals[i].(Valuer); ok {
return true
}
}
return false
}
// Timestamp returns a Valuer that invokes the underlying function when bound,
// returning a time.Time. Users will probably want to use DefaultTimestamp or
// DefaultTimestampUTC.
func Timestamp(t func() time.Time) Valuer {
return func() interface{} { return t() }
}
var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) }
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) }
)
// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
func Caller(depth int) Valuer {
return func() interface{} { return stack.Caller(depth) }
}
var (
// DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With.
DefaultCaller = Caller(3)
)
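
A sketch, separate from the vendored file, of a custom Valuer; the hypothetical sequence counter below is re-evaluated for every event logged through the context:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	var n int
	sequence := log.Valuer(func() interface{} { n++; return n })

	logger := log.NewContext(log.NewLogfmtLogger(os.Stdout)).
		With("seq", sequence, "ts", log.DefaultTimestampUTC)

	logger.Log("event", "first")  // seq=1 ts=...
	logger.Log("event", "second") // seq=2 ts=...
}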

vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd.go generated vendored Normal file
@@ -0,0 +1,306 @@
// Package dogstatsd provides a DogStatsD backend for package metrics. It's very
// similar to StatsD, but supports arbitrary tags per-metric, which map to Go
// kit's label values. So, while label values are no-ops in StatsD, they are
// supported here. For more details, see the documentation at
// http://docs.datadoghq.com/guides/dogstatsd/.
//
// This package batches observations and emits them on some schedule to the
// remote server. This is useful even if you connect to your DogStatsD server
// over UDP. Emitting one network packet per observation can quickly overwhelm
// even the fastest internal network.
package dogstatsd
import (
"fmt"
"io"
"strings"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn"
)
// Dogstatsd receives metrics observations and forwards them to a DogStatsD
// server. Create a Dogstatsd object, use it to create metrics, and pass those
// metrics as dependencies to the components that will use them.
//
// All metrics are buffered until WriteTo is called. Counters and gauges are
// aggregated into a single observation per timeseries per write. Timings and
// histograms are buffered but not aggregated.
//
// To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a DogStatsD server, use the SendLoop helper method.
type Dogstatsd struct {
prefix string
rates *ratemap.RateMap
counters *lv.Space
gauges *lv.Space
timings *lv.Space
histograms *lv.Space
logger log.Logger
}
// New returns a Dogstatsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Dogstatsd {
return &Dogstatsd{
prefix: prefix,
rates: ratemap.New(),
counters: lv.NewSpace(),
gauges: lv.NewSpace(),
timings: lv.NewSpace(),
histograms: lv.NewSpace(),
logger: logger,
}
}
// NewCounter returns a counter, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter {
d.rates.Set(d.prefix+name, sampleRate)
return &Counter{
name: d.prefix + name,
obs: d.counters.Observe,
}
}
// NewGauge returns a gauge, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewGauge(name string) *Gauge {
return &Gauge{
name: d.prefix + name,
obs: d.gauges.Observe,
}
}
// NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
d.rates.Set(d.prefix+name, sampleRate)
return &Timing{
name: d.prefix + name,
obs: d.timings.Observe,
}
}
// NewHistogram returns a histogram whose observations are of an unspecified
// unit, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {
d.rates.Set(d.prefix+name, sampleRate)
return &Histogram{
name: d.prefix + name,
obs: d.histograms.Observe,
}
}
// WriteLoop is a helper method that invokes WriteTo to the passed writer every
// time the passed channel fires. This method blocks until the channel is
// closed, so clients probably want to run it in its own goroutine. For typical
// usage, create a time.Ticker and pass its C channel to this method.
func (d *Dogstatsd) WriteLoop(c <-chan time.Time, w io.Writer) {
for range c {
if _, err := d.WriteTo(w); err != nil {
d.logger.Log("during", "WriteTo", "err", err)
}
}
}
// SendLoop is a helper method that wraps WriteLoop, passing a managed
// connection to the network and address. Like WriteLoop, this method blocks
// until the channel is closed, so clients probably want to start it in its own
// goroutine. For typical usage, create a time.Ticker and pass its C channel to
// this method.
func (d *Dogstatsd) SendLoop(c <-chan time.Time, network, address string) {
d.WriteLoop(c, conn.NewDefaultManager(network, address, d.logger))
}
// WriteTo flushes the buffered content of the metrics to the writer, in
// DogStatsD format. WriteTo abides best-effort semantics, so observations are
// lost if there is a problem with the write. Clients should be sure to call
// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods.
func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
var n int
d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
}
return true
})
if err != nil {
return count, err
}
d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
}
return true
})
if err != nil {
return count, err
}
return count, err
}
func sum(a []float64) float64 {
var v float64
for _, f := range a {
v += f
}
return v
}
func last(a []float64) float64 {
return a[len(a)-1]
}
func sampling(r float64) string {
var sv string
if r < 1.0 {
sv = fmt.Sprintf("|@%f", r)
}
return sv
}
func tagValues(labelValues []string) string {
if len(labelValues) == 0 {
return ""
}
if len(labelValues)%2 != 0 {
panic("tagValues received a labelValues with an odd number of strings")
}
pairs := make([]string, 0, len(labelValues)/2)
for i := 0; i < len(labelValues); i += 2 {
pairs = append(pairs, labelValues[i]+":"+labelValues[i+1])
}
return "|#" + strings.Join(pairs, ",")
}
type observeFunc func(name string, lvs lv.LabelValues, value float64)
// Counter is a DogStatsD counter. Observations are forwarded to a Dogstatsd
// object, and aggregated (summed) per timeseries.
type Counter struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Counter.
func (c *Counter) With(labelValues ...string) metrics.Counter {
return &Counter{
name: c.name,
lvs: c.lvs.With(labelValues...),
obs: c.obs,
}
}
// Add implements metrics.Counter.
func (c *Counter) Add(delta float64) {
c.obs(c.name, c.lvs, delta)
}
// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd
// object, and aggregated (the last observation selected) per timeseries.
type Gauge struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{
name: g.name,
lvs: g.lvs.With(labelValues...),
obs: g.obs,
}
}
// Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value)
}
// Timing is a DogStatsD timing, or metrics.Histogram. Observations are
// forwarded to a Dogstatsd object, and collected (but not aggregated) per
// timeseries.
type Timing struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Histogram.
func (t *Timing) With(labelValues ...string) metrics.Histogram {
return &Timing{
name: t.name,
lvs: t.lvs.With(labelValues...),
obs: t.obs,
}
}
// Observe implements metrics.Histogram. Value is interpreted as milliseconds.
func (t *Timing) Observe(value float64) {
t.obs(t.name, t.lvs, value)
}
// Histogram is a DogStatsD histogram. Observations are forwarded to a
// Dogstatsd object, and collected (but not aggregated) per timeseries.
type Histogram struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Histogram.
func (h *Histogram) With(labelValues ...string) metrics.Histogram {
return &Histogram{
name: h.name,
lvs: h.lvs.With(labelValues...),
obs: h.obs,
}
}
// Observe implements metrics.Histogram.
func (h *Histogram) Observe(value float64) {
h.obs(h.name, h.lvs, value)
}
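
A usage sketch, separate from the vendored file, assuming a local DogStatsD agent listening on UDP port 8125; the metric names and address are illustrative:

package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/dogstatsd"
)

func main() {
	d := dogstatsd.New("myservice.", log.NewNopLogger())

	// Flush buffered observations to the agent every five seconds.
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	go d.SendLoop(ticker.C, "udp", "127.0.0.1:8125")

	requests := d.NewCounter("requests_total", 1.0).With("method", "GET")
	requests.Add(1)
}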

vendor/github.com/go-kit/kit/metrics/internal/ratemap/ratemap.go generated vendored Normal file
@@ -0,0 +1,40 @@
// Package ratemap implements a goroutine-safe map of string to float64. It can
// be embedded in implementations whose metrics support fixed sample rates, so
// that an additional parameter doesn't have to be tracked through, e.g., the
// lv.Space object.
package ratemap
import "sync"
// RateMap is a simple goroutine-safe map of string to float64.
type RateMap struct {
mtx sync.RWMutex
m map[string]float64
}
// New returns a new RateMap.
func New() *RateMap {
return &RateMap{
m: map[string]float64{},
}
}
// Set writes the given name/rate pair to the map.
// Set is safe for concurrent access by multiple goroutines.
func (m *RateMap) Set(name string, rate float64) {
m.mtx.Lock()
defer m.mtx.Unlock()
m.m[name] = rate
}
// Get retrieves the rate for the given name, or 1.0 if none is set.
// Get is safe for concurrent access by multiple goroutines.
func (m *RateMap) Get(name string) float64 {
m.mtx.RLock()
defer m.mtx.RUnlock()
f, ok := m.m[name]
if !ok {
f = 1.0
}
return f
}

vendor/github.com/go-kit/kit/metrics/multi/multi.go generated vendored Normal file
@@ -0,0 +1,79 @@
// Package multi provides adapters that send observations to multiple metrics
// simultaneously. This is useful if your service needs to emit to multiple
// instrumentation systems at the same time, for example if your organization is
// transitioning from one system to another.
package multi
import "github.com/go-kit/kit/metrics"
// Counter collects multiple individual counters and treats them as a unit.
type Counter []metrics.Counter
// NewCounter returns a multi-counter, wrapping the passed counters.
func NewCounter(c ...metrics.Counter) Counter {
return Counter(c)
}
// Add implements metrics.Counter.
func (c Counter) Add(delta float64) {
for _, counter := range c {
counter.Add(delta)
}
}
// With implements metrics.Counter.
func (c Counter) With(labelValues ...string) metrics.Counter {
next := make(Counter, len(c))
for i := range c {
next[i] = c[i].With(labelValues...)
}
return next
}
// Gauge collects multiple individual gauges and treats them as a unit.
type Gauge []metrics.Gauge
// NewGauge returns a multi-gauge, wrapping the passed gauges.
func NewGauge(g ...metrics.Gauge) Gauge {
return Gauge(g)
}
// Set implements metrics.Gauge.
func (g Gauge) Set(value float64) {
for _, gauge := range g {
gauge.Set(value)
}
}
// With implements metrics.Gauge.
func (g Gauge) With(labelValues ...string) metrics.Gauge {
next := make(Gauge, len(g))
for i := range g {
next[i] = g[i].With(labelValues...)
}
return next
}
// Histogram collects multiple individual histograms and treats them as a unit.
type Histogram []metrics.Histogram
// NewHistogram returns a multi-histogram, wrapping the passed histograms.
func NewHistogram(h ...metrics.Histogram) Histogram {
return Histogram(h)
}
// Observe implements metrics.Histogram.
func (h Histogram) Observe(value float64) {
for _, histogram := range h {
histogram.Observe(value)
}
}
// With implements metrics.Histogram.
func (h Histogram) With(labelValues ...string) metrics.Histogram {
next := make(Histogram, len(h))
for i := range h {
next[i] = h[i].With(labelValues...)
}
return next
}
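
A sketch, separate from the vendored file, that fans one logical counter out to both the DogStatsD and StatsD backends; the prefix and metric name are illustrative:

package main

import (
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/dogstatsd"
	"github.com/go-kit/kit/metrics/multi"
	"github.com/go-kit/kit/metrics/statsd"
)

func main() {
	logger := log.NewNopLogger()
	d := dogstatsd.New("svc.", logger)
	s := statsd.New("svc.", logger)

	// One logical counter that forwards to both instrumentation systems.
	requests := multi.NewCounter(
		d.NewCounter("requests", 1.0),
		s.NewCounter("requests", 1.0),
	)
	requests.Add(1)
}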

vendor/github.com/go-kit/kit/metrics/statsd/statsd.go generated vendored Normal file
@@ -0,0 +1,232 @@
// Package statsd provides a StatsD backend for package metrics. StatsD has no
// concept of arbitrary key-value tagging, so label values are not supported,
// and With is a no-op on all metrics.
//
// This package batches observations and emits them on some schedule to the
// remote server. This is useful even if you connect to your StatsD server over
// UDP. Emitting one network packet per observation can quickly overwhelm even
// the fastest internal network.
package statsd
import (
"fmt"
"io"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn"
)
// Statsd receives metrics observations and forwards them to a StatsD server.
// Create a Statsd object, use it to create metrics, and pass those metrics as
// dependencies to the components that will use them.
//
// All metrics are buffered until WriteTo is called. Counters and gauges are
// aggregated into a single observation per timeseries per write. Timings are
// buffered but not aggregated.
//
// To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a StatsD server, use the SendLoop helper method.
type Statsd struct {
prefix string
rates *ratemap.RateMap
// The observations are collected in an N-dimensional vector space, even
// though they only take advantage of a single dimension (name). This is an
// implementation detail born purely from convenience. It would be more
// accurate to collect them in a map[string][]float64, but we already have
// this nice data structure and helper methods.
counters *lv.Space
gauges *lv.Space
timings *lv.Space
logger log.Logger
}
// New returns a Statsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Statsd {
return &Statsd{
prefix: prefix,
rates: ratemap.New(),
counters: lv.NewSpace(),
gauges: lv.NewSpace(),
timings: lv.NewSpace(),
logger: logger,
}
}
// NewCounter returns a counter, sending observations to this Statsd object.
func (s *Statsd) NewCounter(name string, sampleRate float64) *Counter {
s.rates.Set(s.prefix+name, sampleRate)
return &Counter{
name: s.prefix + name,
obs: s.counters.Observe,
}
}
// NewGauge returns a gauge, sending observations to this Statsd object.
func (s *Statsd) NewGauge(name string) *Gauge {
return &Gauge{
name: s.prefix + name,
obs: s.gauges.Observe,
}
}
// NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Statsd object.
func (s *Statsd) NewTiming(name string, sampleRate float64) *Timing {
s.rates.Set(s.prefix+name, sampleRate)
return &Timing{
name: s.prefix + name,
obs: s.timings.Observe,
}
}
// WriteLoop is a helper method that invokes WriteTo to the passed writer every
// time the passed channel fires. This method blocks until the channel is
// closed, so clients probably want to run it in its own goroutine. For typical
// usage, create a time.Ticker and pass its C channel to this method.
func (s *Statsd) WriteLoop(c <-chan time.Time, w io.Writer) {
for range c {
if _, err := s.WriteTo(w); err != nil {
s.logger.Log("during", "WriteTo", "err", err)
}
}
}
// SendLoop is a helper method that wraps WriteLoop, passing a managed
// connection to the network and address. Like WriteLoop, this method blocks
// until the channel is closed, so clients probably want to start it in its own
// goroutine. For typical usage, create a time.Ticker and pass its C channel to
// this method.
func (s *Statsd) SendLoop(c <-chan time.Time, network, address string) {
s.WriteLoop(c, conn.NewDefaultManager(network, address, s.logger))
}
// WriteTo flushes the buffered content of the metrics to the writer, in
// StatsD format. WriteTo abides best-effort semantics, so observations are
// lost if there is a problem with the write. Clients should be sure to call
// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods.
func (s *Statsd) WriteTo(w io.Writer) (count int64, err error) {
var n int
s.counters.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s\n", name, sum(values), sampling(s.rates.Get(name)))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
s.gauges.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|g\n", name, last(values))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
s.timings.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool {
sampleRate := s.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s\n", name, value, sampling(sampleRate))
if err != nil {
return false
}
count += int64(n)
}
return true
})
if err != nil {
return count, err
}
return count, err
}
func sum(a []float64) float64 {
var v float64
for _, f := range a {
v += f
}
return v
}
func last(a []float64) float64 {
return a[len(a)-1]
}
func sampling(r float64) string {
var sv string
if r < 1.0 {
sv = fmt.Sprintf("|@%f", r)
}
return sv
}
type observeFunc func(name string, lvs lv.LabelValues, value float64)
// Counter is a StatsD counter. Observations are forwarded to a Statsd object,
// and aggregated (summed) per timeseries.
type Counter struct {
name string
obs observeFunc
}
// With is a no-op.
func (c *Counter) With(...string) metrics.Counter {
return c
}
// Add implements metrics.Counter.
func (c *Counter) Add(delta float64) {
c.obs(c.name, lv.LabelValues{}, delta)
}
// Gauge is a StatsD gauge. Observations are forwarded to a Statsd object, and
// aggregated (the last observation selected) per timeseries.
type Gauge struct {
name string
obs observeFunc
}
// With is a no-op.
func (g *Gauge) With(...string) metrics.Gauge {
return g
}
// Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) {
g.obs(g.name, lv.LabelValues{}, value)
}
// Timing is a StatsD timing, or metrics.Histogram. Observations are
// forwarded to a Statsd object, and collected (but not aggregated) per
// timeseries.
type Timing struct {
name string
obs observeFunc
}
// With is a no-op.
func (t *Timing) With(...string) metrics.Histogram {
return t
}
// Observe implements metrics.Histogram. Value is interpreted as milliseconds.
func (t *Timing) Observe(value float64) {
t.obs(t.name, lv.LabelValues{}, value)
}
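
A sketch, separate from the vendored file, that buffers a few observations and flushes them once to stdout, showing the emitted StatsD line format; in real use WriteLoop or SendLoop would do the flushing:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/statsd"
)

func main() {
	s := statsd.New("svc.", log.NewNopLogger())

	c := s.NewCounter("requests", 1.0)
	t := s.NewTiming("latency_ms", 1.0)
	c.Add(3)
	t.Observe(42)

	// Flush once instead of using WriteLoop/SendLoop.
	if _, err := s.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
	// svc.requests:3.000000|c
	// svc.latency_ms:42.000000|ms
}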

vendor/github.com/go-kit/kit/util/conn/doc.go generated vendored Normal file
@@ -0,0 +1,2 @@
// Package conn provides utilities related to connections.
package conn

vendor/github.com/go-kit/kit/util/conn/manager.go generated vendored Normal file
@@ -0,0 +1,145 @@
package conn
import (
"errors"
"net"
"time"
"github.com/go-kit/kit/log"
)
// Dialer imitates net.Dial. Dialer is assumed to yield connections that are
// safe for use by multiple concurrent goroutines.
type Dialer func(network, address string) (net.Conn, error)
// AfterFunc imitates time.After.
type AfterFunc func(time.Duration) <-chan time.Time
// Manager manages a net.Conn.
//
// Clients provide a way to create the connection with a Dialer, network, and
// address. Clients should Take the connection when they want to use it, and Put
// back whatever error they receive from its use. When a non-nil error is Put,
// the connection is invalidated, and a new connection is established.
// Connection failures are retried after an exponential backoff.
type Manager struct {
dialer Dialer
network string
address string
after AfterFunc
logger log.Logger
takec chan net.Conn
putc chan error
}
// NewManager returns a connection manager using the passed Dialer, network, and
// address. The AfterFunc is used to control exponential backoff and retries.
// The logger is used to log errors; pass a log.NopLogger if you don't care to
// receive them. For normal use, prefer NewDefaultManager.
func NewManager(d Dialer, network, address string, after AfterFunc, logger log.Logger) *Manager {
m := &Manager{
dialer: d,
network: network,
address: address,
after: after,
logger: logger,
takec: make(chan net.Conn),
putc: make(chan error),
}
go m.loop()
return m
}
// NewDefaultManager is a helper constructor, suitable for most normal use in
// real (non-test) code. It uses the real net.Dial and time.After functions.
func NewDefaultManager(network, address string, logger log.Logger) *Manager {
return NewManager(net.Dial, network, address, time.After, logger)
}
// Take yields the current connection. It may be nil.
func (m *Manager) Take() net.Conn {
return <-m.takec
}
// Put accepts an error that came from a previously yielded connection. If the
// error is non-nil, the manager will invalidate the current connection and try
// to reconnect, with exponential backoff. Putting a nil error is a no-op.
func (m *Manager) Put(err error) {
m.putc <- err
}
// Write writes the passed data to the connection in a single Take/Put cycle.
func (m *Manager) Write(b []byte) (int, error) {
conn := m.Take()
if conn == nil {
return 0, ErrConnectionUnavailable
}
n, err := conn.Write(b)
defer m.Put(err)
return n, err
}
func (m *Manager) loop() {
var (
conn = dial(m.dialer, m.network, m.address, m.logger) // may block slightly
connc = make(chan net.Conn, 1)
reconnectc <-chan time.Time // initially nil
backoff = time.Second
)
// If the initial dial fails, we need to trigger a reconnect via the loop
// body, below. If we did this in a goroutine, we would race on the conn
// variable. So we use a buffered chan instead.
connc <- conn
for {
select {
case <-reconnectc:
reconnectc = nil // one-shot
go func() { connc <- dial(m.dialer, m.network, m.address, m.logger) }()
case conn = <-connc:
if conn == nil {
// didn't work
backoff = exponential(backoff) // wait longer
reconnectc = m.after(backoff) // try again
} else {
// worked!
backoff = time.Second // reset wait time
reconnectc = nil // no retry necessary
}
case m.takec <- conn:
case err := <-m.putc:
if err != nil && conn != nil {
m.logger.Log("err", err)
conn = nil // connection is bad
reconnectc = m.after(time.Nanosecond) // trigger immediately
}
}
}
}
func dial(d Dialer, network, address string, logger log.Logger) net.Conn {
conn, err := d(network, address)
if err != nil {
logger.Log("err", err)
conn = nil // just to be sure
}
return conn
}
func exponential(d time.Duration) time.Duration {
d *= 2
if d > time.Minute {
d = time.Minute
}
return d
}
// ErrConnectionUnavailable is returned by the Manager's Write method when the
// manager cannot yield a good connection.
var ErrConnectionUnavailable = errors.New("connection unavailable")
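
A sketch, separate from the vendored file, of using the manager directly; the network address is illustrative:

package main

import (
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/util/conn"
)

func main() {
	logger := log.NewNopLogger()

	// Dials in a background goroutine and reconnects with exponential
	// backoff whenever a non-nil error is Put back after use.
	m := conn.NewDefaultManager("tcp", "localhost:4242", logger)

	if _, err := m.Write([]byte("hello\n")); err != nil {
		logger.Log("err", err)
	}
}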