Add InfluxDB support for traefik metrics

parent e3131481e9
commit 00d7c5972f

35 changed files with 4693 additions and 28 deletions
48 vendor/github.com/influxdata/influxdb/models/consistency.go (generated, vendored, new file)
@@ -0,0 +1,48 @@
package models

import (
	"errors"
	"strings"
)

// ConsistencyLevel represents a required replication criterion before a write
// can be returned as successful.
//
// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
type ConsistencyLevel int

const (
	// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
	ConsistencyLevelAny ConsistencyLevel = iota

	// ConsistencyLevelOne requires at least one data node acknowledged a write.
	ConsistencyLevelOne

	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
	ConsistencyLevelQuorum

	// ConsistencyLevelAll requires all data nodes to acknowledge a write.
	ConsistencyLevelAll
)

var (
	// ErrInvalidConsistencyLevel is returned when parsing the string version
	// of a consistency level.
	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)

// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
	switch strings.ToLower(level) {
	case "any":
		return ConsistencyLevelAny, nil
	case "one":
		return ConsistencyLevelOne, nil
	case "quorum":
		return ConsistencyLevelQuorum, nil
	case "all":
		return ConsistencyLevelAll, nil
	default:
		return 0, ErrInvalidConsistencyLevel
	}
}
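Usage sketch for the file above (assuming the package is importable at its canonical vendored path): ParseConsistencyLevel is case-insensitive and rejects unknown strings with ErrInvalidConsistencyLevel.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	lvl, err := models.ParseConsistencyLevel("QUORUM") // case-insensitive
	if err != nil {
		// Any unrecognized string yields ErrInvalidConsistencyLevel.
		fmt.Println(err)
		return
	}
	fmt.Println(lvl == models.ConsistencyLevelQuorum) // true
}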
32 vendor/github.com/influxdata/influxdb/models/inline_fnv.go (generated, vendored, new file)
@@ -0,0 +1,32 @@
package models // import "github.com/influxdata/influxdb/models"

// from stdlib hash/fnv/fnv.go
const (
	prime64  = 1099511628211
	offset64 = 14695981039346656037
)

// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
type InlineFNV64a uint64

// NewInlineFNV64a returns a new instance of InlineFNV64a.
func NewInlineFNV64a() InlineFNV64a {
	return offset64
}

// Write adds data to the running hash.
func (s *InlineFNV64a) Write(data []byte) (int, error) {
	hash := uint64(*s)
	for _, c := range data {
		hash ^= uint64(c)
		hash *= prime64
	}
	*s = InlineFNV64a(hash)
	return len(data), nil
}

// Sum64 returns the uint64 of the current resulting hash.
func (s *InlineFNV64a) Sum64() uint64 {
	return uint64(*s)
}
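The value of this port is that the hash state is a plain uint64, so hashing requires no heap allocation, while the digest matches the standard library's FNV-64a. A small sanity-check sketch:

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/influxdata/influxdb/models"
)

func main() {
	h := models.NewInlineFNV64a()
	h.Write([]byte("cpu,host=server01"))

	ref := fnv.New64a()
	ref.Write([]byte("cpu,host=server01"))

	// Same FNV-64a digest as the standard library, but h lives on the
	// stack: no hash.Hash64 interface value, no allocation.
	fmt.Println(h.Sum64() == ref.Sum64()) // true
}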
38 vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
package models // import "github.com/influxdata/influxdb/models"

import (
	"reflect"
	"strconv"
	"unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
	s := unsafeBytesToString(b)
	return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
	s := unsafeBytesToString(b)
	return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
	return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
	dst := reflect.StringHeader{
		Data: src.Data,
		Len:  src.Len,
	}
	s := *(*string)(unsafe.Pointer(&dst))
	return s
}
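These wrappers avoid the []byte-to-string copy that strconv would otherwise force on every parsed field. The helpers are unexported, but the same zero-copy idiom can be reproduced standalone; this is a sketch, and bytesToString is an illustrative name, not part of the vendored API. The usual caveat applies: the input slice must not be mutated while the resulting string is in use.

package main

import (
	"fmt"
	"strconv"
	"unsafe"
)

// bytesToString reinterprets b's backing array as a string without copying,
// relying on a string header being a prefix of a slice header. This is a
// shorter equivalent of the reflect-header trick above.
func bytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	raw := []byte("1502043868") // e.g. a timestamp sliced out of a line of input
	n, err := strconv.ParseInt(bytesToString(raw), 10, 64)
	fmt.Println(n, err) // 1502043868 <nil>
}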
2231 vendor/github.com/influxdata/influxdb/models/points.go (generated, vendored, new file)
File diff suppressed because it is too large
62 vendor/github.com/influxdata/influxdb/models/rows.go (generated, vendored, new file)
@@ -0,0 +1,62 @@
package models

import (
	"sort"
)

// Row represents a single row returned from the execution of a statement.
type Row struct {
	Name    string            `json:"name,omitempty"`
	Tags    map[string]string `json:"tags,omitempty"`
	Columns []string          `json:"columns,omitempty"`
	Values  [][]interface{}   `json:"values,omitempty"`
	Partial bool              `json:"partial,omitempty"`
}

// SameSeries returns true if r contains values for the same series as o.
func (r *Row) SameSeries(o *Row) bool {
	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
}

// tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
	h := NewInlineFNV64a()
	keys := r.tagsKeys()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(r.Tags[k]))
	}
	return h.Sum64()
}

// tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
	a := make([]string, 0, len(r.Tags))
	for k := range r.Tags {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}

// Rows represents a collection of rows. Rows implements sort.Interface.
type Rows []*Row

// Len implements sort.Interface.
func (p Rows) Len() int { return len(p) }

// Less implements sort.Interface.
func (p Rows) Less(i, j int) bool {
	// Sort by name first.
	if p[i].Name != p[j].Name {
		return p[i].Name < p[j].Name
	}

	// Sort by tag set hash. Tags don't have a meaningful sort order so we
	// just compute a hash and sort by that instead. This allows the tests
	// to receive rows in a predictable order every time.
	return p[i].tagsHash() < p[j].tagsHash()
}

// Swap implements sort.Interface.
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
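Sorting sketch for the above: Less orders by measurement name first and falls back to the FNV-64a tag-set hash, so the result is deterministic even though tag maps have no natural ordering. A minimal example, assuming the canonical import path:

package main

import (
	"fmt"
	"sort"

	"github.com/influxdata/influxdb/models"
)

func main() {
	rows := models.Rows{
		{Name: "mem", Tags: map[string]string{"host": "b"}},
		{Name: "cpu", Tags: map[string]string{"host": "b"}},
		{Name: "cpu", Tags: map[string]string{"host": "a"}},
	}
	sort.Sort(rows) // by name, then by tag-set hash

	for _, r := range rows {
		fmt.Println(r.Name, r.Tags)
	}
	// The "cpu" rows come first; their relative order follows the tag
	// hashes, which is deterministic but not lexicographic.
}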
42 vendor/github.com/influxdata/influxdb/models/statistic.go (generated, vendored, new file)
@@ -0,0 +1,42 @@
package models

// Statistic is the representation of a statistic used by the monitoring service.
type Statistic struct {
	Name   string                 `json:"name"`
	Tags   map[string]string      `json:"tags"`
	Values map[string]interface{} `json:"values"`
}

// NewStatistic returns an initialized Statistic.
func NewStatistic(name string) Statistic {
	return Statistic{
		Name:   name,
		Tags:   make(map[string]string),
		Values: make(map[string]interface{}),
	}
}

// StatisticTags is a map that can be merged with others without causing
// mutations to either map.
type StatisticTags map[string]string

// Merge creates a new map containing the merged contents of tags and t.
// If both tags and the receiver map contain the same key, the value in tags
// is used in the resulting map.
//
// Merge always returns a usable map.
func (t StatisticTags) Merge(tags map[string]string) map[string]string {
	// Add everything in tags to the result.
	out := make(map[string]string, len(tags))
	for k, v := range tags {
		out[k] = v
	}

	// Only add values from t that don't appear in tags.
	for k, v := range t {
		if _, ok := tags[k]; !ok {
			out[k] = v
		}
	}
	return out
}
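Merge sketch: on conflicting keys the argument map wins, and neither input is mutated, which is what makes StatisticTags safe to share as a default tag set.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	defaults := models.StatisticTags{"database": "_internal", "engine": "tsm1"}

	// The argument's value wins for "database"; "engine" is carried over.
	merged := defaults.Merge(map[string]string{"database": "traefik"})
	fmt.Println(merged)   // map[database:traefik engine:tsm1]
	fmt.Println(defaults) // map[database:_internal engine:tsm1] (unchanged)
}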
74 vendor/github.com/influxdata/influxdb/models/time.go (generated, vendored, new file)
@@ -0,0 +1,74 @@
package models

// Helper time methods since parsing time can easily overflow and we only support a
// specific time range.

import (
	"fmt"
	"math"
	"time"
)

const (
	// MinNanoTime is the minimum time that can be represented.
	//
	// 1677-09-21 00:12:43.145224194 +0000 UTC
	//
	// The two lowest minimum integers are used as sentinel values. The
	// minimum value needs to be used as a value lower than any other value for
	// comparisons and another separate value is needed to act as a sentinel
	// default value that is unusable by the user, but usable internally.
	// Because these two values need to be used for a special purpose, we do
	// not allow users to write points at these two times.
	MinNanoTime = int64(math.MinInt64) + 2

	// MaxNanoTime is the maximum time that can be represented.
	//
	// 2262-04-11 23:47:16.854775806 +0000 UTC
	//
	// The highest time represented by a nanosecond needs to be used for an
	// exclusive range in the shard group, so the maximum time needs to be one
	// less than the possible maximum number of nanoseconds representable by an
	// int64 so that we don't lose a point at that one time.
	MaxNanoTime = int64(math.MaxInt64) - 1
)

var (
	minNanoTime = time.Unix(0, MinNanoTime).UTC()
	maxNanoTime = time.Unix(0, MaxNanoTime).UTC()

	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
)

// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
// supported range.
func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
	mult := GetPrecisionMultiplier(precision)
	if t, ok := safeSignedMult(timestamp, mult); ok {
		tme := time.Unix(0, t).UTC()
		return tme, CheckTime(tme)
	}

	return time.Time{}, ErrTimeOutOfRange
}

// CheckTime checks that a time is within the safe range.
func CheckTime(t time.Time) error {
	if t.Before(minNanoTime) || t.After(maxNanoTime) {
		return ErrTimeOutOfRange
	}
	return nil
}

// Perform the multiplication and check to make sure it didn't overflow.
func safeSignedMult(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	if a == MinNanoTime || b == MaxNanoTime {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}
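Usage sketch for SafeCalcTime. It relies on GetPrecisionMultiplier from points.go (whose diff is suppressed above) to scale a timestamp in the given precision, such as seconds ("s"), into nanoseconds before range-checking:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// A seconds-precision timestamp is scaled to nanoseconds and validated.
	t, err := models.SafeCalcTime(1502043868, "s")
	fmt.Println(t, err) // 2017-08-06 18:24:28 +0000 UTC <nil>

	// A huge seconds value overflows the int64 nanosecond range, so the
	// overflow check in safeSignedMult trips and ErrTimeOutOfRange is returned.
	_, err = models.SafeCalcTime(int64(1)<<62, "s")
	fmt.Println(err)
}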