
Opentracing support

Michael 2018-01-10 17:48:04 +01:00 committed by Traefiker
parent 8394549857
commit 30ffba78e6
272 changed files with 44352 additions and 63 deletions

View file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2016 The OpenTracing Authors
Copyright (c) 2016 Bas van Beek
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -0,0 +1,228 @@
package zipkintracer
import (
"bytes"
"net/http"
"sync"
"time"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
// defaultHTTPTimeout is the default timeout for http requests
const defaultHTTPTimeout = time.Second * 5
// defaultHTTPBatchInterval in seconds
const defaultHTTPBatchInterval = 1
const defaultHTTPBatchSize = 100
const defaultHTTPMaxBacklog = 1000
// HTTPCollector implements Collector by forwarding spans to an HTTP server.
type HTTPCollector struct {
logger Logger
url string
client *http.Client
batchInterval time.Duration
batchSize int
maxBacklog int
batch []*zipkincore.Span
spanc chan *zipkincore.Span
quit chan struct{}
shutdown chan error
sendMutex *sync.Mutex
batchMutex *sync.Mutex
reqCallback RequestCallback
}
// RequestCallback receives the initialized request from the Collector before
// sending it over the wire. This allows one to plug in additional headers or
// do other customization.
type RequestCallback func(*http.Request)
// HTTPOption sets a parameter for the HTTPCollector
type HTTPOption func(c *HTTPCollector)
// HTTPLogger sets the logger used to report errors in the collection
// process. By default, a no-op logger is used, i.e. no errors are logged
// anywhere. It's important to set this option in a production service.
func HTTPLogger(logger Logger) HTTPOption {
return func(c *HTTPCollector) { c.logger = logger }
}
// HTTPTimeout sets maximum timeout for http request.
func HTTPTimeout(duration time.Duration) HTTPOption {
return func(c *HTTPCollector) { c.client.Timeout = duration }
}
// HTTPBatchSize sets the maximum batch size, after which a collect will be
// triggered. The default batch size is 100 traces.
func HTTPBatchSize(n int) HTTPOption {
return func(c *HTTPCollector) { c.batchSize = n }
}
// HTTPMaxBacklog sets the maximum backlog size. When the batch size reaches
// this threshold, spans from the beginning of the batch will be disposed of.
func HTTPMaxBacklog(n int) HTTPOption {
return func(c *HTTPCollector) { c.maxBacklog = n }
}
// HTTPBatchInterval sets the maximum duration we will buffer traces before
// emitting them to the collector. The default batch interval is 1 second.
func HTTPBatchInterval(d time.Duration) HTTPOption {
return func(c *HTTPCollector) { c.batchInterval = d }
}
// HTTPClient sets a custom http client to use.
func HTTPClient(client *http.Client) HTTPOption {
return func(c *HTTPCollector) { c.client = client }
}
// HTTPRequestCallback registers a callback function to adjust the collector
// *http.Request before it sends the request to Zipkin.
func HTTPRequestCallback(rc RequestCallback) HTTPOption {
return func(c *HTTPCollector) { c.reqCallback = rc }
}
// NewHTTPCollector returns a new HTTP-backed Collector. url should be an HTTP
// URL that accepts POST requests. The timeout, batch size, backlog and logger
// can be tuned through the HTTPOption functions above; the logger is used to
// log errors, such as send failures.
func NewHTTPCollector(url string, options ...HTTPOption) (Collector, error) {
c := &HTTPCollector{
logger: NewNopLogger(),
url: url,
client: &http.Client{Timeout: defaultHTTPTimeout},
batchInterval: defaultHTTPBatchInterval * time.Second,
batchSize: defaultHTTPBatchSize,
maxBacklog: defaultHTTPMaxBacklog,
batch: []*zipkincore.Span{},
spanc: make(chan *zipkincore.Span),
quit: make(chan struct{}, 1),
shutdown: make(chan error, 1),
sendMutex: &sync.Mutex{},
batchMutex: &sync.Mutex{},
}
for _, option := range options {
option(c)
}
go c.loop()
return c, nil
}
// Collect implements Collector.
func (c *HTTPCollector) Collect(s *zipkincore.Span) error {
c.spanc <- s
return nil
}
// Close implements Collector.
func (c *HTTPCollector) Close() error {
close(c.quit)
return <-c.shutdown
}
func httpSerialize(spans []*zipkincore.Span) *bytes.Buffer {
t := thrift.NewTMemoryBuffer()
p := thrift.NewTBinaryProtocolTransport(t)
if err := p.WriteListBegin(thrift.STRUCT, len(spans)); err != nil {
panic(err)
}
for _, s := range spans {
if err := s.Write(p); err != nil {
panic(err)
}
}
if err := p.WriteListEnd(); err != nil {
panic(err)
}
return t.Buffer
}
func (c *HTTPCollector) loop() {
var (
nextSend = time.Now().Add(c.batchInterval)
ticker = time.NewTicker(c.batchInterval / 10)
tickc = ticker.C
)
defer ticker.Stop()
for {
select {
case span := <-c.spanc:
currentBatchSize := c.append(span)
if currentBatchSize >= c.batchSize {
nextSend = time.Now().Add(c.batchInterval)
go c.send()
}
case <-tickc:
if time.Now().After(nextSend) {
nextSend = time.Now().Add(c.batchInterval)
go c.send()
}
case <-c.quit:
c.shutdown <- c.send()
return
}
}
}
func (c *HTTPCollector) append(span *zipkincore.Span) (newBatchSize int) {
c.batchMutex.Lock()
defer c.batchMutex.Unlock()
c.batch = append(c.batch, span)
if len(c.batch) > c.maxBacklog {
dispose := len(c.batch) - c.maxBacklog
c.logger.Log("msg", "backlog too long, disposing spans.", "count", dispose)
c.batch = c.batch[dispose:]
}
newBatchSize = len(c.batch)
return
}
func (c *HTTPCollector) send() error {
// in order to prevent sending the same batch twice
c.sendMutex.Lock()
defer c.sendMutex.Unlock()
// Select all current spans in the batch to be sent
c.batchMutex.Lock()
sendBatch := c.batch[:]
c.batchMutex.Unlock()
// Do not send an empty batch
if len(sendBatch) == 0 {
return nil
}
req, err := http.NewRequest(
"POST",
c.url,
httpSerialize(sendBatch))
if err != nil {
c.logger.Log("err", err.Error())
return err
}
req.Header.Set("Content-Type", "application/x-thrift")
if c.reqCallback != nil {
c.reqCallback(req)
}
if _, err = c.client.Do(req); err != nil {
c.logger.Log("err", err.Error())
return err
}
// Remove sent spans from the batch
c.batchMutex.Lock()
c.batch = c.batch[len(sendBatch):]
c.batchMutex.Unlock()
return nil
}
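
A minimal usage sketch for the HTTP collector above; the Zipkin endpoint URL and the option values are illustrative, not prescribed by this file:

package main

import (
    "log"
    "os"
    "time"

    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
    // Build a collector that batches spans and POSTs them as thrift to Zipkin.
    collector, err := zipkintracer.NewHTTPCollector(
        "http://localhost:9411/api/v1/spans", // assumed local Zipkin endpoint
        zipkintracer.HTTPLogger(zipkintracer.LogWrapper(log.New(os.Stderr, "zipkin: ", log.LstdFlags))),
        zipkintracer.HTTPBatchSize(50),
        zipkintracer.HTTPBatchInterval(2*time.Second),
        zipkintracer.HTTPMaxBacklog(500),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer collector.Close()
}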

View file

@@ -0,0 +1,95 @@
package zipkintracer
import (
"github.com/Shopify/sarama"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
// defaultKafkaTopic sets the standard Kafka topic our Collector will publish
// on. The default topic for zipkin-receiver-kafka is "zipkin", see:
// https://github.com/openzipkin/zipkin/tree/master/zipkin-receiver-kafka
const defaultKafkaTopic = "zipkin"
// KafkaCollector implements Collector by publishing spans to a Kafka
// broker.
type KafkaCollector struct {
producer sarama.AsyncProducer
logger Logger
topic string
}
// KafkaOption sets a parameter for the KafkaCollector
type KafkaOption func(c *KafkaCollector)
// KafkaLogger sets the logger used to report errors in the collection
// process. By default, a no-op logger is used, i.e. no errors are logged
// anywhere. It's important to set this option.
func KafkaLogger(logger Logger) KafkaOption {
return func(c *KafkaCollector) { c.logger = logger }
}
// KafkaProducer sets the producer used to produce to Kafka.
func KafkaProducer(p sarama.AsyncProducer) KafkaOption {
return func(c *KafkaCollector) { c.producer = p }
}
// KafkaTopic sets the kafka topic to attach the collector producer on.
func KafkaTopic(t string) KafkaOption {
return func(c *KafkaCollector) { c.topic = t }
}
// NewKafkaCollector returns a new Kafka-backed Collector. addrs should be a
// slice of TCP endpoints of the form "host:port".
func NewKafkaCollector(addrs []string, options ...KafkaOption) (Collector, error) {
c := &KafkaCollector{
logger: NewNopLogger(),
topic: defaultKafkaTopic,
}
for _, option := range options {
option(c)
}
if c.producer == nil {
p, err := sarama.NewAsyncProducer(addrs, nil)
if err != nil {
return nil, err
}
c.producer = p
}
go c.logErrors()
return c, nil
}
func (c *KafkaCollector) logErrors() {
for pe := range c.producer.Errors() {
_ = c.logger.Log("msg", pe.Msg, "err", pe.Err, "result", "failed to produce msg")
}
}
// Collect implements Collector.
func (c *KafkaCollector) Collect(s *zipkincore.Span) error {
c.producer.Input() <- &sarama.ProducerMessage{
Topic: c.topic,
Key: nil,
Value: sarama.ByteEncoder(kafkaSerialize(s)),
}
return nil
}
// Close implements Collector.
func (c *KafkaCollector) Close() error {
return c.producer.Close()
}
func kafkaSerialize(s *zipkincore.Span) []byte {
t := thrift.NewTMemoryBuffer()
p := thrift.NewTBinaryProtocolTransport(t)
if err := s.Write(p); err != nil {
panic(err)
}
return t.Buffer.Bytes()
}
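
A minimal usage sketch for the Kafka collector above (broker address and topic are illustrative):

package main

import (
    "log"

    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
    // Spans are serialized to thrift and published asynchronously via sarama.
    collector, err := zipkintracer.NewKafkaCollector(
        []string{"127.0.0.1:9092"},
        zipkintracer.KafkaTopic("zipkin"),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer collector.Close()
}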

View file

@@ -0,0 +1,234 @@
package zipkintracer
import (
"encoding/base64"
"fmt"
"net"
"sync"
"time"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
const defaultScribeCategory = "zipkin"
// defaultScribeBatchInterval in seconds
const defaultScribeBatchInterval = 1
const defaultScribeBatchSize = 100
const defaultScribeMaxBacklog = 1000
// ScribeCollector implements Collector by forwarding spans to a Scribe
// service, in batches.
type ScribeCollector struct {
logger Logger
category string
factory func() (scribe.Scribe, error)
client scribe.Scribe
batchInterval time.Duration
batchSize int
maxBacklog int
batch []*scribe.LogEntry
spanc chan *zipkincore.Span
quit chan struct{}
shutdown chan error
sendMutex *sync.Mutex
batchMutex *sync.Mutex
}
// ScribeOption sets a parameter for the ScribeCollector.
type ScribeOption func(s *ScribeCollector)
// ScribeLogger sets the logger used to report errors in the collection
// process. By default, a no-op logger is used, i.e. no errors are logged
// anywhere. It's important to set this option in a production service.
func ScribeLogger(logger Logger) ScribeOption {
return func(s *ScribeCollector) { s.logger = logger }
}
// ScribeBatchSize sets the maximum batch size, after which a collect will be
// triggered. The default batch size is 100 traces.
func ScribeBatchSize(n int) ScribeOption {
return func(s *ScribeCollector) { s.batchSize = n }
}
// ScribeMaxBacklog sets the maximum backlog size. When the batch size reaches
// this threshold, spans from the beginning of the batch will be disposed of.
func ScribeMaxBacklog(n int) ScribeOption {
return func(c *ScribeCollector) { c.maxBacklog = n }
}
// ScribeBatchInterval sets the maximum duration we will buffer traces before
// emitting them to the collector. The default batch interval is 1 second.
func ScribeBatchInterval(d time.Duration) ScribeOption {
return func(s *ScribeCollector) { s.batchInterval = d }
}
// ScribeCategory sets the Scribe category used to transmit the spans.
func ScribeCategory(category string) ScribeOption {
return func(s *ScribeCollector) { s.category = category }
}
// NewScribeCollector returns a new Scribe-backed Collector. addr should be a
// TCP endpoint of the form "host:port". timeout is passed to the Thrift dial
// function NewTSocketFromAddrTimeout. batchSize and batchInterval control the
// maximum size and interval of a batch of spans; as soon as either limit is
// reached, the batch is sent. The logger is used to log errors, such as batch
// send failures; users should provide an appropriate context, if desired.
func NewScribeCollector(addr string, timeout time.Duration, options ...ScribeOption) (Collector, error) {
factory := scribeClientFactory(addr, timeout)
client, err := factory()
if err != nil {
return nil, err
}
c := &ScribeCollector{
logger: NewNopLogger(),
category: defaultScribeCategory,
factory: factory,
client: client,
batchInterval: defaultScribeBatchInterval * time.Second,
batchSize: defaultScribeBatchSize,
maxBacklog: defaultScribeMaxBacklog,
batch: []*scribe.LogEntry{},
spanc: make(chan *zipkincore.Span),
quit: make(chan struct{}),
shutdown: make(chan error, 1),
sendMutex: &sync.Mutex{},
batchMutex: &sync.Mutex{},
}
for _, option := range options {
option(c)
}
go c.loop()
return c, nil
}
// Collect implements Collector.
func (c *ScribeCollector) Collect(s *zipkincore.Span) error {
c.spanc <- s
return nil // accepted
}
// Close implements Collector.
func (c *ScribeCollector) Close() error {
close(c.quit)
return <-c.shutdown
}
func scribeSerialize(s *zipkincore.Span) string {
t := thrift.NewTMemoryBuffer()
p := thrift.NewTBinaryProtocolTransport(t)
if err := s.Write(p); err != nil {
panic(err)
}
return base64.StdEncoding.EncodeToString(t.Buffer.Bytes())
}
func (c *ScribeCollector) loop() {
var (
nextSend = time.Now().Add(c.batchInterval)
ticker = time.NewTicker(c.batchInterval / 10)
tickc = ticker.C
)
defer ticker.Stop()
for {
select {
case span := <-c.spanc:
currentBatchSize := c.append(span)
if currentBatchSize >= c.batchSize {
nextSend = time.Now().Add(c.batchInterval)
go c.send()
}
case <-tickc:
if time.Now().After(nextSend) {
nextSend = time.Now().Add(c.batchInterval)
go c.send()
}
case <-c.quit:
c.shutdown <- c.send()
return
}
}
}
func (c *ScribeCollector) append(span *zipkincore.Span) (newBatchSize int) {
c.batchMutex.Lock()
defer c.batchMutex.Unlock()
c.batch = append(c.batch, &scribe.LogEntry{
Category: c.category,
Message: scribeSerialize(span),
})
if len(c.batch) > c.maxBacklog {
dispose := len(c.batch) - c.maxBacklog
c.logger.Log("Backlog too long, disposing spans.", "count", dispose)
c.batch = c.batch[dispose:]
}
newBatchSize = len(c.batch)
return
}
func (c *ScribeCollector) send() error {
// in order to prevent sending the same batch twice
c.sendMutex.Lock()
defer c.sendMutex.Unlock()
// Select all current spans in the batch to be sent
c.batchMutex.Lock()
sendBatch := c.batch[:]
c.batchMutex.Unlock()
// Do not send an empty batch
if len(sendBatch) == 0 {
return nil
}
if c.client == nil {
var err error
if c.client, err = c.factory(); err != nil {
_ = c.logger.Log("err", fmt.Sprintf("during reconnect: %v", err))
return err
}
}
if rc, err := c.client.Log(sendBatch); err != nil {
c.client = nil
_ = c.logger.Log("err", fmt.Sprintf("during Log: %v", err))
return err
} else if rc != scribe.ResultCode_OK {
// probably transient error; don't reset client
_ = c.logger.Log("err", fmt.Sprintf("remote returned %s", rc))
}
// Remove sent spans from the batch
c.batchMutex.Lock()
c.batch = c.batch[len(sendBatch):]
c.batchMutex.Unlock()
return nil
}
func scribeClientFactory(addr string, timeout time.Duration) func() (scribe.Scribe, error) {
return func() (scribe.Scribe, error) {
a, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err
}
socket := thrift.NewTSocketFromAddrTimeout(a, timeout)
transport := thrift.NewTFramedTransport(socket)
if err := transport.Open(); err != nil {
_ = socket.Close()
return nil, err
}
proto := thrift.NewTBinaryProtocolTransport(transport)
client := scribe.NewScribeClientProtocol(transport, proto, proto)
return client, nil
}
}
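
A minimal usage sketch for the Scribe collector above (address, timeout and option values are illustrative):

package main

import (
    "log"
    "time"

    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
    // Dial the Scribe endpoint over framed thrift and batch spans to it.
    collector, err := zipkintracer.NewScribeCollector(
        "127.0.0.1:9410",
        5*time.Second,
        zipkintracer.ScribeBatchSize(50),
        zipkintracer.ScribeBatchInterval(2*time.Second),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer collector.Close()
}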

View file

@@ -0,0 +1,77 @@
package zipkintracer
import (
"strings"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
// Collector represents a Zipkin trace collector, which is probably a set of
// remote endpoints.
type Collector interface {
Collect(*zipkincore.Span) error
Close() error
}
// NopCollector implements Collector but performs no work.
type NopCollector struct{}
// Collect implements Collector.
func (NopCollector) Collect(*zipkincore.Span) error { return nil }
// Close implements Collector.
func (NopCollector) Close() error { return nil }
// MultiCollector implements Collector by sending spans to all collectors.
type MultiCollector []Collector
// Collect implements Collector.
func (c MultiCollector) Collect(s *zipkincore.Span) error {
return c.aggregateErrors(func(coll Collector) error { return coll.Collect(s) })
}
// Close implements Collector.
func (c MultiCollector) Close() error {
return c.aggregateErrors(func(coll Collector) error { return coll.Close() })
}
func (c MultiCollector) aggregateErrors(f func(c Collector) error) error {
var e *collectionError
for i, collector := range c {
if err := f(collector); err != nil {
if e == nil {
e = &collectionError{
errs: make([]error, len(c)),
}
}
e.errs[i] = err
}
}
if e == nil {
// all collectors succeeded; return a plain nil instead of a typed nil error
return nil
}
return e
}
// CollectionError represents an array of errors returned by one or more
// failed Collector methods.
type CollectionError interface {
Error() string
GetErrors() []error
}
type collectionError struct {
errs []error
}
func (c *collectionError) Error() string {
errs := []string{}
for _, err := range c.errs {
if err != nil {
errs = append(errs, err.Error())
}
}
return strings.Join(errs, "; ")
}
// GetErrors implements CollectionError
func (c *collectionError) GetErrors() []error {
return c.errs
}

View file

@@ -0,0 +1,61 @@
package zipkintracer
import (
"github.com/openzipkin/zipkin-go-opentracing/flag"
"github.com/openzipkin/zipkin-go-opentracing/types"
)
// SpanContext holds the basic Span metadata.
type SpanContext struct {
// A probabilistically unique identifier for a [multi-span] trace.
TraceID types.TraceID
// A probabilistically unique identifier for a span.
SpanID uint64
// Whether the trace is sampled.
Sampled bool
// The span's associated baggage.
Baggage map[string]string // initialized on first use
// The SpanID of this Context's parent, or nil if there is no parent.
ParentSpanID *uint64
// Flags provides the ability to create and communicate feature flags.
Flags flag.Flags
// Whether the span is owned by the current process
Owner bool
}
// ForeachBaggageItem belongs to the opentracing.SpanContext interface
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
for k, v := range c.Baggage {
if !handler(k, v) {
break
}
}
}
// WithBaggageItem returns an entirely new basictracer SpanContext with the
// given key:value baggage pair set.
func (c SpanContext) WithBaggageItem(key, val string) SpanContext {
var newBaggage map[string]string
if c.Baggage == nil {
newBaggage = map[string]string{key: val}
} else {
newBaggage = make(map[string]string, len(c.Baggage)+1)
for k, v := range c.Baggage {
newBaggage[k] = v
}
newBaggage[key] = val
}
var parentSpanID *uint64
if c.ParentSpanID != nil {
parentSpanID = new(uint64)
*parentSpanID = *c.ParentSpanID
}
// Use positional parameters so the compiler will help catch new fields.
return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage, parentSpanID, c.Flags, c.Owner}
}
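
A short sketch showing that WithBaggageItem returns a copy and leaves the original context untouched (the ids below are arbitrary):

package main

import (
    "fmt"

    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
    "github.com/openzipkin/zipkin-go-opentracing/types"
)

func main() {
    parent := zipkintracer.SpanContext{
        TraceID: types.TraceID{Low: 1},
        SpanID:  2,
        Sampled: true,
    }
    child := parent.WithBaggageItem("user-id", "42")
    fmt.Println(parent.Baggage) // map[]: the original baggage map stays nil
    fmt.Println(child.Baggage)  // map[user-id:42]

    // ForeachBaggageItem iterates until the handler returns false.
    child.ForeachBaggageItem(func(k, v string) bool {
        fmt.Println(k, "=", v)
        return true
    })
}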

View file

@@ -0,0 +1,78 @@
package zipkintracer
import (
"bytes"
"fmt"
"runtime"
"strconv"
"sync"
)
const debugGoroutineIDTag = "_initial_goroutine"
type errAssertionFailed struct {
span *spanImpl
msg string
}
// Error implements the error interface.
func (err *errAssertionFailed) Error() string {
return fmt.Sprintf("%s:\n%+v", err.msg, err.span)
}
func (s *spanImpl) Lock() {
s.Mutex.Lock()
s.maybeAssertSanityLocked()
}
func (s *spanImpl) maybeAssertSanityLocked() {
if s.tracer == nil {
s.Mutex.Unlock()
panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"})
}
if s.tracer.options.debugAssertSingleGoroutine {
startID := curGoroutineID()
curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64)
if !ok {
// This is likely invoked in the context of the SetTag which sets
// debugGoroutineTag.
return
}
if startID != curID {
s.Mutex.Unlock()
panic(&errAssertionFailed{
span: s,
msg: fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID),
})
}
}
}
var goroutineSpace = []byte("goroutine ")
var littleBuf = sync.Pool{
New: func() interface{} {
buf := make([]byte, 64)
return &buf
},
}
// Credit to @bradfitz:
// https://github.com/golang/net/blob/master/http2/gotrack.go#L51
func curGoroutineID() uint64 {
bp := littleBuf.Get().(*[]byte)
defer littleBuf.Put(bp)
b := *bp
b = b[:runtime.Stack(b, false)]
// Parse the 4707 out of "goroutine 4707 ["
b = bytes.TrimPrefix(b, goroutineSpace)
i := bytes.IndexByte(b, ' ')
if i < 0 {
panic(fmt.Sprintf("No space found in %q", b))
}
b = b[:i]
n, err := strconv.ParseUint(string(b), 10, 64)
if err != nil {
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
}
return n
}

View file

@@ -0,0 +1,62 @@
package zipkintracer
import "github.com/opentracing/opentracing-go"
// A SpanEvent is emitted when a mutating command is called on a Span.
type SpanEvent interface{}
// EventCreate is emitted when a Span is created.
type EventCreate struct{ OperationName string }
// EventTag is received when SetTag is called.
type EventTag struct {
Key string
Value interface{}
}
// EventBaggage is received when SetBaggageItem is called.
type EventBaggage struct {
Key, Value string
}
// EventLogFields is received when LogFields or LogKV is called.
type EventLogFields opentracing.LogRecord
// EventLog is received when Log (or one of its derivatives) is called.
//
// DEPRECATED
type EventLog opentracing.LogData
// EventFinish is received when Finish is called.
type EventFinish RawSpan
func (s *spanImpl) onCreate(opName string) {
if s.event != nil {
s.event(EventCreate{OperationName: opName})
}
}
func (s *spanImpl) onTag(key string, value interface{}) {
if s.event != nil {
s.event(EventTag{Key: key, Value: value})
}
}
func (s *spanImpl) onLog(ld opentracing.LogData) {
if s.event != nil {
s.event(EventLog(ld))
}
}
func (s *spanImpl) onLogFields(lr opentracing.LogRecord) {
if s.event != nil {
s.event(EventLogFields(lr))
}
}
func (s *spanImpl) onBaggage(key, value string) {
if s.event != nil {
s.event(EventBaggage{Key: key, Value: value})
}
}
func (s *spanImpl) onFinish(sp RawSpan) {
if s.event != nil {
s.event(EventFinish(sp))
}
}

View file

@@ -0,0 +1,39 @@
package flag
// Flags provides the ability to create and communicate feature flags.
type Flags uint64
// Flags is a bitset
const (
Debug Flags = 1 << 0
// All flags below deal with binaryPropagators. They will be discarded in the
// textMapPropagator (not read and not set)
// SamplingSet and Sampled handle Sampled tribool logic for interop with
// instrumenting libraries / propagation channels not using a separate Sampled
// header and potentially encoding this in flags.
//
// When we receive a flag we do this:
// 1. Sampled bit is set => true
// 2. Sampled bit is not set => inspect SamplingSet bit.
// 2a. SamplingSet bit is set => false
// 2b. SamplingSet bit is not set => null
// Note on 2b.: depending on the propagator having a separate Sampled header
// we either assume Sampling is false or unknown. In the latter case we will
// run our sampler even though we are not the root of the trace.
//
// When propagating to a downstream service we will always be explicit and
// will provide a set SamplingSet bit when using our binary propagator.
SamplingSet Flags = 1 << 1
Sampled Flags = 1 << 2
// When set, we can ignore the value of the parentId. This is used for binary
// fixed width transports or transports like proto3 that return a default
// value if a value has not been set (thus not enabling you to distinguish
// between the value being set to the default or not set at all).
//
// While many zipkin systems re-use a trace id as the root span id, we know
// that some don't. With this flag, we can tell for sure if the span is root
// as opposed to the convention trace id == span id == parent id.
IsRoot Flags = 1 << 3
)
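
A hedged sketch of the Sampled/SamplingSet tribool logic described in the comments above; interpretSampling is a hypothetical helper, not part of this package:

package main

import (
    "fmt"

    "github.com/openzipkin/zipkin-go-opentracing/flag"
)

// interpretSampling applies the receive-side rules from the comment above:
// Sampled bit set => sampled; only SamplingSet set => explicitly not sampled;
// neither set => unknown (nil), so the tracer may run its own sampler.
func interpretSampling(f flag.Flags) *bool {
    if f&flag.Sampled == flag.Sampled {
        v := true
        return &v
    }
    if f&flag.SamplingSet == flag.SamplingSet {
        v := false
        return &v
    }
    return nil
}

func main() {
    for _, f := range []flag.Flags{flag.SamplingSet | flag.Sampled, flag.SamplingSet, 0} {
        if v := interpretSampling(f); v != nil {
            fmt.Println(*v)
        } else {
            fmt.Println("unknown")
        }
    }
    // prints: true, false, unknown
}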

View file

@@ -0,0 +1,113 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016 Bas van Beek
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zipkintracer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/go-logfmt/logfmt"
"github.com/opentracing/opentracing-go/log"
)
var errEventLogNotFound = errors.New("event log field not found")
type fieldsAsMap map[string]string
// MaterializeWithJSON converts log Fields into JSON string
func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
fields := fieldsAsMap(make(map[string]string, len(logFields)))
for _, field := range logFields {
field.Marshal(fields)
}
return json.Marshal(fields)
}
// MaterializeWithLogFmt converts log Fields into LogFmt string
func MaterializeWithLogFmt(logFields []log.Field) ([]byte, error) {
var (
buffer = bytes.NewBuffer(nil)
encoder = logfmt.NewEncoder(buffer)
)
for _, field := range logFields {
if err := encoder.EncodeKeyval(field.Key(), field.Value()); err != nil {
encoder.EncodeKeyval(field.Key(), err.Error())
}
}
return buffer.Bytes(), nil
}
// StrictZipkinMaterializer will only record a log.Field of type "event".
func StrictZipkinMaterializer(logFields []log.Field) ([]byte, error) {
for _, field := range logFields {
if field.Key() == "event" {
return []byte(fmt.Sprintf("%+v", field.Value())), nil
}
}
return nil, errEventLogNotFound
}
func (ml fieldsAsMap) EmitString(key, value string) {
ml[key] = value
}
func (ml fieldsAsMap) EmitBool(key string, value bool) {
ml[key] = fmt.Sprintf("%t", value)
}
func (ml fieldsAsMap) EmitInt(key string, value int) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitInt32(key string, value int32) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitInt64(key string, value int64) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
ml[key] = fmt.Sprintf("%f", value)
}
func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
ml[key] = fmt.Sprintf("%f", value)
}
func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
ml[key] = fmt.Sprintf("%+v", value)
}
func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
value(ml)
}

View file

@@ -0,0 +1,64 @@
package zipkintracer
import (
"errors"
"fmt"
"log"
"strings"
)
// ErrMissingValue is used as the value for the last key when the logging
// parameters are not even in number.
var ErrMissingValue = errors.New("(MISSING)")
// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// The signature is compatible with the Go kit log package.
type Logger interface {
Log(keyvals ...interface{}) error
}
// NewNopLogger provides a Logger that discards all Log data sent to it.
func NewNopLogger() Logger {
return &nopLogger{}
}
// LogWrapper wraps a standard library logger into a Logger compatible with this
// package.
func LogWrapper(l *log.Logger) Logger {
return &wrappedLogger{l: l}
}
// wrappedLogger implements Logger
type wrappedLogger struct {
l *log.Logger
}
// Log implements Logger
func (l *wrappedLogger) Log(k ...interface{}) error {
if len(k)%2 == 1 {
k = append(k, ErrMissingValue)
}
o := make([]string, len(k)/2)
for i := 0; i < len(k); i += 2 {
o[i/2] = fmt.Sprintf("%s=%q", k[i], k[i+1])
}
l.l.Println(strings.Join(o, " "))
return nil
}
// nopLogger implements Logger
type nopLogger struct{}
// Log implements Logger
func (*nopLogger) Log(_ ...interface{}) error { return nil }
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc func(...interface{}) error
// Log implements Logger by calling f(keyvals...).
func (f LoggerFunc) Log(keyvals ...interface{}) error {
return f(keyvals...)
}

View file

@@ -0,0 +1,52 @@
package zipkintracer
import (
opentracing "github.com/opentracing/opentracing-go"
otobserver "github.com/opentracing-contrib/go-observer"
)
// observer is a dispatcher to other observers
type observer struct {
observers []otobserver.Observer
}
// spanObserver is a dispatcher to other span observers
type spanObserver struct {
observers []otobserver.SpanObserver
}
func (o observer) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (otobserver.SpanObserver, bool) {
var spanObservers []otobserver.SpanObserver
for _, obs := range o.observers {
spanObs, ok := obs.OnStartSpan(sp, operationName, options)
if ok {
if spanObservers == nil {
spanObservers = make([]otobserver.SpanObserver, 0, len(o.observers))
}
spanObservers = append(spanObservers, spanObs)
}
}
if len(spanObservers) == 0 {
return nil, false
}
return spanObserver{observers: spanObservers}, true
}
func (o spanObserver) OnSetOperationName(operationName string) {
for _, obs := range o.observers {
obs.OnSetOperationName(operationName)
}
}
func (o spanObserver) OnSetTag(key string, value interface{}) {
for _, obs := range o.observers {
obs.OnSetTag(key, value)
}
}
func (o spanObserver) OnFinish(options opentracing.FinishOptions) {
for _, obs := range o.observers {
obs.OnFinish(options)
}
}

View file

@@ -0,0 +1,68 @@
package zipkintracer
import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/openzipkin/zipkin-go-opentracing/flag"
"github.com/openzipkin/zipkin-go-opentracing/types"
)
type accessorPropagator struct {
tracer *tracerImpl
}
// DelegatingCarrier is a flexible carrier interface which can be implemented
// by types which have a means of storing the trace metadata and already know
// how to serialize themselves (for example, protocol buffers).
type DelegatingCarrier interface {
SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags)
State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags)
SetBaggageItem(key, value string)
GetBaggage(func(key, value string))
}
func (p *accessorPropagator) Inject(
spanContext opentracing.SpanContext,
carrier interface{},
) error {
dc, ok := carrier.(DelegatingCarrier)
if !ok || dc == nil {
return opentracing.ErrInvalidCarrier
}
sc, ok := spanContext.(SpanContext)
if !ok {
return opentracing.ErrInvalidSpanContext
}
dc.SetState(sc.TraceID, sc.SpanID, sc.ParentSpanID, sc.Sampled, sc.Flags)
for k, v := range sc.Baggage {
dc.SetBaggageItem(k, v)
}
return nil
}
func (p *accessorPropagator) Extract(
carrier interface{},
) (opentracing.SpanContext, error) {
dc, ok := carrier.(DelegatingCarrier)
if !ok || dc == nil {
return nil, opentracing.ErrInvalidCarrier
}
traceID, spanID, parentSpanID, sampled, flags := dc.State()
sc := SpanContext{
TraceID: traceID,
SpanID: spanID,
Sampled: sampled,
Baggage: nil,
ParentSpanID: parentSpanID,
Flags: flags,
}
dc.GetBaggage(func(k, v string) {
if sc.Baggage == nil {
sc.Baggage = map[string]string{}
}
sc.Baggage[k] = v
})
return sc, nil
}
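
A sketch of a DelegatingCarrier implementation; memoryCarrier is a hypothetical type that simply stores the propagated state in struct fields:

package main

import (
    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
    "github.com/openzipkin/zipkin-go-opentracing/flag"
    "github.com/openzipkin/zipkin-go-opentracing/types"
)

// memoryCarrier keeps the propagated trace state and baggage in memory.
type memoryCarrier struct {
    traceID      types.TraceID
    spanID       uint64
    parentSpanID *uint64
    sampled      bool
    flags        flag.Flags
    baggage      map[string]string
}

func (c *memoryCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
    c.traceID, c.spanID, c.parentSpanID, c.sampled, c.flags = traceID, spanID, parentSpanID, sampled, flags
}

func (c *memoryCarrier) State() (types.TraceID, uint64, *uint64, bool, flag.Flags) {
    return c.traceID, c.spanID, c.parentSpanID, c.sampled, c.flags
}

func (c *memoryCarrier) SetBaggageItem(key, value string) {
    if c.baggage == nil {
        c.baggage = map[string]string{}
    }
    c.baggage[key] = value
}

func (c *memoryCarrier) GetBaggage(f func(key, value string)) {
    for k, v := range c.baggage {
        f(k, v)
    }
}

// The compile-time check below is the point of the sketch.
var _ zipkintracer.DelegatingCarrier = (*memoryCarrier)(nil)

func main() {}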

View file

@@ -0,0 +1,252 @@
package zipkintracer
import (
"encoding/binary"
"io"
"strconv"
"strings"
"github.com/gogo/protobuf/proto"
opentracing "github.com/opentracing/opentracing-go"
"github.com/openzipkin/zipkin-go-opentracing/flag"
"github.com/openzipkin/zipkin-go-opentracing/types"
"github.com/openzipkin/zipkin-go-opentracing/wire"
)
type textMapPropagator struct {
tracer *tracerImpl
}
type binaryPropagator struct {
tracer *tracerImpl
}
const (
prefixTracerState = "x-b3-" // we default to interop with non-opentracing zipkin tracers
prefixBaggage = "ot-baggage-"
tracerStateFieldCount = 3 // not 5, X-B3-ParentSpanId is optional and we allow optional Sampled header
zipkinTraceID = prefixTracerState + "traceid"
zipkinSpanID = prefixTracerState + "spanid"
zipkinParentSpanID = prefixTracerState + "parentspanid"
zipkinSampled = prefixTracerState + "sampled"
zipkinFlags = prefixTracerState + "flags"
)
func (p *textMapPropagator) Inject(
spanContext opentracing.SpanContext,
opaqueCarrier interface{},
) error {
sc, ok := spanContext.(SpanContext)
if !ok {
return opentracing.ErrInvalidSpanContext
}
carrier, ok := opaqueCarrier.(opentracing.TextMapWriter)
if !ok {
return opentracing.ErrInvalidCarrier
}
carrier.Set(zipkinTraceID, sc.TraceID.ToHex())
carrier.Set(zipkinSpanID, strconv.FormatUint(sc.SpanID, 16))
carrier.Set(zipkinSampled, strconv.FormatBool(sc.Sampled))
if sc.ParentSpanID != nil {
// we only set ParentSpanID header if there is a parent span
carrier.Set(zipkinParentSpanID, strconv.FormatUint(*sc.ParentSpanID, 16))
}
// we only need to inject the debug flag if set. see flag package for details.
flags := sc.Flags & flag.Debug
carrier.Set(zipkinFlags, strconv.FormatUint(uint64(flags), 10))
for k, v := range sc.Baggage {
carrier.Set(prefixBaggage+k, v)
}
return nil
}
func (p *textMapPropagator) Extract(
opaqueCarrier interface{},
) (opentracing.SpanContext, error) {
carrier, ok := opaqueCarrier.(opentracing.TextMapReader)
if !ok {
return nil, opentracing.ErrInvalidCarrier
}
requiredFieldCount := 0
var (
traceID types.TraceID
spanID uint64
sampled bool
parentSpanID *uint64
flags flag.Flags
err error
)
decodedBaggage := make(map[string]string)
err = carrier.ForeachKey(func(k, v string) error {
switch strings.ToLower(k) {
case zipkinTraceID:
traceID, err = types.TraceIDFromHex(v)
if err != nil {
return opentracing.ErrSpanContextCorrupted
}
case zipkinSpanID:
spanID, err = strconv.ParseUint(v, 16, 64)
if err != nil {
return opentracing.ErrSpanContextCorrupted
}
case zipkinParentSpanID:
var id uint64
id, err = strconv.ParseUint(v, 16, 64)
if err != nil {
return opentracing.ErrSpanContextCorrupted
}
parentSpanID = &id
case zipkinSampled:
sampled, err = strconv.ParseBool(v)
if err != nil {
return opentracing.ErrSpanContextCorrupted
}
// Sampled header was explicitly set
flags |= flag.SamplingSet
case zipkinFlags:
var f uint64
f, err = strconv.ParseUint(v, 10, 64)
if err != nil {
return opentracing.ErrSpanContextCorrupted
}
if flag.Flags(f)&flag.Debug == flag.Debug {
flags |= flag.Debug
}
default:
lowercaseK := strings.ToLower(k)
if strings.HasPrefix(lowercaseK, prefixBaggage) {
decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v
}
// Balance off the requiredFieldCount++ just below...
requiredFieldCount--
}
requiredFieldCount++
return nil
})
if err != nil {
return nil, err
}
if requiredFieldCount < tracerStateFieldCount {
if requiredFieldCount == 0 {
return nil, opentracing.ErrSpanContextNotFound
}
return nil, opentracing.ErrSpanContextCorrupted
}
// check if Sample state was communicated through the Flags bitset
if !sampled && flags&flag.Sampled == flag.Sampled {
sampled = true
}
return SpanContext{
TraceID: traceID,
SpanID: spanID,
Sampled: sampled,
Baggage: decodedBaggage,
ParentSpanID: parentSpanID,
Flags: flags,
}, nil
}
func (p *binaryPropagator) Inject(
spanContext opentracing.SpanContext,
opaqueCarrier interface{},
) error {
sc, ok := spanContext.(SpanContext)
if !ok {
return opentracing.ErrInvalidSpanContext
}
carrier, ok := opaqueCarrier.(io.Writer)
if !ok {
return opentracing.ErrInvalidCarrier
}
state := wire.TracerState{}
state.TraceId = sc.TraceID.Low
state.TraceIdHigh = sc.TraceID.High
state.SpanId = sc.SpanID
state.Sampled = sc.Sampled
state.BaggageItems = sc.Baggage
// encode the debug bit
flags := sc.Flags & flag.Debug
if sc.ParentSpanID != nil {
state.ParentSpanId = *sc.ParentSpanID
} else {
// root span...
state.ParentSpanId = 0
flags |= flag.IsRoot
}
// we explicitly inform our sampling state downstream
flags |= flag.SamplingSet
if sc.Sampled {
flags |= flag.Sampled
}
state.Flags = uint64(flags)
b, err := proto.Marshal(&state)
if err != nil {
return err
}
// Write the length of the marshalled binary to the writer.
length := uint32(len(b))
if err = binary.Write(carrier, binary.BigEndian, &length); err != nil {
return err
}
_, err = carrier.Write(b)
return err
}
func (p *binaryPropagator) Extract(
opaqueCarrier interface{},
) (opentracing.SpanContext, error) {
carrier, ok := opaqueCarrier.(io.Reader)
if !ok {
return nil, opentracing.ErrInvalidCarrier
}
// Read the length of marshalled binary. io.ReadAll isn't that performant
// since it keeps resizing the underlying buffer as it encounters more bytes
// to read. By reading the length, we can allocate a fixed sized buf and read
// the exact amount of bytes into it.
var length uint32
if err := binary.Read(carrier, binary.BigEndian, &length); err != nil {
return nil, opentracing.ErrSpanContextCorrupted
}
buf := make([]byte, length)
if n, err := carrier.Read(buf); err != nil {
if n > 0 {
return nil, opentracing.ErrSpanContextCorrupted
}
return nil, opentracing.ErrSpanContextNotFound
}
ctx := wire.TracerState{}
if err := proto.Unmarshal(buf, &ctx); err != nil {
return nil, opentracing.ErrSpanContextCorrupted
}
flags := flag.Flags(ctx.Flags)
if flags&flag.Sampled == flag.Sampled {
ctx.Sampled = true
}
// this propagator expects sampling state to be explicitly propagated by the
// upstream service. So we set this flag to identify to the tracer that it should not
// run its sampler in case it is not the root of the trace.
flags |= flag.SamplingSet
return SpanContext{
TraceID: types.TraceID{Low: ctx.TraceId, High: ctx.TraceIdHigh},
SpanID: ctx.SpanId,
Sampled: ctx.Sampled,
Baggage: ctx.BaggageItems,
ParentSpanID: &ctx.ParentSpanId,
Flags: flags,
}, nil
}
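
A usage sketch of the text map propagator through the opentracing API. NewTracer belongs to this package but does not appear in this diff hunk, so its use here is an assumption; the header names match the x-b3-* constants above:

package main

import (
    "fmt"
    "net/http"

    opentracing "github.com/opentracing/opentracing-go"
    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
    // Assumed constructor: NewTracer(recorder, options...) from this package.
    tracer, err := zipkintracer.NewTracer(zipkintracer.NewInMemoryRecorder())
    if err != nil {
        panic(err)
    }
    span := tracer.StartSpan("example")
    defer span.Finish()

    // Inject writes the x-b3-* headers defined above into the carrier.
    req, _ := http.NewRequest("GET", "http://example.com", nil)
    carrier := opentracing.HTTPHeadersCarrier(req.Header)
    if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
        panic(err)
    }
    fmt.Println(req.Header.Get("x-b3-traceid"), req.Header.Get("x-b3-sampled"))

    // Extract rebuilds a SpanContext from those headers on the receiving side.
    if _, err := tracer.Extract(opentracing.HTTPHeaders, carrier); err != nil {
        panic(err)
    }
}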

View file

@@ -0,0 +1,30 @@
package zipkintracer
import (
"time"
opentracing "github.com/opentracing/opentracing-go"
)
// RawSpan encapsulates all state associated with a (finished) Span.
type RawSpan struct {
// Those recording the RawSpan should also record the contents of its
// SpanContext.
Context SpanContext
// The name of the "operation" this span is an instance of. (Called a "span
// name" in some implementations)
Operation string
// We store <start, duration> rather than <start, end> so that only
// one of the timestamps has global clock uncertainty issues.
Start time.Time
Duration time.Duration
// Essentially an extension mechanism. Can be used for many purposes,
// not to be enumerated here.
Tags opentracing.Tags
// The span's "microlog".
Logs []opentracing.LogRecord
}

View file

@@ -0,0 +1,60 @@
package zipkintracer
import "sync"
// A SpanRecorder handles all of the `RawSpan` data generated via an
// associated `Tracer` (see `NewStandardTracer`) instance. It also names
// the containing process and provides access to a straightforward tag map.
type SpanRecorder interface {
// Implementations must determine whether and where to store `span`.
RecordSpan(span RawSpan)
}
// InMemorySpanRecorder is a simple thread-safe implementation of
// SpanRecorder that stores all reported spans in memory, accessible
// via its GetSpans() method. It is primarily intended for testing purposes.
type InMemorySpanRecorder struct {
sync.RWMutex
spans []RawSpan
}
// NewInMemoryRecorder creates new InMemorySpanRecorder
func NewInMemoryRecorder() *InMemorySpanRecorder {
return new(InMemorySpanRecorder)
}
// RecordSpan implements the respective method of SpanRecorder.
func (r *InMemorySpanRecorder) RecordSpan(span RawSpan) {
r.Lock()
defer r.Unlock()
r.spans = append(r.spans, span)
}
// GetSpans returns a copy of the array of spans accumulated so far.
func (r *InMemorySpanRecorder) GetSpans() []RawSpan {
r.RLock()
defer r.RUnlock()
spans := make([]RawSpan, len(r.spans))
copy(spans, r.spans)
return spans
}
// GetSampledSpans returns a slice of spans accumulated so far which were sampled.
func (r *InMemorySpanRecorder) GetSampledSpans() []RawSpan {
r.RLock()
defer r.RUnlock()
spans := make([]RawSpan, 0, len(r.spans))
for _, span := range r.spans {
if span.Context.Sampled {
spans = append(spans, span)
}
}
return spans
}
// Reset clears the internal array of spans.
func (r *InMemorySpanRecorder) Reset() {
r.Lock()
defer r.Unlock()
r.spans = nil
}
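
A short sketch exercising the in-memory recorder above (operation name and timings are arbitrary):

package main

import (
    "fmt"
    "time"

    zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
    rec := zipkintracer.NewInMemoryRecorder()
    rec.RecordSpan(zipkintracer.RawSpan{
        Operation: "db.query",
        Start:     time.Now(),
        Duration:  5 * time.Millisecond,
        Context:   zipkintracer.SpanContext{Sampled: true},
    })
    fmt.Println(len(rec.GetSpans()), len(rec.GetSampledSpans())) // 1 1
    rec.Reset()
    fmt.Println(len(rec.GetSpans())) // 0
}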

View file

@@ -0,0 +1,99 @@
package zipkintracer
import (
"math"
"math/rand"
"sync"
"time"
)
// Sampler functions return if a Zipkin span should be sampled, based on its
// traceID.
type Sampler func(id uint64) bool
func neverSample(_ uint64) bool { return false }
func alwaysSample(_ uint64) bool { return true }
// ModuloSampler provides a typical OpenTracing type Sampler.
func ModuloSampler(mod uint64) Sampler {
if mod < 2 {
return alwaysSample
}
return func(id uint64) bool {
return (id % mod) == 0
}
}
// NewBoundarySampler is appropriate for high-traffic instrumentation that
// provisions random trace ids and makes the sampling decision only once.
// It defends against nodes in the cluster selecting exactly the same ids.
func NewBoundarySampler(rate float64, salt int64) Sampler {
if rate <= 0 {
return neverSample
}
if rate >= 1.0 {
return alwaysSample
}
var (
boundary = int64(rate * 10000)
usalt = uint64(salt)
)
return func(id uint64) bool {
return int64(math.Abs(float64(id^usalt)))%10000 < boundary
}
}
// NewCountingSampler is appropriate for low-traffic instrumentation or
// those who do not provision random trace ids. It is not appropriate for
// collectors as the sampling decision isn't idempotent (consistent based
// on trace id).
func NewCountingSampler(rate float64) Sampler {
if rate <= 0 {
return neverSample
}
if rate >= 1.0 {
return alwaysSample
}
var (
i = 0
outOf100 = int(rate*100 + math.Copysign(0.5, rate*100)) // for rounding float to int conversion instead of truncation
decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano())))
mtx = &sync.Mutex{}
)
return func(_ uint64) bool {
mtx.Lock()
defer mtx.Unlock()
result := decisions[i]
i++
if i == 100 {
i = 0
}
return result
}
}
/**
* Reservoir sampling algorithm borrowed from Stack Overflow.
*
* http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s
*/
func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool {
result := make([]bool, size)
chosen := make([]int, cardinality)
var i int
for i = 0; i < cardinality; i++ {
chosen[i] = i
result[i] = true
}
for ; i < size; i++ {
j := rnd.Intn(i + 1)
if j < cardinality {
result[chosen[j]] = false
result[i] = true
chosen[j] = i
}
}
return result
}

View file

@@ -0,0 +1,290 @@
package zipkintracer
import (
"sync"
"time"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
otobserver "github.com/opentracing-contrib/go-observer"
)
// Span provides access to the essential details of the span, for use
// by zipkintracer consumers. These methods may only be called prior
// to (*opentracing.Span).Finish().
type Span interface {
opentracing.Span
// Operation names the work done by this span instance
Operation() string
// Start indicates when the span began
Start() time.Time
}
// Implements the `Span` interface. Created via tracerImpl (see
// `zipkintracer.NewTracer()`).
type spanImpl struct {
tracer *tracerImpl
event func(SpanEvent)
observer otobserver.SpanObserver
sync.Mutex // protects the fields below
raw RawSpan
// The number of logs dropped because of MaxLogsPerSpan.
numDroppedLogs int
Endpoint *zipkincore.Endpoint
}
var spanPool = &sync.Pool{New: func() interface{} {
return &spanImpl{}
}}
func (s *spanImpl) reset() {
s.tracer, s.event = nil, nil
// Note: Would like to do the following, but then the consumer of RawSpan
// (the recorder) needs to make sure that they're not holding on to the
// baggage or logs when they return (i.e. they need to copy if they care):
//
// logs, baggage := s.raw.Logs[:0], s.raw.Baggage
// for k := range baggage {
// delete(baggage, k)
// }
// s.raw.Logs, s.raw.Baggage = logs, baggage
//
// That's likely too much to ask for. But there is some magic we should
// be able to do with `runtime.SetFinalizer` to reclaim that memory into
// a buffer pool when GC considers them unreachable, which should ease
// some of the load. Hard to say how quickly that would be in practice
// though.
s.raw = RawSpan{
Context: SpanContext{},
}
}
func (s *spanImpl) SetOperationName(operationName string) opentracing.Span {
if s.observer != nil {
s.observer.OnSetOperationName(operationName)
}
s.Lock()
defer s.Unlock()
s.raw.Operation = operationName
return s
}
func (s *spanImpl) trim() bool {
return !s.raw.Context.Sampled && s.tracer.options.trimUnsampledSpans
}
func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span {
defer s.onTag(key, value)
if s.observer != nil {
s.observer.OnSetTag(key, value)
}
s.Lock()
defer s.Unlock()
if key == string(ext.SamplingPriority) {
if v, ok := value.(uint16); ok {
s.raw.Context.Sampled = v != 0
return s
}
}
if s.trim() {
return s
}
if s.raw.Tags == nil {
s.raw.Tags = opentracing.Tags{}
}
s.raw.Tags[key] = value
return s
}
func (s *spanImpl) LogKV(keyValues ...interface{}) {
fields, err := log.InterleavedKVToFields(keyValues...)
if err != nil {
s.LogFields(log.Error(err), log.String("function", "LogKV"))
return
}
s.LogFields(fields...)
}
func (s *spanImpl) appendLog(lr opentracing.LogRecord) {
maxLogs := s.tracer.options.maxLogsPerSpan
if maxLogs == 0 || len(s.raw.Logs) < maxLogs {
s.raw.Logs = append(s.raw.Logs, lr)
return
}
// We have too many logs. We don't touch the first numOld logs; we treat the
// rest as a circular buffer and overwrite the oldest log among those.
numOld := (maxLogs - 1) / 2
numNew := maxLogs - numOld
s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr
s.numDroppedLogs++
}
func (s *spanImpl) LogFields(fields ...log.Field) {
lr := opentracing.LogRecord{
Fields: fields,
}
defer s.onLogFields(lr)
s.Lock()
defer s.Unlock()
if s.trim() || s.tracer.options.dropAllLogs {
return
}
if lr.Timestamp.IsZero() {
lr.Timestamp = time.Now()
}
s.appendLog(lr)
}
func (s *spanImpl) LogEvent(event string) {
s.Log(opentracing.LogData{
Event: event,
})
}
func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) {
s.Log(opentracing.LogData{
Event: event,
Payload: payload,
})
}
func (s *spanImpl) Log(ld opentracing.LogData) {
defer s.onLog(ld)
s.Lock()
defer s.Unlock()
if s.trim() || s.tracer.options.dropAllLogs {
return
}
if ld.Timestamp.IsZero() {
ld.Timestamp = time.Now()
}
s.appendLog(ld.ToLogRecord())
}
func (s *spanImpl) Finish() {
s.FinishWithOptions(opentracing.FinishOptions{})
}
// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at
// the end (i.e. pos circular left shifts).
func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
// This algorithm is described in:
// http://www.cplusplus.com/reference/algorithm/rotate
for first, middle, next := 0, pos, pos; first != middle; {
buf[first], buf[next] = buf[next], buf[first]
first++
next++
if next == len(buf) {
next = middle
} else if first == middle {
middle = next
}
}
}
func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) {
finishTime := opts.FinishTime
if finishTime.IsZero() {
finishTime = time.Now()
}
duration := finishTime.Sub(s.raw.Start)
if s.observer != nil {
s.observer.OnFinish(opts)
}
s.Lock()
defer s.Unlock()
for _, lr := range opts.LogRecords {
s.appendLog(lr)
}
for _, ld := range opts.BulkLogData {
s.appendLog(ld.ToLogRecord())
}
if s.numDroppedLogs > 0 {
// We dropped some log events, which means that we used part of Logs as a
// circular buffer (see appendLog). De-circularize it.
numOld := (len(s.raw.Logs) - 1) / 2
numNew := len(s.raw.Logs) - numOld
rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew)
// Replace the log in the middle (the oldest "new" log) with information
// about the dropped logs. This means that we are effectively dropping one
// more "new" log.
numDropped := s.numDroppedLogs + 1
s.raw.Logs[numOld] = opentracing.LogRecord{
// Keep the timestamp of the last dropped event.
Timestamp: s.raw.Logs[numOld].Timestamp,
Fields: []log.Field{
log.String("event", "dropped Span logs"),
log.Int("dropped_log_count", numDropped),
log.String("component", "zipkintracer"),
},
}
}
s.raw.Duration = duration
s.onFinish(s.raw)
s.tracer.options.recorder.RecordSpan(s.raw)
// Last chance to get options before the span is possibly reset.
poolEnabled := s.tracer.options.enableSpanPool
if s.tracer.options.debugAssertUseAfterFinish {
// This makes it much more likely to catch a panic on any subsequent
// operation since s.tracer is accessed on every call to `Lock`.
// We don't call `reset()` here to preserve the logs in the Span
// which are printed when the assertion triggers.
s.tracer = nil
}
if poolEnabled {
spanPool.Put(s)
}
}
func (s *spanImpl) Tracer() opentracing.Tracer {
return s.tracer
}
func (s *spanImpl) Context() opentracing.SpanContext {
return s.raw.Context
}
func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span {
s.onBaggage(key, val)
if s.trim() {
return s
}
s.Lock()
defer s.Unlock()
s.raw.Context = s.raw.Context.WithBaggageItem(key, val)
return s
}
func (s *spanImpl) BaggageItem(key string) string {
s.Lock()
defer s.Unlock()
return s.raw.Context.Baggage[key]
}
func (s *spanImpl) Operation() string {
return s.raw.Operation
}
func (s *spanImpl) Start() time.Time {
return s.raw.Start
}

View file

@@ -0,0 +1,18 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package scribe
import (
"bytes"
"fmt"
"github.com/apache/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
func init() {
}

View file

@@ -0,0 +1,431 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package scribe
import (
"bytes"
"fmt"
"github.com/apache/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
type Scribe interface {
// Parameters:
// - Messages
Log(messages []*LogEntry) (r ResultCode, err error)
}
type ScribeClient struct {
Transport thrift.TTransport
ProtocolFactory thrift.TProtocolFactory
InputProtocol thrift.TProtocol
OutputProtocol thrift.TProtocol
SeqId int32
}
func NewScribeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ScribeClient {
return &ScribeClient{Transport: t,
ProtocolFactory: f,
InputProtocol: f.GetProtocol(t),
OutputProtocol: f.GetProtocol(t),
SeqId: 0,
}
}
func NewScribeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ScribeClient {
return &ScribeClient{Transport: t,
ProtocolFactory: nil,
InputProtocol: iprot,
OutputProtocol: oprot,
SeqId: 0,
}
}
// Parameters:
// - Messages
func (p *ScribeClient) Log(messages []*LogEntry) (r ResultCode, err error) {
if err = p.sendLog(messages); err != nil {
return
}
return p.recvLog()
}
func (p *ScribeClient) sendLog(messages []*LogEntry) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("Log", thrift.CALL, p.SeqId); err != nil {
return
}
args := ScribeLogArgs{
Messages: messages,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
func (p *ScribeClient) recvLog() (value ResultCode, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.InputProtocol = iprot
}
method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
if method != "Log" {
err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Log failed: wrong method name")
return
}
if p.SeqId != seqId {
err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Log failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
var error1 error
error1, err = error0.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
err = error1
return
}
if mTypeId != thrift.REPLY {
err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Log failed: invalid message type")
return
}
result := ScribeLogResult{}
if err = result.Read(iprot); err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
value = result.GetSuccess()
return
}
type ScribeProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler Scribe
}
func (p *ScribeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *ScribeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *ScribeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewScribeProcessor(handler Scribe) *ScribeProcessor {
self2 := &ScribeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self2.processorMap["Log"] = &scribeProcessorLog{handler: handler}
return self2
}
func (p *ScribeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return false, err
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(seqId, iprot, oprot)
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
x3.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, x3
}
type scribeProcessorLog struct {
handler Scribe
}
func (p *scribeProcessorLog) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := ScribeLogArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, err
}
iprot.ReadMessageEnd()
result := ScribeLogResult{}
var retval ResultCode
var err2 error
if retval, err2 = p.handler.Log(args.Messages); err2 != nil {
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Log: "+err2.Error())
oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return true, err2
} else {
result.Success = &retval
}
if err2 = oprot.WriteMessageBegin("Log", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.Flush(); err == nil && err2 != nil {
err = err2
}
if err != nil {
return
}
return true, err
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Messages
type ScribeLogArgs struct {
Messages []*LogEntry `thrift:"messages,1" json:"messages"`
}
func NewScribeLogArgs() *ScribeLogArgs {
return &ScribeLogArgs{}
}
func (p *ScribeLogArgs) GetMessages() []*LogEntry {
return p.Messages
}
func (p *ScribeLogArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *ScribeLogArgs) readField1(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*LogEntry, 0, size)
p.Messages = tSlice
for i := 0; i < size; i++ {
_elem4 := &LogEntry{}
if err := _elem4.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
}
p.Messages = append(p.Messages, _elem4)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *ScribeLogArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("Log_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *ScribeLogArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("messages", thrift.LIST, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:messages: ", p), err)
}
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Messages)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Messages {
if err := v.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:messages: ", p), err)
}
return err
}
func (p *ScribeLogArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("ScribeLogArgs(%+v)", *p)
}
// Attributes:
// - Success
type ScribeLogResult struct {
Success *ResultCode `thrift:"success,0" json:"success,omitempty"`
}
func NewScribeLogResult() *ScribeLogResult {
return &ScribeLogResult{}
}
var ScribeLogResult_Success_DEFAULT ResultCode
func (p *ScribeLogResult) GetSuccess() ResultCode {
if !p.IsSetSuccess() {
return ScribeLogResult_Success_DEFAULT
}
return *p.Success
}
func (p *ScribeLogResult) IsSetSuccess() bool {
return p.Success != nil
}
func (p *ScribeLogResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 0:
if err := p.readField0(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *ScribeLogResult) readField0(iprot thrift.TProtocol) error {
if v, err := iprot.ReadI32(); err != nil {
return thrift.PrependError("error reading field 0: ", err)
} else {
temp := ResultCode(v)
p.Success = &temp
}
return nil
}
func (p *ScribeLogResult) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("Log_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField0(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *ScribeLogResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.I32, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
}
if err := oprot.WriteI32(int32(*p.Success)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
}
}
return err
}
func (p *ScribeLogResult) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("ScribeLogResult(%+v)", *p)
}

View file

@@ -0,0 +1,185 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package scribe
import (
"bytes"
"fmt"
"github.com/apache/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
var GoUnusedProtection__ int
type ResultCode int64
const (
ResultCode_OK ResultCode = 0
ResultCode_TRY_LATER ResultCode = 1
)
func (p ResultCode) String() string {
switch p {
case ResultCode_OK:
return "OK"
case ResultCode_TRY_LATER:
return "TRY_LATER"
}
return "<UNSET>"
}
func ResultCodeFromString(s string) (ResultCode, error) {
switch s {
case "OK":
return ResultCode_OK, nil
case "TRY_LATER":
return ResultCode_TRY_LATER, nil
}
return ResultCode(0), fmt.Errorf("not a valid ResultCode string")
}
func ResultCodePtr(v ResultCode) *ResultCode { return &v }
func (p ResultCode) MarshalText() ([]byte, error) {
return []byte(p.String()), nil
}
func (p *ResultCode) UnmarshalText(text []byte) error {
q, err := ResultCodeFromString(string(text))
if err != nil {
return err
}
*p = q
return nil
}
// Attributes:
// - Category
// - Message
type LogEntry struct {
Category string `thrift:"category,1" json:"category"`
Message string `thrift:"message,2" json:"message"`
}
func NewLogEntry() *LogEntry {
return &LogEntry{}
}
func (p *LogEntry) GetCategory() string {
return p.Category
}
func (p *LogEntry) GetMessage() string {
return p.Message
}
func (p *LogEntry) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
case 2:
if err := p.readField2(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *LogEntry) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.Category = v
}
return nil
}
func (p *LogEntry) readField2(iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 2: ", err)
} else {
p.Message = v
}
return nil
}
func (p *LogEntry) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("LogEntry"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := p.writeField2(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *LogEntry) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("category", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:category: ", p), err)
}
if err := oprot.WriteString(string(p.Category)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.category (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:category: ", p), err)
}
return err
}
func (p *LogEntry) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("message", thrift.STRING, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:message: ", p), err)
}
if err := oprot.WriteString(string(p.Message)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.message (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:message: ", p), err)
}
return err
}
func (p *LogEntry) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("LogEntry(%+v)", *p)
}

View file

@@ -0,0 +1,40 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package zipkincore
import (
"bytes"
"fmt"
"github.com/apache/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
const CLIENT_SEND = "cs"
const CLIENT_RECV = "cr"
const SERVER_SEND = "ss"
const SERVER_RECV = "sr"
const WIRE_SEND = "ws"
const WIRE_RECV = "wr"
const CLIENT_SEND_FRAGMENT = "csf"
const CLIENT_RECV_FRAGMENT = "crf"
const SERVER_SEND_FRAGMENT = "ssf"
const SERVER_RECV_FRAGMENT = "srf"
const HTTP_HOST = "http.host"
const HTTP_METHOD = "http.method"
const HTTP_PATH = "http.path"
const HTTP_URL = "http.url"
const HTTP_STATUS_CODE = "http.status_code"
const HTTP_REQUEST_SIZE = "http.request.size"
const HTTP_RESPONSE_SIZE = "http.response.size"
const LOCAL_COMPONENT = "lc"
const ERROR = "error"
const CLIENT_ADDR = "ca"
const SERVER_ADDR = "sa"
func init() {
}

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,440 @@
package zipkintracer
import (
"errors"
"time"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/openzipkin/zipkin-go-opentracing/flag"
otobserver "github.com/opentracing-contrib/go-observer"
)
// ErrInvalidEndpoint is returned if the hostPort parameter is corrupted or the
// host can't be resolved
var ErrInvalidEndpoint = errors.New("Invalid Endpoint. Please check hostPort parameter")
// Tracer extends the opentracing.Tracer interface with methods to
// probe implementation state, for use by zipkintracer consumers.
type Tracer interface {
opentracing.Tracer
// Options gets the Options used in New() or NewWithOptions().
Options() TracerOptions
}
// TracerOptions allows creating a customized Tracer.
type TracerOptions struct {
// shouldSample is a function which is called when creating a new Span and
// determines whether that Span is sampled. The randomized TraceID is supplied
// to allow deterministic sampling decisions to be made across different nodes.
shouldSample func(traceID uint64) bool
// trimUnsampledSpans turns potentially expensive operations on unsampled
// Spans into no-ops. More precisely, tags and log events are silently
// discarded. If NewSpanEventListener is set, the callbacks will still fire.
trimUnsampledSpans bool
// recorder receives Spans which have been finished.
recorder SpanRecorder
// newSpanEventListener can be used to enhance the tracer by effectively
// attaching external code to trace events. See NetTraceIntegrator for a
// practical example, and event.go for the list of possible events.
newSpanEventListener func() func(SpanEvent)
// dropAllLogs turns log events on all Spans into no-ops.
// If NewSpanEventListener is set, the callbacks will still fire.
dropAllLogs bool
// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
// value). If a span has more logs than this value, logs are dropped as
// necessary (and replaced with a log describing how many were dropped).
//
// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
// half are the newest logs.
//
// If NewSpanEventListener is set, the callbacks will still fire for all log
// events. This value is ignored if DropAllLogs is true.
maxLogsPerSpan int
// debugAssertSingleGoroutine internally records the ID of the goroutine
// creating each Span and verifies that no operation is carried out on
// it on a different goroutine.
// Provided strictly for development purposes.
// Passing Spans between goroutines without proper synchronization often
// results in use-after-Finish() errors. For a simple example, consider the
// following pseudocode:
//
// func (s *Server) Handle(req http.Request) error {
// sp := s.StartSpan("server")
// defer sp.Finish()
// wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req)
// select {
// case resp := <-wait:
// return resp.Error
// case <-time.After(10*time.Second):
// sp.LogEvent("timed out waiting for processing")
// return ErrTimedOut
// }
// }
//
// This looks reasonable at first, but a request which spends more than ten
// seconds in the queue is abandoned by the main goroutine and its trace
// finished, leading to use-after-finish when the request is finally
// processed. Note also that even joining on to a finished Span via
// StartSpanWithOptions constitutes an illegal operation.
//
// Code bases which do not require (or decide they do not want) Spans to
// be passed across goroutine boundaries can run with this flag enabled in
// tests to increase their chances of spotting wrong-doers.
debugAssertSingleGoroutine bool
// debugAssertUseAfterFinish is provided strictly for development purposes.
// When set, it attempts to exacerbate issues emanating from use of Spans
// after calling Finish by running additional assertions.
debugAssertUseAfterFinish bool
// enableSpanPool enables the use of a pool, so that the tracer reuses spans
// after Finish has been called on them. Adds a slight performance gain as it
// reduces allocations. However, if you have any use-after-finish race
// conditions the code may panic.
enableSpanPool bool
// logger is used to report internal errors; defaults to a no-op logger.
logger Logger
// clientServerSameSpan allows for Zipkin V1 style span per RPC. This places
// both the client end and the server end of an RPC call into the same span.
clientServerSameSpan bool
// debugMode activates Zipkin's debug request allowing for all Spans originating
// from this tracer to pass through and bypass sampling. Use with extreme care
// as it might flood your system if you have many traces starting from the
// service you are instrumenting.
debugMode bool
// traceID128Bit enables the generation of 128 bit traceIDs in case the tracer
// needs to create a root span. By default regular 64 bit traceIDs are used.
// Regardless of this setting, the library will propagate and support both
// 64 and 128 bit incoming traces from upstream sources.
traceID128Bit bool
observer otobserver.Observer
}
// TracerOption allows for functional options.
// See: http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type TracerOption func(opts *TracerOptions) error
// WithSampler allows one to add a Sampler function
func WithSampler(sampler Sampler) TracerOption {
return func(opts *TracerOptions) error {
opts.shouldSample = sampler
return nil
}
}
// TrimUnsampledSpans option
func TrimUnsampledSpans(trim bool) TracerOption {
return func(opts *TracerOptions) error {
opts.trimUnsampledSpans = trim
return nil
}
}
// DropAllLogs option
func DropAllLogs(dropAllLogs bool) TracerOption {
return func(opts *TracerOptions) error {
opts.dropAllLogs = dropAllLogs
return nil
}
}
// WithLogger option
func WithLogger(logger Logger) TracerOption {
return func(opts *TracerOptions) error {
opts.logger = logger
return nil
}
}
// DebugAssertSingleGoroutine option
func DebugAssertSingleGoroutine(val bool) TracerOption {
return func(opts *TracerOptions) error {
opts.debugAssertSingleGoroutine = val
return nil
}
}
// DebugAssertUseAfterFinish option
func DebugAssertUseAfterFinish(val bool) TracerOption {
return func(opts *TracerOptions) error {
opts.debugAssertUseAfterFinish = val
return nil
}
}
// TraceID128Bit option
func TraceID128Bit(val bool) TracerOption {
return func(opts *TracerOptions) error {
opts.traceID128Bit = val
return nil
}
}
// ClientServerSameSpan allows placing client-side and server-side annotations
// for an RPC call in the same span (Zipkin V1 behavior) or in different spans
// (more in line with other tracing solutions). By default this Tracer
// uses shared host spans (so client side and server side share the same span).
// If using separate spans you might run into trouble with Zipkin V1, as clock
// skew issues can't be remedied on the Zipkin server side.
func ClientServerSameSpan(val bool) TracerOption {
return func(opts *TracerOptions) error {
opts.clientServerSameSpan = val
return nil
}
}
// DebugMode allows setting the tracer to Zipkin debug mode
func DebugMode(val bool) TracerOption {
return func(opts *TracerOptions) error {
opts.debugMode = val
return nil
}
}
// EnableSpanPool enables span reuse via an internal pool; see
// TracerOptions.enableSpanPool for the trade-offs.
func EnableSpanPool(val bool) TracerOption {
return func(opts *TracerOptions) error {
opts.enableSpanPool = val
return nil
}
}
// NewSpanEventListener option
func NewSpanEventListener(f func() func(SpanEvent)) TracerOption {
return func(opts *TracerOptions) error {
opts.newSpanEventListener = f
return nil
}
}
// WithMaxLogsPerSpan option
func WithMaxLogsPerSpan(limit int) TracerOption {
return func(opts *TracerOptions) error {
if limit < 5 || limit > 10000 {
return errors.New("invalid MaxLogsPerSpan limit. Should be between 5 and 10000")
}
opts.maxLogsPerSpan = limit
return nil
}
}
// NewTracer creates a new OpenTracing compatible Zipkin Tracer.
func NewTracer(recorder SpanRecorder, options ...TracerOption) (opentracing.Tracer, error) {
opts := &TracerOptions{
recorder: recorder,
shouldSample: alwaysSample,
trimUnsampledSpans: false,
newSpanEventListener: func() func(SpanEvent) { return nil },
logger: &nopLogger{},
debugAssertSingleGoroutine: false,
debugAssertUseAfterFinish: false,
clientServerSameSpan: true,
debugMode: false,
traceID128Bit: false,
maxLogsPerSpan: 10000,
observer: nil,
}
for _, o := range options {
err := o(opts)
if err != nil {
return nil, err
}
}
rval := &tracerImpl{options: *opts}
rval.textPropagator = &textMapPropagator{rval}
rval.binaryPropagator = &binaryPropagator{rval}
rval.accessorPropagator = &accessorPropagator{rval}
return rval, nil
}
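
A minimal usage sketch, not part of this change: given any SpanRecorder (for example the Recorder added elsewhere in this commit), the functional options above compose as follows. The import alias and the newGlobalTracer helper are illustrative only.

package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	zipkin "github.com/openzipkin/zipkin-go-opentracing"
)

// newGlobalTracer builds a tracer from a recorder and registers it globally.
// Options are applied in order; the first option returning an error aborts NewTracer.
func newGlobalTracer(recorder zipkin.SpanRecorder) (opentracing.Tracer, error) {
	tracer, err := zipkin.NewTracer(
		recorder,
		zipkin.ClientServerSameSpan(true), // Zipkin V1 style shared RPC spans
		zipkin.TraceID128Bit(true),        // 128 bit ids for new root spans
		zipkin.WithMaxLogsPerSpan(100),    // cap the logs kept per span
	)
	if err != nil {
		return nil, err
	}
	opentracing.SetGlobalTracer(tracer)
	return tracer, nil
}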
// Implements the `Tracer` interface.
type tracerImpl struct {
options TracerOptions
textPropagator *textMapPropagator
binaryPropagator *binaryPropagator
accessorPropagator *accessorPropagator
}
func (t *tracerImpl) StartSpan(
operationName string,
opts ...opentracing.StartSpanOption,
) opentracing.Span {
sso := opentracing.StartSpanOptions{}
for _, o := range opts {
o.Apply(&sso)
}
return t.startSpanWithOptions(operationName, sso)
}
func (t *tracerImpl) getSpan() *spanImpl {
if t.options.enableSpanPool {
sp := spanPool.Get().(*spanImpl)
sp.reset()
return sp
}
return &spanImpl{}
}
func (t *tracerImpl) startSpanWithOptions(
operationName string,
opts opentracing.StartSpanOptions,
) opentracing.Span {
// Start time.
startTime := opts.StartTime
if startTime.IsZero() {
startTime = time.Now()
}
// Tags.
tags := opts.Tags
// Build the new span. This is the only allocation: We'll return this as
// an opentracing.Span.
sp := t.getSpan()
if t.options.observer != nil {
sp.observer, _ = t.options.observer.OnStartSpan(sp, operationName, opts)
}
// Look for a parent in the list of References.
//
// TODO: would be nice if basictracer did something with all
// References, not just the first one.
ReferencesLoop:
for _, ref := range opts.References {
switch ref.Type {
case opentracing.ChildOfRef:
refCtx := ref.ReferencedContext.(SpanContext)
sp.raw.Context.TraceID = refCtx.TraceID
sp.raw.Context.ParentSpanID = &refCtx.SpanID
sp.raw.Context.Sampled = refCtx.Sampled
sp.raw.Context.Flags = refCtx.Flags
sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed
if t.options.clientServerSameSpan &&
tags[string(ext.SpanKind)] == ext.SpanKindRPCServer.Value {
sp.raw.Context.SpanID = refCtx.SpanID
sp.raw.Context.ParentSpanID = refCtx.ParentSpanID
sp.raw.Context.Owner = false
} else {
sp.raw.Context.SpanID = randomID()
sp.raw.Context.ParentSpanID = &refCtx.SpanID
sp.raw.Context.Owner = true
}
if l := len(refCtx.Baggage); l > 0 {
sp.raw.Context.Baggage = make(map[string]string, l)
for k, v := range refCtx.Baggage {
sp.raw.Context.Baggage[k] = v
}
}
break ReferencesLoop
case opentracing.FollowsFromRef:
refCtx := ref.ReferencedContext.(SpanContext)
sp.raw.Context.TraceID = refCtx.TraceID
sp.raw.Context.ParentSpanID = &refCtx.SpanID
sp.raw.Context.Sampled = refCtx.Sampled
sp.raw.Context.Flags = refCtx.Flags
sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed
sp.raw.Context.SpanID = randomID()
sp.raw.Context.ParentSpanID = &refCtx.SpanID
sp.raw.Context.Owner = true
if l := len(refCtx.Baggage); l > 0 {
sp.raw.Context.Baggage = make(map[string]string, l)
for k, v := range refCtx.Baggage {
sp.raw.Context.Baggage[k] = v
}
}
break ReferencesLoop
}
}
if sp.raw.Context.TraceID.Empty() {
// No parent Span found; allocate new trace and span ids and determine
// the Sampled status.
if t.options.traceID128Bit {
sp.raw.Context.TraceID.High = randomID()
}
sp.raw.Context.TraceID.Low, sp.raw.Context.SpanID = randomID2()
sp.raw.Context.Sampled = t.options.shouldSample(sp.raw.Context.TraceID.Low)
sp.raw.Context.Flags = flag.IsRoot
sp.raw.Context.Owner = true
}
if t.options.debugMode {
sp.raw.Context.Flags |= flag.Debug
}
return t.startSpanInternal(
sp,
operationName,
startTime,
tags,
)
}
func (t *tracerImpl) startSpanInternal(
sp *spanImpl,
operationName string,
startTime time.Time,
tags opentracing.Tags,
) opentracing.Span {
sp.tracer = t
if t.options.newSpanEventListener != nil {
sp.event = t.options.newSpanEventListener()
}
sp.raw.Operation = operationName
sp.raw.Start = startTime
sp.raw.Duration = -1
sp.raw.Tags = tags
if t.options.debugAssertSingleGoroutine {
sp.SetTag(debugGoroutineIDTag, curGoroutineID())
}
defer sp.onCreate(operationName)
return sp
}
type delegatorType struct{}
// Delegator is the format to use for DelegatingCarrier.
var Delegator delegatorType
func (t *tracerImpl) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error {
switch format {
case opentracing.TextMap, opentracing.HTTPHeaders:
return t.textPropagator.Inject(sc, carrier)
case opentracing.Binary:
return t.binaryPropagator.Inject(sc, carrier)
}
if _, ok := format.(delegatorType); ok {
return t.accessorPropagator.Inject(sc, carrier)
}
return opentracing.ErrUnsupportedFormat
}
func (t *tracerImpl) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
switch format {
case opentracing.TextMap, opentracing.HTTPHeaders:
return t.textPropagator.Extract(carrier)
case opentracing.Binary:
return t.binaryPropagator.Extract(carrier)
}
if _, ok := format.(delegatorType); ok {
return t.accessorPropagator.Extract(carrier)
}
return nil, opentracing.ErrUnsupportedFormat
}
func (t *tracerImpl) Options() TracerOptions {
return t.options
}
// WithObserver assigns an initialized observer to opts.observer
func WithObserver(observer otobserver.Observer) TracerOption {
return func(opts *TracerOptions) error {
opts.observer = observer
return nil
}
}
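
A hedged sketch of how the Inject and Extract formats above are typically driven from HTTP handlers; the handleRequest and callBackend names are illustrative, everything else uses the opentracing-go carriers.

package main

import (
	"log"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

// callBackend injects the active span context into the outgoing request
// headers via the HTTPHeaders (TextMap) propagator handled above.
func callBackend(tracer opentracing.Tracer, req *http.Request) {
	span := tracer.StartSpan("call-backend")
	defer span.Finish()
	ext.SpanKindRPCClient.Set(span)
	if err := tracer.Inject(
		span.Context(),
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(req.Header),
	); err != nil {
		log.Printf("inject failed: %v", err)
	}
}

// handleRequest extracts the caller's context; ext.RPCServerOption tags the
// new span as SpanKindRPCServer, so a tracer with clientServerSameSpan enabled
// joins the client's span instead of allocating a fresh span id.
func handleRequest(tracer opentracing.Tracer, w http.ResponseWriter, req *http.Request) {
	wireCtx, err := tracer.Extract(
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(req.Header),
	)
	if err != nil && err != opentracing.ErrSpanContextNotFound {
		log.Printf("extract failed: %v", err)
	}
	span := tracer.StartSpan("handle-request", ext.RPCServerOption(wireCtx))
	defer span.Finish()
	// ... actual request handling ...
}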

View file

@@ -0,0 +1,40 @@
package types
import (
"fmt"
"strconv"
)
// TraceID is a 128 bit number internally stored as 2x uint64 (high & low).
type TraceID struct {
High uint64
Low uint64
}
// TraceIDFromHex returns the TraceID from a Hex string.
func TraceIDFromHex(h string) (t TraceID, err error) {
if len(h) > 16 {
if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil {
return
}
t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64)
return
}
t.Low, err = strconv.ParseUint(h, 16, 64)
return
}
// ToHex outputs the 128-bit traceID as hex string.
func (t TraceID) ToHex() string {
if t.High == 0 {
return strconv.FormatUint(t.Low, 16)
}
return fmt.Sprintf(
"%016s%016s", strconv.FormatUint(t.High, 16), strconv.FormatUint(t.Low, 16),
)
}
// Empty reports whether the TraceID has a zero value.
func (t TraceID) Empty() bool {
return t.Low == 0 && t.High == 0
}
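
A small worked example of the hex round trip, illustrative only: a 32-character id splits into the High and Low halves, while anything of 16 characters or fewer lands entirely in Low.

package main

import (
	"fmt"

	"github.com/openzipkin/zipkin-go-opentracing/types"
)

func main() {
	// 32 hex characters: the first 16 become High, the last 16 become Low.
	t, err := types.TraceIDFromHex("463ac35c9f6413ad48485a3953bb6124")
	if err != nil {
		panic(err)
	}
	fmt.Printf("high=%x low=%x\n", t.High, t.Low) // high=463ac35c9f6413ad low=48485a3953bb6124
	fmt.Println(t.ToHex())                        // 463ac35c9f6413ad48485a3953bb6124

	// A short id leaves High at zero, so ToHex prints only the Low half.
	short, _ := types.TraceIDFromHex("48485a3953bb6124")
	fmt.Println(short.Empty(), short.ToHex()) // false 48485a3953bb6124
}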

View file

@@ -0,0 +1,25 @@
package zipkintracer
import (
"math/rand"
"sync"
"time"
)
var (
seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
// The golang rand generators are *not* intrinsically thread-safe.
seededIDLock sync.Mutex
)
func randomID() uint64 {
seededIDLock.Lock()
defer seededIDLock.Unlock()
return uint64(seededIDGen.Int63())
}
func randomID2() (uint64, uint64) {
seededIDLock.Lock()
defer seededIDLock.Unlock()
return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63())
}

View file

@@ -0,0 +1,65 @@
package wire
import (
"github.com/openzipkin/zipkin-go-opentracing/flag"
"github.com/openzipkin/zipkin-go-opentracing/types"
)
// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the
// underlying data structure. The reason for implementing DelegatingCarrier is
// to allow end users to serialize the underlying protocol buffers using jsonpb
// or any other serialization format they want.
type ProtobufCarrier TracerState
// SetState sets the tracer state.
func (p *ProtobufCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
p.TraceId = traceID.Low
p.TraceIdHigh = traceID.High
p.SpanId = spanID
if parentSpanID == nil {
flags |= flag.IsRoot
p.ParentSpanId = 0
} else {
flags &^= flag.IsRoot
p.ParentSpanId = *parentSpanID
}
flags |= flag.SamplingSet
if sampled {
flags |= flag.Sampled
p.Sampled = sampled
} else {
flags &^= flag.Sampled
}
p.Flags = uint64(flags)
}
// State returns the tracer state.
func (p *ProtobufCarrier) State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
traceID.Low = p.TraceId
traceID.High = p.TraceIdHigh
spanID = p.SpanId
sampled = p.Sampled
flags = flag.Flags(p.Flags)
if flags&flag.IsRoot == 0 {
parentSpanID = &p.ParentSpanId
}
return traceID, spanID, parentSpanID, sampled, flags
}
// SetBaggageItem sets a baggage item.
func (p *ProtobufCarrier) SetBaggageItem(key, value string) {
if p.BaggageItems == nil {
p.BaggageItems = map[string]string{key: value}
return
}
p.BaggageItems[key] = value
}
// GetBaggage iterates over each baggage item and executes the callback with
// the key:value pair.
func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) {
for k, v := range p.BaggageItems {
f(k, v)
}
}
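
A sketch of a full round trip through the carrier, not part of this commit: the state set on the sending side is serialized via the generated TracerState (wire.pb.go, below) and read back on the receiving side.

package main

import (
	"fmt"

	"github.com/openzipkin/zipkin-go-opentracing/flag"
	"github.com/openzipkin/zipkin-go-opentracing/types"
	"github.com/openzipkin/zipkin-go-opentracing/wire"
)

func main() {
	parent := uint64(42)
	carrier := &wire.ProtobufCarrier{}
	carrier.SetState(types.TraceID{High: 1, Low: 2}, 3, &parent, true, 0)
	carrier.SetBaggageItem("user", "alice")

	// ProtobufCarrier is just a named TracerState, so the generated
	// Marshal/Unmarshal can move it across the wire.
	data, err := (*wire.TracerState)(carrier).Marshal()
	if err != nil {
		panic(err)
	}

	var decoded wire.TracerState
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	traceID, spanID, parentID, sampled, flags := (*wire.ProtobufCarrier)(&decoded).State()
	fmt.Println(traceID, spanID, *parentID, sampled, flags&flag.Sampled != 0)
}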

View file

@@ -0,0 +1,6 @@
package wire
//go:generate protoc --gogofaster_out=$GOPATH/src/github.com/openzipkin/zipkin-go-opentracing/wire wire.proto
// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the
// gogofaster generator binary.

View file

@@ -0,0 +1,647 @@
// Code generated by protoc-gen-gogo.
// source: wire.proto
// DO NOT EDIT!
/*
Package wire is a generated protocol buffer package.
It is generated from these files:
wire.proto
It has these top-level messages:
TracerState
*/
package wire
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type TracerState struct {
TraceId uint64 `protobuf:"fixed64,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
Sampled bool `protobuf:"varint,3,opt,name=sampled,proto3" json:"sampled,omitempty"`
BaggageItems map[string]string `protobuf:"bytes,4,rep,name=baggage_items,json=baggageItems" json:"baggage_items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
TraceIdHigh uint64 `protobuf:"fixed64,20,opt,name=trace_id_high,json=traceIdHigh,proto3" json:"trace_id_high,omitempty"`
ParentSpanId uint64 `protobuf:"fixed64,21,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
Flags uint64 `protobuf:"fixed64,22,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (m *TracerState) Reset() { *m = TracerState{} }
func (m *TracerState) String() string { return proto.CompactTextString(m) }
func (*TracerState) ProtoMessage() {}
func (*TracerState) Descriptor() ([]byte, []int) { return fileDescriptorWire, []int{0} }
func (m *TracerState) GetTraceId() uint64 {
if m != nil {
return m.TraceId
}
return 0
}
func (m *TracerState) GetSpanId() uint64 {
if m != nil {
return m.SpanId
}
return 0
}
func (m *TracerState) GetSampled() bool {
if m != nil {
return m.Sampled
}
return false
}
func (m *TracerState) GetBaggageItems() map[string]string {
if m != nil {
return m.BaggageItems
}
return nil
}
func (m *TracerState) GetTraceIdHigh() uint64 {
if m != nil {
return m.TraceIdHigh
}
return 0
}
func (m *TracerState) GetParentSpanId() uint64 {
if m != nil {
return m.ParentSpanId
}
return 0
}
func (m *TracerState) GetFlags() uint64 {
if m != nil {
return m.Flags
}
return 0
}
func init() {
proto.RegisterType((*TracerState)(nil), "zipkintracer_go.wire.TracerState")
}
func (m *TracerState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TracerState) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.TraceId != 0 {
dAtA[i] = 0x9
i++
i = encodeFixed64Wire(dAtA, i, uint64(m.TraceId))
}
if m.SpanId != 0 {
dAtA[i] = 0x11
i++
i = encodeFixed64Wire(dAtA, i, uint64(m.SpanId))
}
if m.Sampled {
dAtA[i] = 0x18
i++
if m.Sampled {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if len(m.BaggageItems) > 0 {
for k, _ := range m.BaggageItems {
dAtA[i] = 0x22
i++
v := m.BaggageItems[k]
mapSize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v)))
i = encodeVarintWire(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintWire(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintWire(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
if m.TraceIdHigh != 0 {
dAtA[i] = 0xa1
i++
dAtA[i] = 0x1
i++
i = encodeFixed64Wire(dAtA, i, uint64(m.TraceIdHigh))
}
if m.ParentSpanId != 0 {
dAtA[i] = 0xa9
i++
dAtA[i] = 0x1
i++
i = encodeFixed64Wire(dAtA, i, uint64(m.ParentSpanId))
}
if m.Flags != 0 {
dAtA[i] = 0xb1
i++
dAtA[i] = 0x1
i++
i = encodeFixed64Wire(dAtA, i, uint64(m.Flags))
}
return i, nil
}
func encodeFixed64Wire(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Wire(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintWire(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *TracerState) Size() (n int) {
var l int
_ = l
if m.TraceId != 0 {
n += 9
}
if m.SpanId != 0 {
n += 9
}
if m.Sampled {
n += 2
}
if len(m.BaggageItems) > 0 {
for k, v := range m.BaggageItems {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v)))
n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize))
}
}
if m.TraceIdHigh != 0 {
n += 10
}
if m.ParentSpanId != 0 {
n += 10
}
if m.Flags != 0 {
n += 10
}
return n
}
func sovWire(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozWire(x uint64) (n int) {
return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *TracerState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TracerState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TracerState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
m.TraceId = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
iNdEx += 8
m.TraceId = uint64(dAtA[iNdEx-8])
m.TraceId |= uint64(dAtA[iNdEx-7]) << 8
m.TraceId |= uint64(dAtA[iNdEx-6]) << 16
m.TraceId |= uint64(dAtA[iNdEx-5]) << 24
m.TraceId |= uint64(dAtA[iNdEx-4]) << 32
m.TraceId |= uint64(dAtA[iNdEx-3]) << 40
m.TraceId |= uint64(dAtA[iNdEx-2]) << 48
m.TraceId |= uint64(dAtA[iNdEx-1]) << 56
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
m.SpanId = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
iNdEx += 8
m.SpanId = uint64(dAtA[iNdEx-8])
m.SpanId |= uint64(dAtA[iNdEx-7]) << 8
m.SpanId |= uint64(dAtA[iNdEx-6]) << 16
m.SpanId |= uint64(dAtA[iNdEx-5]) << 24
m.SpanId |= uint64(dAtA[iNdEx-4]) << 32
m.SpanId |= uint64(dAtA[iNdEx-3]) << 40
m.SpanId |= uint64(dAtA[iNdEx-2]) << 48
m.SpanId |= uint64(dAtA[iNdEx-1]) << 56
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Sampled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Sampled = bool(v != 0)
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field BaggageItems", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthWire
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthWire
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.BaggageItems == nil {
m.BaggageItems = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWire
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthWire
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.BaggageItems[mapkey] = mapvalue
} else {
var mapvalue string
m.BaggageItems[mapkey] = mapvalue
}
iNdEx = postIndex
case 20:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceIdHigh", wireType)
}
m.TraceIdHigh = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
iNdEx += 8
m.TraceIdHigh = uint64(dAtA[iNdEx-8])
m.TraceIdHigh |= uint64(dAtA[iNdEx-7]) << 8
m.TraceIdHigh |= uint64(dAtA[iNdEx-6]) << 16
m.TraceIdHigh |= uint64(dAtA[iNdEx-5]) << 24
m.TraceIdHigh |= uint64(dAtA[iNdEx-4]) << 32
m.TraceIdHigh |= uint64(dAtA[iNdEx-3]) << 40
m.TraceIdHigh |= uint64(dAtA[iNdEx-2]) << 48
m.TraceIdHigh |= uint64(dAtA[iNdEx-1]) << 56
case 21:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
}
m.ParentSpanId = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
iNdEx += 8
m.ParentSpanId = uint64(dAtA[iNdEx-8])
m.ParentSpanId |= uint64(dAtA[iNdEx-7]) << 8
m.ParentSpanId |= uint64(dAtA[iNdEx-6]) << 16
m.ParentSpanId |= uint64(dAtA[iNdEx-5]) << 24
m.ParentSpanId |= uint64(dAtA[iNdEx-4]) << 32
m.ParentSpanId |= uint64(dAtA[iNdEx-3]) << 40
m.ParentSpanId |= uint64(dAtA[iNdEx-2]) << 48
m.ParentSpanId |= uint64(dAtA[iNdEx-1]) << 56
case 22:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
iNdEx += 8
m.Flags = uint64(dAtA[iNdEx-8])
m.Flags |= uint64(dAtA[iNdEx-7]) << 8
m.Flags |= uint64(dAtA[iNdEx-6]) << 16
m.Flags |= uint64(dAtA[iNdEx-5]) << 24
m.Flags |= uint64(dAtA[iNdEx-4]) << 32
m.Flags |= uint64(dAtA[iNdEx-3]) << 40
m.Flags |= uint64(dAtA[iNdEx-2]) << 48
m.Flags |= uint64(dAtA[iNdEx-1]) << 56
default:
iNdEx = preIndex
skippy, err := skipWire(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthWire
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipWire(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowWire
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowWire
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowWire
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthWire
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowWire
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipWire(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowWire = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("wire.proto", fileDescriptorWire) }
var fileDescriptorWire = []byte{
// 300 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a,
0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xa9, 0xca, 0x2c, 0xc8, 0xce, 0xcc, 0x2b, 0x29,
0x4a, 0x4c, 0x4e, 0x2d, 0x8a, 0x4f, 0xcf, 0xd7, 0x03, 0xc9, 0x29, 0x5d, 0x63, 0xe2, 0xe2, 0x0e,
0x01, 0x0b, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x49, 0x72, 0x71, 0x80, 0x55, 0xc4, 0x67, 0xa6,
0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x05, 0xb1, 0x83, 0xf9, 0x9e, 0x29, 0x42, 0xe2, 0x5c, 0xec,
0xc5, 0x05, 0x89, 0x79, 0x20, 0x19, 0x26, 0xb0, 0x0c, 0x1b, 0x88, 0xeb, 0x99, 0x22, 0x24, 0xc1,
0xc5, 0x5e, 0x9c, 0x98, 0x5b, 0x90, 0x93, 0x9a, 0x22, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x11, 0x04,
0xe3, 0x0a, 0x45, 0x70, 0xf1, 0x26, 0x25, 0xa6, 0xa7, 0x27, 0xa6, 0xa7, 0xc6, 0x67, 0x96, 0xa4,
0xe6, 0x16, 0x4b, 0xb0, 0x28, 0x30, 0x6b, 0x70, 0x1b, 0x19, 0xeb, 0x61, 0x73, 0x8b, 0x1e, 0x92,
0x3b, 0xf4, 0x9c, 0x20, 0xda, 0x3c, 0x41, 0xba, 0x5c, 0xf3, 0x4a, 0x8a, 0x2a, 0x83, 0x78, 0x92,
0x90, 0x84, 0x84, 0x94, 0xb8, 0x78, 0x61, 0xee, 0x8c, 0xcf, 0xc8, 0x4c, 0xcf, 0x90, 0x10, 0x01,
0x3b, 0x89, 0x1b, 0xea, 0x58, 0x8f, 0xcc, 0xf4, 0x0c, 0x21, 0x15, 0x2e, 0xbe, 0x82, 0xc4, 0xa2,
0xd4, 0xbc, 0x92, 0x78, 0x98, 0xbb, 0x45, 0xc1, 0x8a, 0x78, 0x20, 0xa2, 0xc1, 0x10, 0xd7, 0x8b,
0x70, 0xb1, 0xa6, 0xe5, 0x24, 0xa6, 0x17, 0x4b, 0x88, 0x81, 0x25, 0x21, 0x1c, 0x29, 0x7b, 0x2e,
0x41, 0x0c, 0x27, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x82, 0xc3, 0x85, 0x33, 0x08, 0xc4,
0x04, 0x69, 0x2e, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0x87, 0x08, 0x67, 0x10, 0x84, 0x63, 0xc5, 0x64,
0xc1, 0xe8, 0x24, 0x76, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
0x4e, 0x78, 0x2c, 0xc7, 0x10, 0xc5, 0x02, 0xf2, 0x64, 0x12, 0x1b, 0x38, 0x36, 0x8c, 0x01, 0x01,
0x00, 0x00, 0xff, 0xff, 0xb5, 0x5e, 0x0d, 0x33, 0x9b, 0x01, 0x00, 0x00,
}

View file

@@ -0,0 +1,72 @@
package zipkintracer
import (
"encoding/binary"
"net"
"strconv"
"strings"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
// makeEndpoint takes the hostport and service name that represent this Zipkin
// service, and returns an endpoint that's embedded into the Zipkin core Span
// type. If the input parameters are malformed or the host can't be resolved,
// the returned endpoint falls back to just the service name (and port).
func makeEndpoint(hostport, serviceName string) (ep *zipkincore.Endpoint) {
ep = zipkincore.NewEndpoint()
// Set the ServiceName
ep.ServiceName = serviceName
if strings.IndexByte(hostport, ':') < 0 {
// "<host>" becomes "<host>:0"
hostport = hostport + ":0"
}
// try to parse provided "<host>:<port>"
host, port, err := net.SplitHostPort(hostport)
if err != nil {
// if unparsable, return as "undefined:0"
return
}
// try to set port number
p, _ := strconv.ParseUint(port, 10, 16)
ep.Port = int16(p)
// if <host> is a domain name, look it up
addrs, err := net.LookupIP(host)
if err != nil {
// return as "undefined:<port>"
return
}
var addr4, addr16 net.IP
for i := range addrs {
addr := addrs[i].To4()
if addr == nil {
// IPv6
if addr16 == nil {
addr16 = addrs[i].To16() // IPv6 - 16 bytes
}
} else {
// IPv4
if addr4 == nil {
addr4 = addr // IPv4 - 4 bytes
}
}
if addr16 != nil && addr4 != nil {
// IPv4 & IPv6 have been set, we can stop looking further
break
}
}
// default to 0 filled 4 byte array for IPv4 if IPv6 only host was found
if addr4 == nil {
addr4 = make([]byte, 4)
}
// set IPv4 and IPv6 addresses
ep.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4))
ep.Ipv6 = []byte(addr16)
return
}
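
A test-style sketch of the fallback behaviour described above; it has to live in this package because makeEndpoint is unexported, and the printed ports assume that localhost resolves on the machine running it.

package zipkintracer

import "fmt"

// Example_makeEndpoint illustrates the host:port handling only; the lookup
// results (Ipv4/Ipv6 fields) depend on the local resolver and are not printed.
func Example_makeEndpoint() {
	// A bare host gets ":0" appended before parsing, so the port stays 0.
	ep := makeEndpoint("localhost", "svc-a")
	fmt.Println(ep.ServiceName, ep.Port) // svc-a 0

	// A literal IPv4 address with an explicit port is used as-is.
	ep = makeEndpoint("127.0.0.1:8080", "svc-b")
	fmt.Println(ep.ServiceName, ep.Port) // svc-b 8080
}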

View file

@@ -0,0 +1,287 @@
package zipkintracer
import (
"encoding/binary"
"fmt"
"math"
"net"
"strconv"
"time"
otext "github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
"github.com/openzipkin/zipkin-go-opentracing/flag"
)
var (
// SpanKindResource will be regarded as a SA annotation by Zipkin.
SpanKindResource = otext.SpanKindEnum("resource")
)
// Recorder implements the SpanRecorder interface.
type Recorder struct {
collector Collector
debug bool
endpoint *zipkincore.Endpoint
materializer func(logFields []log.Field) ([]byte, error)
}
// RecorderOption allows for functional options.
type RecorderOption func(r *Recorder)
// WithLogFmtMaterializer will convert OpenTracing Log fields to a LogFmt representation.
func WithLogFmtMaterializer() RecorderOption {
return func(r *Recorder) {
r.materializer = MaterializeWithLogFmt
}
}
// WithJSONMaterializer will convert OpenTracing Log fields to a JSON representation.
func WithJSONMaterializer() RecorderOption {
return func(r *Recorder) {
r.materializer = MaterializeWithJSON
}
}
// WithStrictMaterializer will only record event Log fields and discard the rest.
func WithStrictMaterializer() RecorderOption {
return func(r *Recorder) {
r.materializer = StrictZipkinMaterializer
}
}
// NewRecorder creates a new Zipkin Recorder backed by the provided Collector.
//
// hostPort and serviceName allow you to set the default Zipkin endpoint
// information which will be added to the application's standard core
// annotations. hostPort will be resolved into an IPv4 and/or IPv6 address and
// port number; serviceName will be used as the application's service
// identifier.
//
// If the application does not listen for incoming requests, or the endpoint
// context does not involve a network address and/or port, these cases can be
// handled like this:
// # port is not applicable:
// NewRecorder(c, debug, "192.168.1.12:0", "ServiceA")
//
// # network address and port are not applicable:
// NewRecorder(c, debug, "0.0.0.0:0", "ServiceB")
func NewRecorder(c Collector, debug bool, hostPort, serviceName string, options ...RecorderOption) SpanRecorder {
r := &Recorder{
collector: c,
debug: debug,
endpoint: makeEndpoint(hostPort, serviceName),
materializer: MaterializeWithLogFmt,
}
for _, opts := range options {
opts(r)
}
return r
}
// RecordSpan converts a RawSpan into the Zipkin representation of a span
// and records it to the underlying collector.
func (r *Recorder) RecordSpan(sp RawSpan) {
if !sp.Context.Sampled {
return
}
var parentSpanID *int64
if sp.Context.ParentSpanID != nil {
id := int64(*sp.Context.ParentSpanID)
parentSpanID = &id
}
var traceIDHigh *int64
if sp.Context.TraceID.High > 0 {
tidh := int64(sp.Context.TraceID.High)
traceIDHigh = &tidh
}
span := &zipkincore.Span{
Name: sp.Operation,
ID: int64(sp.Context.SpanID),
TraceID: int64(sp.Context.TraceID.Low),
TraceIDHigh: traceIDHigh,
ParentID: parentSpanID,
Debug: r.debug || (sp.Context.Flags&flag.Debug == flag.Debug),
}
// only send timestamp and duration if this process owns the current span.
if sp.Context.Owner {
timestamp := sp.Start.UnixNano() / 1e3
duration := sp.Duration.Nanoseconds() / 1e3
// since we always time our spans we will round up to 1 microsecond if the
// span took less.
if duration == 0 {
duration = 1
}
span.Timestamp = &timestamp
span.Duration = &duration
}
if kind, ok := sp.Tags[string(otext.SpanKind)]; ok {
switch kind {
case otext.SpanKindRPCClient, otext.SpanKindRPCClientEnum:
annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint)
annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint)
case otext.SpanKindRPCServer, otext.SpanKindRPCServerEnum:
annotate(span, sp.Start, zipkincore.SERVER_RECV, r.endpoint)
annotate(span, sp.Start.Add(sp.Duration), zipkincore.SERVER_SEND, r.endpoint)
case SpanKindResource:
serviceName, ok := sp.Tags[string(otext.PeerService)]
if !ok {
serviceName = r.endpoint.GetServiceName()
}
host, ok := sp.Tags[string(otext.PeerHostname)].(string)
if !ok {
if r.endpoint.GetIpv4() > 0 {
ip := make([]byte, 4)
binary.BigEndian.PutUint32(ip, uint32(r.endpoint.GetIpv4()))
host = net.IP(ip).To4().String()
} else {
ip := r.endpoint.GetIpv6()
host = net.IP(ip).String()
}
}
var sPort string
port, ok := sp.Tags[string(otext.PeerPort)]
if !ok {
sPort = strconv.FormatInt(int64(r.endpoint.GetPort()), 10)
} else {
sPort = strconv.FormatInt(int64(port.(uint16)), 10)
}
re := makeEndpoint(net.JoinHostPort(host, sPort), serviceName.(string))
if re != nil {
annotateBinary(span, zipkincore.SERVER_ADDR, serviceName, re)
} else {
fmt.Printf("endpoint creation failed: host: %q port: %q", host, sPort)
}
annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint)
annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint)
default:
annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint)
}
} else {
annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint)
}
for key, value := range sp.Tags {
annotateBinary(span, key, value, r.endpoint)
}
for _, spLog := range sp.Logs {
if len(spLog.Fields) == 1 && spLog.Fields[0].Key() == "event" {
// proper Zipkin annotation
annotate(span, spLog.Timestamp, fmt.Sprintf("%+v", spLog.Fields[0].Value()), r.endpoint)
continue
}
// OpenTracing Log with key-value pair(s). Try to materialize using the
// materializer chosen for the recorder.
if logs, err := r.materializer(spLog.Fields); err != nil {
fmt.Printf("Materialization of OpenTracing LogFields failed: %+v", err)
} else {
annotate(span, spLog.Timestamp, string(logs), r.endpoint)
}
}
_ = r.collector.Collect(span)
}
// annotate annotates the span with the given value.
func annotate(span *zipkincore.Span, timestamp time.Time, value string, host *zipkincore.Endpoint) {
if timestamp.IsZero() {
timestamp = time.Now()
}
span.Annotations = append(span.Annotations, &zipkincore.Annotation{
Timestamp: timestamp.UnixNano() / 1e3,
Value: value,
Host: host,
})
}
// annotateBinary annotates the span with a key and a value that will be []byte
// encoded.
func annotateBinary(span *zipkincore.Span, key string, value interface{}, host *zipkincore.Endpoint) {
var a zipkincore.AnnotationType
var b []byte
// We are not using zipkincore.AnnotationType_I16 for types that could fit
// as reporting on it seems to be broken on the zipkin web interface
// (however, we can properly extract the number from zipkin storage
// directly). int64 has issues with negative numbers but seems ok for
// positive numbers needing more than 32 bit.
switch v := value.(type) {
case bool:
a = zipkincore.AnnotationType_BOOL
b = []byte("\x00")
if v {
b = []byte("\x01")
}
case []byte:
a = zipkincore.AnnotationType_BYTES
b = v
case byte:
a = zipkincore.AnnotationType_I32
b = make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(v))
case int8:
a = zipkincore.AnnotationType_I32
b = make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(v))
case int16:
a = zipkincore.AnnotationType_I32
b = make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(v))
case uint16:
a = zipkincore.AnnotationType_I32
b = make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(v))
case int32:
a = zipkincore.AnnotationType_I32
b = make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(v))
case uint32:
a = zipkincore.AnnotationType_I32
b = make([]byte, 4)
binary.BigEndian.PutUint32(b, v)
case int64:
a = zipkincore.AnnotationType_I64
b = make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(v))
case int:
a = zipkincore.AnnotationType_I32
b = make([]byte, 8)
binary.BigEndian.PutUint32(b, uint32(v))
case uint:
a = zipkincore.AnnotationType_I32
b = make([]byte, 8)
binary.BigEndian.PutUint32(b, uint32(v))
case uint64:
a = zipkincore.AnnotationType_I64
b = make([]byte, 8)
binary.BigEndian.PutUint64(b, v)
case float32:
a = zipkincore.AnnotationType_DOUBLE
b = make([]byte, 8)
bits := math.Float64bits(float64(v))
binary.BigEndian.PutUint64(b, bits)
case float64:
a = zipkincore.AnnotationType_DOUBLE
b = make([]byte, 8)
bits := math.Float64bits(v)
binary.BigEndian.PutUint64(b, bits)
case string:
a = zipkincore.AnnotationType_STRING
b = []byte(v)
default:
// we have no handler for type's value, but let's get a string
// representation of it.
a = zipkincore.AnnotationType_STRING
b = []byte(fmt.Sprintf("%+v", value))
}
span.BinaryAnnotations = append(span.BinaryAnnotations, &zipkincore.BinaryAnnotation{
Key: key,
Value: b,
AnnotationType: a,
Host: host,
})
}
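
For illustration, not part of this diff: this is how instrumented application code ends up exercising the mapping above. The span kind drives the cs/cr core annotations, tags become typed binary annotations via the switch in annotateBinary, and a lone "event" log field becomes a plain annotation.

package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	otlog "github.com/opentracing/opentracing-go/log"
)

// instrumentedCall assumes tracer was built with a Recorder from this package.
func instrumentedCall(tracer opentracing.Tracer) {
	span := tracer.StartSpan("db.query")
	defer span.Finish() // the finished RawSpan is handed to RecordSpan above

	ext.SpanKindRPCClient.Set(span)         // produces the CLIENT_SEND/CLIENT_RECV annotations
	span.SetTag("db.rows", 42)              // int goes through the I32 branch above
	span.SetTag("db.statement", "SELECT 1") // string becomes a STRING binary annotation

	// A single "event" field is recorded as a plain Zipkin annotation; any other
	// field combination is rendered by the configured materializer instead.
	span.LogFields(otlog.String("event", "query.start"))
}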