OpenTracing support
This commit is contained in:
parent 8394549857
commit 30ffba78e6
272 changed files with 44352 additions and 63 deletions
20 vendor/github.com/Shopify/sarama/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (c) 2013 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 vendor/github.com/Shopify/sarama/api_versions_request.go generated vendored Normal file
@@ -0,0 +1,24 @@
package sarama

type ApiVersionsRequest struct {
}

func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
	return nil
}

func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
	return nil
}

func (r *ApiVersionsRequest) key() int16 {
	return 18
}

func (r *ApiVersionsRequest) version() int16 {
	return 0
}

func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
87 vendor/github.com/Shopify/sarama/api_versions_response.go generated vendored Normal file
@@ -0,0 +1,87 @@
package sarama

type ApiVersionsResponseBlock struct {
	ApiKey     int16
	MinVersion int16
	MaxVersion int16
}

func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
	pe.putInt16(b.ApiKey)
	pe.putInt16(b.MinVersion)
	pe.putInt16(b.MaxVersion)
	return nil
}

func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
	var err error

	if b.ApiKey, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MinVersion, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MaxVersion, err = pd.getInt16(); err != nil {
		return err
	}

	return nil
}

type ApiVersionsResponse struct {
	Err         KError
	ApiVersions []*ApiVersionsResponseBlock
}

func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
		return err
	}
	for _, apiVersion := range r.ApiVersions {
		if err := apiVersion.encode(pe); err != nil {
			return err
		}
	}
	return nil
}

func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}

	r.Err = KError(kerr)

	numBlocks, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
	for i := 0; i < numBlocks; i++ {
		block := new(ApiVersionsResponseBlock)
		if err := block.decode(pd); err != nil {
			return err
		}
		r.ApiVersions[i] = block
	}

	return nil
}

func (r *ApiVersionsResponse) key() int16 {
	return 18
}

func (r *ApiVersionsResponse) version() int16 {
	return 0
}

func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
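For orientation (this example is not part of the vendored files): the two types above pair with Broker.ApiVersions, added later in this commit in broker.go, which sends an ApiVersionsRequest and decodes the broker's supported version ranges into an ApiVersionsResponse. A minimal sketch, assuming a placeholder broker address:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_10_0_0 // the ApiVersions API requires Kafka >= 0.10.0

	// Placeholder address; adjust for a real cluster.
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	res, err := broker.ApiVersions(&sarama.ApiVersionsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range res.ApiVersions {
		log.Printf("api key %d: versions %d..%d", v.ApiKey, v.MinVersion, v.MaxVersion)
	}
}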
904 vendor/github.com/Shopify/sarama/async_producer.go generated vendored Normal file
@@ -0,0 +1,904 @@
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/eapache/go-resiliency/breaker"
|
||||
"github.com/eapache/queue"
|
||||
)
|
||||
|
||||
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
|
||||
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
|
||||
// and parses responses for errors. You must read from the Errors() channel or the
|
||||
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
|
||||
// leaks: it will not be garbage-collected automatically when it passes out of
|
||||
// scope.
|
||||
type AsyncProducer interface {
|
||||
|
||||
// AsyncClose triggers a shutdown of the producer. The shutdown has completed
|
||||
// when both the Errors and Successes channels have been closed. When calling
|
||||
// AsyncClose, you *must* continue to read from those channels in order to
|
||||
// drain the results of any messages in flight.
|
||||
AsyncClose()
|
||||
|
||||
// Close shuts down the producer and waits for any buffered messages to be
|
||||
// flushed. You must call this function before a producer object passes out of
|
||||
// scope, as it may otherwise leak memory. You must call this before calling
|
||||
// Close on the underlying client.
|
||||
Close() error
|
||||
|
||||
// Input is the input channel for the user to write messages to that they
|
||||
// wish to send.
|
||||
Input() chan<- *ProducerMessage
|
||||
|
||||
// Successes is the success output channel back to the user when Return.Successes is
|
||||
// enabled. If Return.Successes is true, you MUST read from this channel or the
|
||||
// Producer will deadlock. It is suggested that you send and read messages
|
||||
// together in a single select statement.
|
||||
Successes() <-chan *ProducerMessage
|
||||
|
||||
// Errors is the error output channel back to the user. You MUST read from this
|
||||
// channel or the Producer will deadlock when the channel is full. Alternatively,
|
||||
// you can set Producer.Return.Errors in your config to false, which prevents
// errors from being returned.
|
||||
Errors() <-chan *ProducerError
|
||||
}
|
||||
|
||||
type asyncProducer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
|
||||
errors chan *ProducerError
|
||||
input, successes, retries chan *ProducerMessage
|
||||
inFlight sync.WaitGroup
|
||||
|
||||
brokers map[*Broker]chan<- *ProducerMessage
|
||||
brokerRefs map[chan<- *ProducerMessage]int
|
||||
brokerLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
|
||||
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
|
||||
client, err := NewClient(addrs, conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, err := NewAsyncProducerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.(*asyncProducer).ownClient = true
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this producer.
|
||||
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
p := &asyncProducer{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
errors: make(chan *ProducerError),
|
||||
input: make(chan *ProducerMessage),
|
||||
successes: make(chan *ProducerMessage),
|
||||
retries: make(chan *ProducerMessage),
|
||||
brokers: make(map[*Broker]chan<- *ProducerMessage),
|
||||
brokerRefs: make(map[chan<- *ProducerMessage]int),
|
||||
}
|
||||
|
||||
// launch our singleton dispatchers
|
||||
go withRecover(p.dispatcher)
|
||||
go withRecover(p.retryHandler)
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
type flagSet int8
|
||||
|
||||
const (
|
||||
syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
|
||||
fin // final message from partitionProducer to brokerProducer and back
|
||||
shutdown // start the shutdown process
|
||||
)
|
||||
|
||||
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
|
||||
type ProducerMessage struct {
|
||||
Topic string // The Kafka topic for this message.
|
||||
// The partitioning key for this message. Pre-existing Encoders include
|
||||
// StringEncoder and ByteEncoder.
|
||||
Key Encoder
|
||||
// The actual message to store in Kafka. Pre-existing Encoders include
|
||||
// StringEncoder and ByteEncoder.
|
||||
Value Encoder
|
||||
|
||||
// This field is used to hold arbitrary data you wish to include so it
|
||||
// will be available when receiving on the Successes and Errors channels.
|
||||
// Sarama completely ignores this field; it is only to be used for
// pass-through data.
|
||||
Metadata interface{}
|
||||
|
||||
// Below this point are filled in by the producer as the message is processed
|
||||
|
||||
// Offset is the offset of the message stored on the broker. This is only
|
||||
// guaranteed to be defined if the message was successfully delivered and
|
||||
// RequiredAcks is not NoResponse.
|
||||
Offset int64
|
||||
// Partition is the partition that the message was sent to. This is only
|
||||
// guaranteed to be defined if the message was successfully delivered.
|
||||
Partition int32
|
||||
// Timestamp is the timestamp assigned to the message by the broker. This
|
||||
// is only guaranteed to be defined if the message was successfully
|
||||
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
|
||||
// least version 0.10.0.
|
||||
Timestamp time.Time
|
||||
|
||||
retries int
|
||||
flags flagSet
|
||||
}
|
||||
|
||||
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
|
||||
|
||||
func (m *ProducerMessage) byteSize() int {
|
||||
size := producerMessageOverhead
|
||||
if m.Key != nil {
|
||||
size += m.Key.Length()
|
||||
}
|
||||
if m.Value != nil {
|
||||
size += m.Value.Length()
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (m *ProducerMessage) clear() {
|
||||
m.flags = 0
|
||||
m.retries = 0
|
||||
}
|
||||
|
||||
// ProducerError is the type of error generated when the producer fails to deliver a message.
|
||||
// It contains the original ProducerMessage as well as the actual error value.
|
||||
type ProducerError struct {
|
||||
Msg *ProducerMessage
|
||||
Err error
|
||||
}
|
||||
|
||||
func (pe ProducerError) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
|
||||
}
|
||||
|
||||
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
|
||||
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
|
||||
// when closing a producer.
|
||||
type ProducerErrors []*ProducerError
|
||||
|
||||
func (pe ProducerErrors) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Errors() <-chan *ProducerError {
|
||||
return p.errors
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
|
||||
return p.successes
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Input() chan<- *ProducerMessage {
|
||||
return p.input
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Close() error {
|
||||
p.AsyncClose()
|
||||
|
||||
if p.conf.Producer.Return.Successes {
|
||||
go withRecover(func() {
|
||||
for range p.successes {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var errors ProducerErrors
|
||||
if p.conf.Producer.Return.Errors {
|
||||
for event := range p.errors {
|
||||
errors = append(errors, event)
|
||||
}
|
||||
} else {
|
||||
<-p.errors
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *asyncProducer) AsyncClose() {
|
||||
go withRecover(p.shutdown)
|
||||
}
|
||||
|
||||
// singleton
|
||||
// dispatches messages by topic
|
||||
func (p *asyncProducer) dispatcher() {
|
||||
handlers := make(map[string]chan<- *ProducerMessage)
|
||||
shuttingDown := false
|
||||
|
||||
for msg := range p.input {
|
||||
if msg == nil {
|
||||
Logger.Println("Something tried to send a nil message, it was ignored.")
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.flags&shutdown != 0 {
|
||||
shuttingDown = true
|
||||
p.inFlight.Done()
|
||||
continue
|
||||
} else if msg.retries == 0 {
|
||||
if shuttingDown {
|
||||
// we can't just call returnError here because that decrements the wait group,
|
||||
// which hasn't been incremented yet for this message, and shouldn't be
|
||||
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
p.errors <- pErr
|
||||
} else {
|
||||
Logger.Println(pErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.inFlight.Add(1)
|
||||
}
|
||||
|
||||
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
|
||||
p.returnError(msg, ErrMessageSizeTooLarge)
|
||||
continue
|
||||
}
|
||||
|
||||
handler := handlers[msg.Topic]
|
||||
if handler == nil {
|
||||
handler = p.newTopicProducer(msg.Topic)
|
||||
handlers[msg.Topic] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// one per topic
|
||||
// partitions messages, then dispatches them by partition
|
||||
type topicProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
breaker *breaker.Breaker
|
||||
handlers map[int32]chan<- *ProducerMessage
|
||||
partitioner Partitioner
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
tp := &topicProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
input: input,
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
handlers: make(map[int32]chan<- *ProducerMessage),
|
||||
partitioner: p.conf.Producer.Partitioner(topic),
|
||||
}
|
||||
go withRecover(tp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (tp *topicProducer) dispatch() {
|
||||
for msg := range tp.input {
|
||||
if msg.retries == 0 {
|
||||
if err := tp.partitionMessage(msg); err != nil {
|
||||
tp.parent.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
handler := tp.handlers[msg.Partition]
|
||||
if handler == nil {
|
||||
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
|
||||
tp.handlers[msg.Partition] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range tp.handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
|
||||
var partitions []int32
|
||||
|
||||
err := tp.breaker.Run(func() (err error) {
|
||||
if tp.partitioner.RequiresConsistency() {
|
||||
partitions, err = tp.parent.client.Partitions(msg.Topic)
|
||||
} else {
|
||||
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
|
||||
}
|
||||
return
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numPartitions := int32(len(partitions))
|
||||
|
||||
if numPartitions == 0 {
|
||||
return ErrLeaderNotAvailable
|
||||
}
|
||||
|
||||
choice, err := tp.partitioner.Partition(msg, numPartitions)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if choice < 0 || choice >= numPartitions {
|
||||
return ErrInvalidPartition
|
||||
}
|
||||
|
||||
msg.Partition = partitions[choice]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// one per partition per topic
|
||||
// dispatches messages to the appropriate broker
|
||||
// also responsible for maintaining message order during retries
|
||||
type partitionProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
partition int32
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
leader *Broker
|
||||
breaker *breaker.Breaker
|
||||
output chan<- *ProducerMessage
|
||||
|
||||
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
|
||||
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
|
||||
// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
|
||||
// therefore whether our buffer is complete and safe to flush)
|
||||
highWatermark int
|
||||
retryState []partitionRetryState
|
||||
}
|
||||
|
||||
type partitionRetryState struct {
|
||||
buf []*ProducerMessage
|
||||
expectChaser bool
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
pp := &partitionProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
input: input,
|
||||
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
|
||||
}
|
||||
go withRecover(pp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) dispatch() {
|
||||
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
|
||||
// on the first message
|
||||
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
|
||||
if pp.leader != nil {
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
}
|
||||
|
||||
for msg := range pp.input {
|
||||
if msg.retries > pp.highWatermark {
|
||||
// a new, higher, retry level; handle it and then back off
|
||||
pp.newHighWatermark(msg.retries)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
} else if pp.highWatermark > 0 {
|
||||
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
|
||||
if msg.retries < pp.highWatermark {
|
||||
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
|
||||
if msg.flags&fin == fin {
|
||||
pp.retryState[msg.retries].expectChaser = false
|
||||
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
|
||||
} else {
|
||||
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
|
||||
}
|
||||
continue
|
||||
} else if msg.flags&fin == fin {
|
||||
// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
|
||||
// meaning this retry level is done and we can go down (at least) one level and flush that
|
||||
pp.retryState[pp.highWatermark].expectChaser = false
|
||||
pp.flushRetryBuffers()
|
||||
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
|
||||
// without breaking any of our ordering guarantees
|
||||
|
||||
if pp.output == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnError(msg, err)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
continue
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
pp.output <- msg
|
||||
}
|
||||
|
||||
if pp.output != nil {
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) newHighWatermark(hwm int) {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
|
||||
pp.highWatermark = hwm
|
||||
|
||||
// send off a fin so that we know when everything "in between" has made it
|
||||
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
|
||||
pp.retryState[pp.highWatermark].expectChaser = true
|
||||
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
|
||||
|
||||
// a new HWM means that our current broker selection is out of date
|
||||
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
pp.output = nil
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) flushRetryBuffers() {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
|
||||
for {
|
||||
pp.highWatermark--
|
||||
|
||||
if pp.output == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
|
||||
goto flushDone
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
for _, msg := range pp.retryState[pp.highWatermark].buf {
|
||||
pp.output <- msg
|
||||
}
|
||||
|
||||
flushDone:
|
||||
pp.retryState[pp.highWatermark].buf = nil
|
||||
if pp.retryState[pp.highWatermark].expectChaser {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
|
||||
break
|
||||
} else if pp.highWatermark == 0 {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) updateLeader() error {
|
||||
return pp.breaker.Run(func() (err error) {
|
||||
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// one per broker; also constructs an associated flusher
|
||||
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
var (
|
||||
input = make(chan *ProducerMessage)
|
||||
bridge = make(chan *produceSet)
|
||||
responses = make(chan *brokerProducerResponse)
|
||||
)
|
||||
|
||||
bp := &brokerProducer{
|
||||
parent: p,
|
||||
broker: broker,
|
||||
input: input,
|
||||
output: bridge,
|
||||
responses: responses,
|
||||
buffer: newProduceSet(p),
|
||||
currentRetries: make(map[string]map[int32]error),
|
||||
}
|
||||
go withRecover(bp.run)
|
||||
|
||||
// minimal bridge to make the network response `select`able
|
||||
go withRecover(func() {
|
||||
for set := range bridge {
|
||||
request := set.buildRequest()
|
||||
|
||||
response, err := broker.Produce(request)
|
||||
|
||||
responses <- &brokerProducerResponse{
|
||||
set: set,
|
||||
err: err,
|
||||
res: response,
|
||||
}
|
||||
}
|
||||
close(responses)
|
||||
})
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
type brokerProducerResponse struct {
|
||||
set *produceSet
|
||||
err error
|
||||
res *ProduceResponse
|
||||
}
|
||||
|
||||
// groups messages together into appropriately-sized batches for sending to the broker
|
||||
// handles state related to retries etc
|
||||
type brokerProducer struct {
|
||||
parent *asyncProducer
|
||||
broker *Broker
|
||||
|
||||
input <-chan *ProducerMessage
|
||||
output chan<- *produceSet
|
||||
responses <-chan *brokerProducerResponse
|
||||
|
||||
buffer *produceSet
|
||||
timer <-chan time.Time
|
||||
timerFired bool
|
||||
|
||||
closing error
|
||||
currentRetries map[string]map[int32]error
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) run() {
|
||||
var output chan<- *produceSet
|
||||
Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-bp.input:
|
||||
if msg == nil {
|
||||
bp.shutdown()
|
||||
return
|
||||
}
|
||||
|
||||
if msg.flags&syn == syn {
|
||||
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
|
||||
bp.broker.ID(), msg.Topic, msg.Partition)
|
||||
if bp.currentRetries[msg.Topic] == nil {
|
||||
bp.currentRetries[msg.Topic] = make(map[int32]error)
|
||||
}
|
||||
bp.currentRetries[msg.Topic][msg.Partition] = nil
|
||||
bp.parent.inFlight.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
if reason := bp.needsRetry(msg); reason != nil {
|
||||
bp.parent.retryMessage(msg, reason)
|
||||
|
||||
if bp.closing == nil && msg.flags&fin == fin {
|
||||
// we were retrying this partition but we can start processing again
|
||||
delete(bp.currentRetries[msg.Topic], msg.Partition)
|
||||
Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
|
||||
bp.broker.ID(), msg.Topic, msg.Partition)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if bp.buffer.wouldOverflow(msg) {
|
||||
if err := bp.waitForSpace(msg); err != nil {
|
||||
bp.parent.retryMessage(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err := bp.buffer.add(msg); err != nil {
|
||||
bp.parent.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
|
||||
bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
|
||||
}
|
||||
case <-bp.timer:
|
||||
bp.timerFired = true
|
||||
case output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
}
|
||||
|
||||
if bp.timerFired || bp.buffer.readyToFlush() {
|
||||
output = bp.output
|
||||
} else {
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) shutdown() {
|
||||
for !bp.buffer.empty() {
|
||||
select {
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
case bp.output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
}
|
||||
}
|
||||
close(bp.output)
|
||||
for response := range bp.responses {
|
||||
bp.handleResponse(response)
|
||||
}
|
||||
|
||||
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
|
||||
if bp.closing != nil {
|
||||
return bp.closing
|
||||
}
|
||||
|
||||
return bp.currentRetries[msg.Topic][msg.Partition]
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
|
||||
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
|
||||
|
||||
for {
|
||||
select {
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
// handling a response can change our state, so re-check some things
|
||||
if reason := bp.needsRetry(msg); reason != nil {
|
||||
return reason
|
||||
} else if !bp.buffer.wouldOverflow(msg) {
|
||||
return nil
|
||||
}
|
||||
case bp.output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) rollOver() {
|
||||
bp.timer = nil
|
||||
bp.timerFired = false
|
||||
bp.buffer = newProduceSet(bp.parent)
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
|
||||
if response.err != nil {
|
||||
bp.handleError(response.set, response.err)
|
||||
} else {
|
||||
bp.handleSuccess(response.set, response.res)
|
||||
}
|
||||
|
||||
if bp.buffer.empty() {
|
||||
bp.rollOver() // this can happen if the response invalidated our buffer
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
|
||||
// we iterate through the blocks in the request set, not the response, so that we notice
|
||||
// if the response is missing a block completely
|
||||
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
if response == nil {
|
||||
// this only happens when RequiredAcks is NoResponse, so we have to assume success
|
||||
bp.parent.returnSuccesses(msgs)
|
||||
return
|
||||
}
|
||||
|
||||
block := response.GetBlock(topic, partition)
|
||||
if block == nil {
|
||||
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
|
||||
return
|
||||
}
|
||||
|
||||
switch block.Err {
|
||||
// Success
|
||||
case ErrNoError:
|
||||
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
|
||||
for _, msg := range msgs {
|
||||
msg.Timestamp = block.Timestamp
|
||||
}
|
||||
}
|
||||
for i, msg := range msgs {
|
||||
msg.Offset = block.Offset + int64(i)
|
||||
}
|
||||
bp.parent.returnSuccesses(msgs)
|
||||
// Retriable errors
|
||||
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
|
||||
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
|
||||
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
|
||||
bp.broker.ID(), topic, partition, block.Err)
|
||||
bp.currentRetries[topic][partition] = block.Err
|
||||
bp.parent.retryMessages(msgs, block.Err)
|
||||
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
|
||||
// Other non-retriable errors
|
||||
default:
|
||||
bp.parent.returnErrors(msgs, block.Err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
|
||||
switch err.(type) {
|
||||
case PacketEncodingError:
|
||||
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
bp.parent.returnErrors(msgs, err)
|
||||
})
|
||||
default:
|
||||
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
|
||||
bp.parent.abandonBrokerConnection(bp.broker)
|
||||
_ = bp.broker.Close()
|
||||
bp.closing = err
|
||||
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
bp.parent.retryMessages(msgs, err)
|
||||
})
|
||||
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
bp.parent.retryMessages(msgs, err)
|
||||
})
|
||||
bp.rollOver()
|
||||
}
|
||||
}
|
||||
|
||||
// singleton
|
||||
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
|
||||
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
|
||||
func (p *asyncProducer) retryHandler() {
|
||||
var msg *ProducerMessage
|
||||
buf := queue.New()
|
||||
|
||||
for {
|
||||
if buf.Length() == 0 {
|
||||
msg = <-p.retries
|
||||
} else {
|
||||
select {
|
||||
case msg = <-p.retries:
|
||||
case p.input <- buf.Peek().(*ProducerMessage):
|
||||
buf.Remove()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if msg == nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf.Add(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// utility functions
|
||||
|
||||
func (p *asyncProducer) shutdown() {
|
||||
Logger.Println("Producer shutting down.")
|
||||
p.inFlight.Add(1)
|
||||
p.input <- &ProducerMessage{flags: shutdown}
|
||||
|
||||
p.inFlight.Wait()
|
||||
|
||||
if p.ownClient {
|
||||
err := p.client.Close()
|
||||
if err != nil {
|
||||
Logger.Println("producer/shutdown failed to close the embedded client:", err)
|
||||
}
|
||||
}
|
||||
|
||||
close(p.input)
|
||||
close(p.retries)
|
||||
close(p.errors)
|
||||
close(p.successes)
|
||||
}
|
||||
|
||||
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
|
||||
msg.clear()
|
||||
pErr := &ProducerError{Msg: msg, Err: err}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
p.errors <- pErr
|
||||
} else {
|
||||
Logger.Println(pErr)
|
||||
}
|
||||
p.inFlight.Done()
|
||||
}
|
||||
|
||||
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
|
||||
for _, msg := range batch {
|
||||
p.returnError(msg, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
|
||||
for _, msg := range batch {
|
||||
if p.conf.Producer.Return.Successes {
|
||||
msg.clear()
|
||||
p.successes <- msg
|
||||
}
|
||||
p.inFlight.Done()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
|
||||
if msg.retries >= p.conf.Producer.Retry.Max {
|
||||
p.returnError(msg, err)
|
||||
} else {
|
||||
msg.retries++
|
||||
p.retries <- msg
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
|
||||
for _, msg := range batch {
|
||||
p.retryMessage(msg, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
bp := p.brokers[broker]
|
||||
|
||||
if bp == nil {
|
||||
bp = p.newBrokerProducer(broker)
|
||||
p.brokers[broker] = bp
|
||||
p.brokerRefs[bp] = 0
|
||||
}
|
||||
|
||||
p.brokerRefs[bp]++
|
||||
|
||||
return bp
|
||||
}
|
||||
|
||||
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
p.brokerRefs[bp]--
|
||||
if p.brokerRefs[bp] == 0 {
|
||||
close(bp)
|
||||
delete(p.brokerRefs, bp)
|
||||
|
||||
if p.brokers[broker] == bp {
|
||||
delete(p.brokers, broker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
delete(p.brokers, broker)
|
||||
}
|
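A minimal usage sketch (not part of the vendored files) of the AsyncProducer contract documented above: enable Producer.Return.Successes, push a message on Input(), and read Successes() and Errors() so the producer cannot deadlock. The broker address and topic name are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Producer.Return.Successes = true // required when reading from Successes()

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close() // flushes buffered messages and drains the channels

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}

	// Read both result channels; with many messages in flight these reads
	// belong in the same select as the send, as the interface docs suggest.
	select {
	case msg := <-producer.Successes():
		log.Printf("stored at partition %d, offset %d", msg.Partition, msg.Offset)
	case perr := <-producer.Errors():
		log.Println("delivery failed:", perr.Err)
	}
}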
685 vendor/github.com/Shopify/sarama/broker.go generated vendored Normal file
@@ -0,0 +1,685 @@
package sarama
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
|
||||
type Broker struct {
|
||||
id int32
|
||||
addr string
|
||||
|
||||
conf *Config
|
||||
correlationID int32
|
||||
conn net.Conn
|
||||
connErr error
|
||||
lock sync.Mutex
|
||||
opened int32
|
||||
|
||||
responses chan responsePromise
|
||||
done chan bool
|
||||
|
||||
incomingByteRate metrics.Meter
|
||||
requestRate metrics.Meter
|
||||
requestSize metrics.Histogram
|
||||
requestLatency metrics.Histogram
|
||||
outgoingByteRate metrics.Meter
|
||||
responseRate metrics.Meter
|
||||
responseSize metrics.Histogram
|
||||
brokerIncomingByteRate metrics.Meter
|
||||
brokerRequestRate metrics.Meter
|
||||
brokerRequestSize metrics.Histogram
|
||||
brokerRequestLatency metrics.Histogram
|
||||
brokerOutgoingByteRate metrics.Meter
|
||||
brokerResponseRate metrics.Meter
|
||||
brokerResponseSize metrics.Histogram
|
||||
}
|
||||
|
||||
type responsePromise struct {
|
||||
requestTime time.Time
|
||||
correlationID int32
|
||||
packets chan []byte
|
||||
errors chan error
|
||||
}
|
||||
|
||||
// NewBroker creates and returns a Broker targeting the given host:port address.
|
||||
// This does not attempt to actually connect; you have to call Open() for that.
|
||||
func NewBroker(addr string) *Broker {
|
||||
return &Broker{id: -1, addr: addr}
|
||||
}
|
||||
|
||||
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
|
||||
// waiting for the connection to complete. This means that any subsequent operations on the broker will
|
||||
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
|
||||
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
|
||||
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
|
||||
func (b *Broker) Open(conf *Config) error {
|
||||
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
|
||||
return ErrAlreadyConnected
|
||||
}
|
||||
|
||||
if conf == nil {
|
||||
conf = NewConfig()
|
||||
}
|
||||
|
||||
err := conf.Validate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
|
||||
go withRecover(func() {
|
||||
defer b.lock.Unlock()
|
||||
|
||||
dialer := net.Dialer{
|
||||
Timeout: conf.Net.DialTimeout,
|
||||
KeepAlive: conf.Net.KeepAlive,
|
||||
}
|
||||
|
||||
if conf.Net.TLS.Enable {
|
||||
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
|
||||
} else {
|
||||
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
|
||||
}
|
||||
if b.connErr != nil {
|
||||
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
|
||||
b.conn = nil
|
||||
atomic.StoreInt32(&b.opened, 0)
|
||||
return
|
||||
}
|
||||
b.conn = newBufConn(b.conn)
|
||||
|
||||
b.conf = conf
|
||||
|
||||
// Create or reuse the global metrics shared between brokers
|
||||
b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
|
||||
b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
|
||||
b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
|
||||
b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
|
||||
b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
|
||||
b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
|
||||
b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
|
||||
// Do not gather metrics for seeded broker (only used during bootstrap) because they share
|
||||
// the same id (-1) and are already exposed through the global metrics above
|
||||
if b.id >= 0 {
|
||||
b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
|
||||
b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
|
||||
b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
|
||||
b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
|
||||
b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
|
||||
b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
|
||||
b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
|
||||
}
|
||||
|
||||
if conf.Net.SASL.Enable {
|
||||
b.connErr = b.sendAndReceiveSASLPlainAuth()
|
||||
if b.connErr != nil {
|
||||
err = b.conn.Close()
|
||||
if err == nil {
|
||||
Logger.Printf("Closed connection to broker %s\n", b.addr)
|
||||
} else {
|
||||
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
|
||||
}
|
||||
b.conn = nil
|
||||
atomic.StoreInt32(&b.opened, 0)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
b.done = make(chan bool)
|
||||
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
|
||||
|
||||
if b.id >= 0 {
|
||||
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
|
||||
} else {
|
||||
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
|
||||
}
|
||||
go withRecover(b.responseReceiver)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connected returns true if the broker is connected and false otherwise. If the broker is not
|
||||
// connected but it had tried to connect, the error from that connection attempt is also returned.
|
||||
func (b *Broker) Connected() (bool, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
return b.conn != nil, b.connErr
|
||||
}
|
||||
|
||||
func (b *Broker) Close() error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.conn == nil {
|
||||
return ErrNotConnected
|
||||
}
|
||||
|
||||
close(b.responses)
|
||||
<-b.done
|
||||
|
||||
err := b.conn.Close()
|
||||
|
||||
b.conn = nil
|
||||
b.connErr = nil
|
||||
b.done = nil
|
||||
b.responses = nil
|
||||
|
||||
if err == nil {
|
||||
Logger.Printf("Closed connection to broker %s\n", b.addr)
|
||||
} else {
|
||||
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&b.opened, 0)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
|
||||
func (b *Broker) ID() int32 {
|
||||
return b.id
|
||||
}
|
||||
|
||||
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
|
||||
func (b *Broker) Addr() string {
|
||||
return b.addr
|
||||
}
|
||||
|
||||
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
|
||||
response := new(MetadataResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
|
||||
response := new(ConsumerMetadataResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
|
||||
response := new(OffsetResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
|
||||
var response *ProduceResponse
|
||||
var err error
|
||||
|
||||
if request.RequiredAcks == NoResponse {
|
||||
err = b.sendAndReceive(request, nil)
|
||||
} else {
|
||||
response = new(ProduceResponse)
|
||||
err = b.sendAndReceive(request, response)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
|
||||
response := new(FetchResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
|
||||
response := new(OffsetCommitResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
|
||||
response := new(OffsetFetchResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
|
||||
response := new(JoinGroupResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
|
||||
response := new(SyncGroupResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
|
||||
response := new(LeaveGroupResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
|
||||
response := new(HeartbeatResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
|
||||
response := new(ListGroupsResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
|
||||
response := new(DescribeGroupsResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
|
||||
response := new(ApiVersionsResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.conn == nil {
|
||||
if b.connErr != nil {
|
||||
return nil, b.connErr
|
||||
}
|
||||
return nil, ErrNotConnected
|
||||
}
|
||||
|
||||
if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
|
||||
return nil, ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
|
||||
buf, err := encode(req, b.conf.MetricRegistry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestTime := time.Now()
|
||||
bytes, err := b.conn.Write(buf)
|
||||
b.updateOutgoingCommunicationMetrics(bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.correlationID++
|
||||
|
||||
if !promiseResponse {
|
||||
// Record request latency without the response
|
||||
b.updateRequestLatencyMetrics(time.Since(requestTime))
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
|
||||
b.responses <- promise
|
||||
|
||||
return &promise, nil
|
||||
}
|
||||
|
||||
func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
|
||||
promise, err := b.send(req, res != nil)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if promise == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case buf := <-promise.packets:
|
||||
return versionedDecode(buf, res, req.version())
|
||||
case err = <-promise.errors:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broker) decode(pd packetDecoder) (err error) {
|
||||
b.id, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.addr = net.JoinHostPort(host, fmt.Sprint(port))
|
||||
if _, _, err := net.SplitHostPort(b.addr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Broker) encode(pe packetEncoder) (err error) {
|
||||
|
||||
host, portstr, err := net.SplitHostPort(b.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port, err := strconv.Atoi(portstr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt32(b.id)
|
||||
|
||||
err = pe.putString(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt32(int32(port))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Broker) responseReceiver() {
|
||||
var dead error
|
||||
header := make([]byte, 8)
|
||||
for response := range b.responses {
|
||||
if dead != nil {
|
||||
response.errors <- dead
|
||||
continue
|
||||
}
|
||||
|
||||
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
|
||||
if err != nil {
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
bytesReadHeader, err := io.ReadFull(b.conn, header)
|
||||
requestLatency := time.Since(response.requestTime)
|
||||
if err != nil {
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
decodedHeader := responseHeader{}
|
||||
err = decode(header, &decodedHeader)
|
||||
if err != nil {
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
if decodedHeader.correlationID != response.correlationID {
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
|
||||
// TODO if decoded ID < cur ID, discard until we catch up
|
||||
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
|
||||
dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
|
||||
response.errors <- dead
|
||||
continue
|
||||
}
|
||||
|
||||
buf := make([]byte, decodedHeader.length-4)
|
||||
bytesReadBody, err := io.ReadFull(b.conn, buf)
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
|
||||
if err != nil {
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
response.packets <- buf
|
||||
}
|
||||
close(b.done)
|
||||
}
|
||||
|
||||
func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
|
||||
rb := &SaslHandshakeRequest{"PLAIN"}
|
||||
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
|
||||
buf, err := encode(req, b.conf.MetricRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
requestTime := time.Now()
|
||||
bytes, err := b.conn.Write(buf)
|
||||
b.updateOutgoingCommunicationMetrics(bytes)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
b.correlationID++
|
||||
//wait for the response
|
||||
header := make([]byte, 8) // response header
|
||||
_, err = io.ReadFull(b.conn, header)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
length := binary.BigEndian.Uint32(header[:4])
|
||||
payload := make([]byte, length-4)
|
||||
n, err := io.ReadFull(b.conn, payload)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
|
||||
res := &SaslHandshakeResponse{}
|
||||
err = versionedDecode(payload, res, 0)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
if res.Err != ErrNoError {
|
||||
Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
|
||||
return res.Err
|
||||
}
|
||||
Logger.Print("Successful SASL handshake")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
|
||||
// Some hosted Kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
|
||||
//
|
||||
// In SASL Plain, Kafka expects the auth header to be in the following format
|
||||
// Message format (from https://tools.ietf.org/html/rfc4616):
|
||||
//
|
||||
// message = [authzid] UTF8NUL authcid UTF8NUL passwd
|
||||
// authcid = 1*SAFE ; MUST accept up to 255 octets
|
||||
// authzid = 1*SAFE ; MUST accept up to 255 octets
|
||||
// passwd = 1*SAFE ; MUST accept up to 255 octets
|
||||
// UTF8NUL = %x00 ; UTF-8 encoded NUL character
|
||||
//
|
||||
// SAFE = UTF1 / UTF2 / UTF3 / UTF4
|
||||
// ;; any UTF-8 encoded Unicode character except NUL
|
||||
//
|
||||
// When credentials are valid, Kafka returns a 4 byte array of null characters.
|
||||
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
|
||||
// of responding to bad credentials, but that's how it's being done today.
|
||||
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
|
||||
if b.conf.Net.SASL.Handshake {
|
||||
handshakeErr := b.sendAndReceiveSASLPlainHandshake()
|
||||
if handshakeErr != nil {
|
||||
Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
|
||||
return handshakeErr
|
||||
}
|
||||
}
|
||||
length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
|
||||
authBytes := make([]byte, length+4) //4 byte length header + auth data
|
||||
binary.BigEndian.PutUint32(authBytes, uint32(length))
|
||||
copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
|
||||
|
||||
err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
requestTime := time.Now()
|
||||
bytesWritten, err := b.conn.Write(authBytes)
|
||||
b.updateOutgoingCommunicationMetrics(bytesWritten)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
header := make([]byte, 4)
|
||||
n, err := io.ReadFull(b.conn, header)
|
||||
b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
|
||||
// If the credentials are valid, we would get a 4 byte response filled with null characters.
|
||||
// Otherwise, the broker closes the connection and we get an EOF
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
|
||||
b.updateRequestLatencyMetrics(requestLatency)
|
||||
b.responseRate.Mark(1)
|
||||
if b.brokerResponseRate != nil {
|
||||
b.brokerResponseRate.Mark(1)
|
||||
}
|
||||
responseSize := int64(bytes)
|
||||
b.incomingByteRate.Mark(responseSize)
|
||||
if b.brokerIncomingByteRate != nil {
|
||||
b.brokerIncomingByteRate.Mark(responseSize)
|
||||
}
|
||||
b.responseSize.Update(responseSize)
|
||||
if b.brokerResponseSize != nil {
|
||||
b.brokerResponseSize.Update(responseSize)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
|
||||
requestLatencyInMs := int64(requestLatency / time.Millisecond)
|
||||
b.requestLatency.Update(requestLatencyInMs)
|
||||
if b.brokerRequestLatency != nil {
|
||||
b.brokerRequestLatency.Update(requestLatencyInMs)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
|
||||
b.requestRate.Mark(1)
|
||||
if b.brokerRequestRate != nil {
|
||||
b.brokerRequestRate.Mark(1)
|
||||
}
|
||||
requestSize := int64(bytes)
|
||||
b.outgoingByteRate.Mark(requestSize)
|
||||
if b.brokerOutgoingByteRate != nil {
|
||||
b.brokerOutgoingByteRate.Mark(requestSize)
|
||||
}
|
||||
b.requestSize.Update(requestSize)
|
||||
if b.brokerRequestSize != nil {
|
||||
b.brokerRequestSize.Update(requestSize)
|
||||
}
|
||||
}
|
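The meters and histograms updated by the helpers above all live in the registry configured via Config.MetricRegistry. A minimal sketch of enumerating them, assuming the rcrowley/go-metrics Registry.Each API that config.go imports:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig() // MetricRegistry defaults to a fresh go-metrics registry

	// ... after cfg has been used by a client/producer/consumer, the broker
	// meters and histograms updated above are registered here ...

	// enumerate whatever metrics have been registered so far
	cfg.MetricRegistry.Each(func(name string, _ interface{}) {
		log.Println("registered metric:", name)
	})
}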
794
vendor/github.com/Shopify/sarama/client.go
generated
vendored
Normal file
794
vendor/github.com/Shopify/sarama/client.go
generated
vendored
Normal file
|
@@ -0,0 +1,794 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
|
||||
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
|
||||
// automatically when it passes out of scope. It is safe to share a client amongst many
|
||||
// users, however Kafka will process requests from a single client strictly in serial,
|
||||
// so it is generally more efficient to use the default one client per producer/consumer.
|
||||
type Client interface {
|
||||
// Config returns the Config struct of the client. This struct should not be
|
||||
// altered after it has been created.
|
||||
Config() *Config
|
||||
|
||||
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
|
||||
Brokers() []*Broker
|
||||
|
||||
// Topics returns the set of available topics as retrieved from cluster metadata.
|
||||
Topics() ([]string, error)
|
||||
|
||||
// Partitions returns the sorted list of all partition IDs for the given topic.
|
||||
Partitions(topic string) ([]int32, error)
|
||||
|
||||
// WritablePartitions returns the sorted list of all writable partition IDs for
|
||||
// the given topic, where "writable" means "having a valid leader accepting
|
||||
// writes".
|
||||
WritablePartitions(topic string) ([]int32, error)
|
||||
|
||||
// Leader returns the broker object that is the leader of the current
|
||||
// topic/partition, as determined by querying the cluster metadata.
|
||||
Leader(topic string, partitionID int32) (*Broker, error)
|
||||
|
||||
// Replicas returns the set of all replica IDs for the given partition.
|
||||
Replicas(topic string, partitionID int32) ([]int32, error)
|
||||
|
||||
// InSyncReplicas returns the set of all in-sync replica IDs for the given
|
||||
// partition. In-sync replicas are replicas which are fully caught up with
|
||||
// the partition leader.
|
||||
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
|
||||
|
||||
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
|
||||
// available metadata for those topics. If no topics are provided, it will refresh
|
||||
// metadata for all topics.
|
||||
RefreshMetadata(topics ...string) error
|
||||
|
||||
// GetOffset queries the cluster to get the most recent available offset at the
|
||||
// given time on the topic/partition combination. Time should be OffsetOldest for
|
||||
// the earliest available offset, OffsetNewest for the offset of the message that
|
||||
// will be produced next, or a time.
|
||||
GetOffset(topic string, partitionID int32, time int64) (int64, error)
|
||||
|
||||
// Coordinator returns the coordinating broker for a consumer group. It will
|
||||
// return a locally cached value if it's available. You can call
|
||||
// RefreshCoordinator to update the cached value. This function only works on
|
||||
// Kafka 0.8.2 and higher.
|
||||
Coordinator(consumerGroup string) (*Broker, error)
|
||||
|
||||
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
|
||||
// in local cache. This function only works on Kafka 0.8.2 and higher.
|
||||
RefreshCoordinator(consumerGroup string) error
|
||||
|
||||
// Close shuts down all broker connections managed by this client. It is required
|
||||
// to call this function before a client object passes out of scope, as it will
|
||||
// otherwise leak memory. You must close any Producers or Consumers using a client
|
||||
// before you close the client.
|
||||
Close() error
|
||||
|
||||
// Closed returns true if the client has already had Close called on it
|
||||
Closed() bool
|
||||
}
|
||||
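A minimal sketch of typical Client usage against this interface; the broker address is an assumption:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// broker address is an assumption for the sketch
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // required to avoid leaking broker connections

	topics, err := client.Topics()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("topics:", topics)
}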
|
||||
const (
|
||||
// OffsetNewest stands for the log head offset, i.e. the offset that will be
|
||||
// assigned to the next message that will be produced to the partition. You
|
||||
// can send this to a client's GetOffset method to get this offset, or when
|
||||
// calling ConsumePartition to start consuming new messages.
|
||||
OffsetNewest int64 = -1
|
||||
// OffsetOldest stands for the oldest offset available on the broker for a
|
||||
// partition. You can send this to a client's GetOffset method to get this
|
||||
// offset, or when calling ConsumePartition to start consuming from the
|
||||
// oldest offset that is still available on the broker.
|
||||
OffsetOldest int64 = -2
|
||||
)
|
||||
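A sketch of how these sentinel values are passed to GetOffset; the package name, topic, and partition are assumptions:

package kafkautil // hypothetical package name for the sketch

import "github.com/Shopify/sarama"

// offsetRange returns the oldest retained offset and the next offset to be
// produced for a topic/partition, using the sentinel values defined above.
func offsetRange(client sarama.Client, topic string, partition int32) (oldest, newest int64, err error) {
	if oldest, err = client.GetOffset(topic, partition, sarama.OffsetOldest); err != nil {
		return 0, 0, err
	}
	if newest, err = client.GetOffset(topic, partition, sarama.OffsetNewest); err != nil {
		return 0, 0, err
	}
	return oldest, newest, nil
}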
|
||||
type client struct {
|
||||
conf *Config
|
||||
closer, closed chan none // for shutting down background metadata updater
|
||||
|
||||
// the broker addresses given to us through the constructor are not guaranteed to be returned in
|
||||
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
|
||||
// so we store them separately
|
||||
seedBrokers []*Broker
|
||||
deadSeeds []*Broker
|
||||
|
||||
brokers map[int32]*Broker // maps broker ids to brokers
|
||||
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
|
||||
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
|
||||
|
||||
// If the number of partitions is large, we can get some churn calling cachedPartitions,
|
||||
// so the result is cached. It is important to update this value whenever metadata is changed
|
||||
cachedPartitionsResults map[string][maxPartitionIndex][]int32
|
||||
|
||||
lock sync.RWMutex // protects access to the maps that hold cluster state.
|
||||
}
|
||||
|
||||
// NewClient creates a new Client. It connects to one of the given broker addresses
|
||||
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
|
||||
// be retrieved from any of the given broker addresses, the client is not created.
|
||||
func NewClient(addrs []string, conf *Config) (Client, error) {
|
||||
Logger.Println("Initializing new client")
|
||||
|
||||
if conf == nil {
|
||||
conf = NewConfig()
|
||||
}
|
||||
|
||||
if err := conf.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(addrs) < 1 {
|
||||
return nil, ConfigurationError("You must provide at least one broker address")
|
||||
}
|
||||
|
||||
client := &client{
|
||||
conf: conf,
|
||||
closer: make(chan none),
|
||||
closed: make(chan none),
|
||||
brokers: make(map[int32]*Broker),
|
||||
metadata: make(map[string]map[int32]*PartitionMetadata),
|
||||
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
|
||||
coordinators: make(map[string]int32),
|
||||
}
|
||||
|
||||
random := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for _, index := range random.Perm(len(addrs)) {
|
||||
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
|
||||
}
|
||||
|
||||
if conf.Metadata.Full {
|
||||
// do an initial fetch of all cluster metadata by specifying an empty list of topics
|
||||
err := client.RefreshMetadata()
|
||||
switch err {
|
||||
case nil:
|
||||
break
|
||||
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
|
||||
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
|
||||
Logger.Println(err)
|
||||
default:
|
||||
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
|
||||
_ = client.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
go withRecover(client.backgroundMetadataUpdater)
|
||||
|
||||
Logger.Println("Successfully initialized new client")
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (client *client) Config() *Config {
|
||||
return client.conf
|
||||
}
|
||||
|
||||
func (client *client) Brokers() []*Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
brokers := make([]*Broker, 0)
|
||||
for _, broker := range client.brokers {
|
||||
brokers = append(brokers, broker)
|
||||
}
|
||||
return brokers
|
||||
}
|
||||
|
||||
func (client *client) Close() error {
|
||||
if client.Closed() {
|
||||
// Chances are this is being called from a defer() and the error will go unobserved
|
||||
// so we go ahead and log the event in this case.
|
||||
Logger.Printf("Close() called on already closed client")
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
// shutdown and wait for the background thread before we take the lock, to avoid races
|
||||
close(client.closer)
|
||||
<-client.closed
|
||||
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
Logger.Println("Closing Client")
|
||||
|
||||
for _, broker := range client.brokers {
|
||||
safeAsyncClose(broker)
|
||||
}
|
||||
|
||||
for _, broker := range client.seedBrokers {
|
||||
safeAsyncClose(broker)
|
||||
}
|
||||
|
||||
client.brokers = nil
|
||||
client.metadata = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) Closed() bool {
|
||||
return client.brokers == nil
|
||||
}
|
||||
|
||||
func (client *client) Topics() ([]string, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
ret := make([]string, 0, len(client.metadata))
|
||||
for topic := range client.metadata {
|
||||
ret = append(ret, topic)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (client *client) Partitions(topic string) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
partitions := client.cachedPartitions(topic, allPartitions)
|
||||
|
||||
if len(partitions) == 0 {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
partitions = client.cachedPartitions(topic, allPartitions)
|
||||
}
|
||||
|
||||
if partitions == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
func (client *client) WritablePartitions(topic string) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
partitions := client.cachedPartitions(topic, writablePartitions)
|
||||
|
||||
// len==0 catches when it's nil (no such topic) and the odd case when every single
|
||||
// partition is undergoing leader election simultaneously. Callers have to be able to handle
|
||||
// this function returning an empty slice (which is a valid return value) but catching it
|
||||
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
|
||||
// a metadata refresh as a nicety so callers can just try again and don't have to manually
|
||||
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
|
||||
if len(partitions) == 0 {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
partitions = client.cachedPartitions(topic, writablePartitions)
|
||||
}
|
||||
|
||||
if partitions == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
metadata := client.cachedMetadata(topic, partitionID)
|
||||
|
||||
if metadata == nil {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metadata = client.cachedMetadata(topic, partitionID)
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
if metadata.Err == ErrReplicaNotAvailable {
|
||||
return nil, metadata.Err
|
||||
}
|
||||
return dupInt32Slice(metadata.Replicas), nil
|
||||
}
|
||||
|
||||
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
metadata := client.cachedMetadata(topic, partitionID)
|
||||
|
||||
if metadata == nil {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metadata = client.cachedMetadata(topic, partitionID)
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
if metadata.Err == ErrReplicaNotAvailable {
|
||||
return nil, metadata.Err
|
||||
}
|
||||
return dupInt32Slice(metadata.Isr), nil
|
||||
}
|
||||
|
||||
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
leader, err := client.cachedLeader(topic, partitionID)
|
||||
|
||||
if leader == nil {
|
||||
err = client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
leader, err = client.cachedLeader(topic, partitionID)
|
||||
}
|
||||
|
||||
return leader, err
|
||||
}
|
||||
|
||||
func (client *client) RefreshMetadata(topics ...string) error {
|
||||
if client.Closed() {
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
|
||||
// error. This handles the case by returning an error instead of sending it
|
||||
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
|
||||
for _, topic := range topics {
|
||||
if len(topic) == 0 {
|
||||
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
|
||||
}
|
||||
}
|
||||
|
||||
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
|
||||
}
|
||||
|
||||
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
|
||||
if client.Closed() {
|
||||
return -1, ErrClosedClient
|
||||
}
|
||||
|
||||
offset, err := client.getOffset(topic, partitionID, time)
|
||||
|
||||
if err != nil {
|
||||
if err := client.RefreshMetadata(topic); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return client.getOffset(topic, partitionID, time)
|
||||
}
|
||||
|
||||
return offset, err
|
||||
}
|
||||
|
||||
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
coordinator := client.cachedCoordinator(consumerGroup)
|
||||
|
||||
if coordinator == nil {
|
||||
if err := client.RefreshCoordinator(consumerGroup); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
coordinator = client.cachedCoordinator(consumerGroup)
|
||||
}
|
||||
|
||||
if coordinator == nil {
|
||||
return nil, ErrConsumerCoordinatorNotAvailable
|
||||
}
|
||||
|
||||
_ = coordinator.Open(client.conf)
|
||||
return coordinator, nil
|
||||
}
|
||||
|
||||
func (client *client) RefreshCoordinator(consumerGroup string) error {
|
||||
if client.Closed() {
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
client.registerBroker(response.Coordinator)
|
||||
client.coordinators[consumerGroup] = response.Coordinator.ID()
|
||||
return nil
|
||||
}
|
||||
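A short sketch of looking up a group coordinator through the cached path above; the package and group names are assumptions:

package kafkautil // hypothetical package name for the sketch

import "github.com/Shopify/sarama"

// coordinatorAddr resolves the coordinating broker for a consumer group.
// The result is served from the client's cache; RefreshCoordinator forces a re-lookup.
func coordinatorAddr(client sarama.Client, group string) (string, error) {
	broker, err := client.Coordinator(group)
	if err != nil {
		return "", err
	}
	return broker.Addr(), nil
}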
|
||||
// private broker management helpers
|
||||
|
||||
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
|
||||
// in the brokers map. The provided broker is stored, unless a broker with the same ID and
|
||||
// address is already registered. You must hold the write lock before calling this function.
|
||||
func (client *client) registerBroker(broker *Broker) {
|
||||
if client.brokers[broker.ID()] == nil {
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
|
||||
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
|
||||
safeAsyncClose(client.brokers[broker.ID()])
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
|
||||
}
|
||||
}
|
||||
|
||||
// deregisterBroker removes a broker from the seedBrokers list, and if it's
|
||||
// not a seed broker, removes it from the brokers map completely.
|
||||
func (client *client) deregisterBroker(broker *Broker) {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
|
||||
client.deadSeeds = append(client.deadSeeds, broker)
|
||||
client.seedBrokers = client.seedBrokers[1:]
|
||||
} else {
|
||||
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
|
||||
// but we really shouldn't have to; once that loop is made better this case can be
|
||||
// removed, and the function generally can be renamed from `deregisterBroker` to
|
||||
// `nextSeedBroker` or something
|
||||
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
|
||||
delete(client.brokers, broker.ID())
|
||||
}
|
||||
}
|
||||
|
||||
func (client *client) resurrectDeadBrokers() {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
|
||||
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
|
||||
client.deadSeeds = nil
|
||||
}
|
||||
|
||||
func (client *client) any() *Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
if len(client.seedBrokers) > 0 {
|
||||
_ = client.seedBrokers[0].Open(client.conf)
|
||||
return client.seedBrokers[0]
|
||||
}
|
||||
|
||||
// not guaranteed to be random *or* deterministic
|
||||
for _, broker := range client.brokers {
|
||||
_ = broker.Open(client.conf)
|
||||
return broker
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// private caching/lazy metadata helpers
|
||||
|
||||
type partitionType int
|
||||
|
||||
const (
|
||||
allPartitions partitionType = iota
|
||||
writablePartitions
|
||||
// If you add any more types, update the partition cache in update()
|
||||
|
||||
// Ensure this is the last partition type value
|
||||
maxPartitionIndex
|
||||
)
|
||||
|
||||
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
partitions := client.metadata[topic]
|
||||
if partitions != nil {
|
||||
return partitions[partitionID]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
partitions, exists := client.cachedPartitionsResults[topic]
|
||||
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
return partitions[partitionSet]
|
||||
}
|
||||
|
||||
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
|
||||
partitions := client.metadata[topic]
|
||||
|
||||
if partitions == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ret := make([]int32, 0, len(partitions))
|
||||
for _, partition := range partitions {
|
||||
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, partition.ID)
|
||||
}
|
||||
|
||||
sort.Sort(int32Slice(ret))
|
||||
return ret
|
||||
}
|
||||
|
||||
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
partitions := client.metadata[topic]
|
||||
if partitions != nil {
|
||||
metadata, ok := partitions[partitionID]
|
||||
if ok {
|
||||
if metadata.Err == ErrLeaderNotAvailable {
|
||||
return nil, ErrLeaderNotAvailable
|
||||
}
|
||||
b := client.brokers[metadata.Leader]
|
||||
if b == nil {
|
||||
return nil, ErrLeaderNotAvailable
|
||||
}
|
||||
_ = b.Open(client.conf)
|
||||
return b, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
|
||||
broker, err := client.Leader(topic, partitionID)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
request := &OffsetRequest{}
|
||||
if client.conf.Version.IsAtLeast(V0_10_1_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
request.AddBlock(topic, partitionID, time, 1)
|
||||
|
||||
response, err := broker.GetAvailableOffsets(request)
|
||||
if err != nil {
|
||||
_ = broker.Close()
|
||||
return -1, err
|
||||
}
|
||||
|
||||
block := response.GetBlock(topic, partitionID)
|
||||
if block == nil {
|
||||
_ = broker.Close()
|
||||
return -1, ErrIncompleteResponse
|
||||
}
|
||||
if block.Err != ErrNoError {
|
||||
return -1, block.Err
|
||||
}
|
||||
if len(block.Offsets) != 1 {
|
||||
return -1, ErrOffsetOutOfRange
|
||||
}
|
||||
|
||||
return block.Offsets[0], nil
|
||||
}
|
||||
|
||||
// core metadata update logic
|
||||
|
||||
func (client *client) backgroundMetadataUpdater() {
|
||||
defer close(client.closed)
|
||||
|
||||
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
topics := []string{}
|
||||
if !client.conf.Metadata.Full {
|
||||
if specificTopics, err := client.Topics(); err != nil {
|
||||
Logger.Println("Client background metadata topic load:", err)
|
||||
break
|
||||
} else if len(specificTopics) == 0 {
|
||||
Logger.Println("Client background metadata update: no specific topics to update")
|
||||
break
|
||||
} else {
|
||||
topics = specificTopics
|
||||
}
|
||||
}
|
||||
|
||||
if err := client.RefreshMetadata(topics...); err != nil {
|
||||
Logger.Println("Client background metadata update:", err)
|
||||
}
|
||||
case <-client.closer:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
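A sketch of opting out of this background updater and refreshing metadata on demand instead; the settings shown are illustrative assumptions, not a recommendation from the library:

package kafkautil // hypothetical package name for the sketch

import "github.com/Shopify/sarama"

// newClientWithoutBackgroundRefresh disables the periodic updater above and
// leaves metadata refreshes to explicit calls.
func newClientWithoutBackgroundRefresh(addrs []string) (sarama.Client, error) {
	cfg := sarama.NewConfig()
	cfg.Metadata.RefreshFrequency = 0 // 0 disables backgroundMetadataUpdater
	return sarama.NewClient(addrs, cfg)
}

// refreshOne forces a metadata refresh for a single topic on demand.
func refreshOne(client sarama.Client, topic string) error {
	return client.RefreshMetadata(topic)
}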
|
||||
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
|
||||
retry := func(err error) error {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
if len(topics) > 0 {
|
||||
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
|
||||
} else {
|
||||
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
|
||||
}
|
||||
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
// valid response, use it
|
||||
shouldRetry, err := client.updateMetadata(response)
|
||||
if shouldRetry {
|
||||
Logger.Println("client/metadata found some partitions to be leaderless")
|
||||
return retry(err) // note: err can be nil
|
||||
}
|
||||
return err
|
||||
|
||||
case PacketEncodingError:
|
||||
// didn't even send, return the error
|
||||
return err
|
||||
default:
|
||||
// some other error, remove that broker and try again
|
||||
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
}
|
||||
}
|
||||
|
||||
Logger.Println("client/metadata no available broker to send metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
||||
|
||||
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
|
||||
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
// For all the brokers we received:
|
||||
// - if it is a new ID, save it
|
||||
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
|
||||
// - otherwise ignore it, replacing our existing one would just bounce the connection
|
||||
for _, broker := range data.Brokers {
|
||||
client.registerBroker(broker)
|
||||
}
|
||||
|
||||
for _, topic := range data.Topics {
|
||||
delete(client.metadata, topic.Name)
|
||||
delete(client.cachedPartitionsResults, topic.Name)
|
||||
|
||||
switch topic.Err {
|
||||
case ErrNoError:
|
||||
break
|
||||
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
|
||||
err = topic.Err
|
||||
continue
|
||||
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
|
||||
err = topic.Err
|
||||
retry = true
|
||||
continue
|
||||
case ErrLeaderNotAvailable: // retry, but store partial partition results
|
||||
retry = true
|
||||
break
|
||||
default: // don't retry, don't store partial results
|
||||
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
|
||||
err = topic.Err
|
||||
continue
|
||||
}
|
||||
|
||||
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
|
||||
for _, partition := range topic.Partitions {
|
||||
client.metadata[topic.Name][partition.ID] = partition
|
||||
if partition.Err == ErrLeaderNotAvailable {
|
||||
retry = true
|
||||
}
|
||||
}
|
||||
|
||||
var partitionCache [maxPartitionIndex][]int32
|
||||
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
|
||||
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
|
||||
client.cachedPartitionsResults[topic.Name] = partitionCache
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
|
||||
return client.brokers[coordinatorID]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
|
||||
retry := func(err error) (*ConsumerMetadataResponse, error) {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
|
||||
|
||||
request := new(ConsumerMetadataRequest)
|
||||
request.ConsumerGroup = consumerGroup
|
||||
|
||||
response, err := broker.GetConsumerMetadata(request)
|
||||
|
||||
if err != nil {
|
||||
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
|
||||
|
||||
switch err.(type) {
|
||||
case PacketEncodingError:
|
||||
return nil, err
|
||||
default:
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
switch response.Err {
|
||||
case ErrNoError:
|
||||
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
|
||||
return response, nil
|
||||
|
||||
case ErrConsumerCoordinatorNotAvailable:
|
||||
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
|
||||
|
||||
// This is very ugly, but this scenario will only happen once per cluster.
|
||||
// The __consumer_offsets topic only has to be created one time.
|
||||
// The number of partitions is not configurable, but partition 0 should always exist.
|
||||
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
|
||||
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
return retry(ErrConsumerCoordinatorNotAvailable)
|
||||
default:
|
||||
return nil, response.Err
|
||||
}
|
||||
}
|
||||
|
||||
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
442
vendor/github.com/Shopify/sarama/config.go
generated
vendored
Normal file
442
vendor/github.com/Shopify/sarama/config.go
generated
vendored
Normal file
|
@@ -0,0 +1,442 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
const defaultClientID = "sarama"
|
||||
|
||||
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
|
||||
|
||||
// Config is used to pass multiple configuration options to Sarama's constructors.
|
||||
type Config struct {
|
||||
// Net is the namespace for network-level properties used by the Broker, and
|
||||
// shared by the Client/Producer/Consumer.
|
||||
Net struct {
|
||||
// How many outstanding requests a connection is allowed to have before
|
||||
// sending on it blocks (default 5).
|
||||
MaxOpenRequests int
|
||||
|
||||
// All three of the below configurations are similar to the
|
||||
// `socket.timeout.ms` setting in JVM kafka. All of them default
|
||||
// to 30 seconds.
|
||||
DialTimeout time.Duration // How long to wait for the initial connection.
|
||||
ReadTimeout time.Duration // How long to wait for a response.
|
||||
WriteTimeout time.Duration // How long to wait for a transmit.
|
||||
|
||||
TLS struct {
|
||||
// Whether or not to use TLS when connecting to the broker
|
||||
// (defaults to false).
|
||||
Enable bool
|
||||
// The TLS configuration to use for secure connections if
|
||||
// enabled (defaults to nil).
|
||||
Config *tls.Config
|
||||
}
|
||||
|
||||
// SASL based authentication with broker. While there are multiple SASL authentication methods
|
||||
// the current implementation is limited to plaintext (SASL/PLAIN) authentication
|
||||
SASL struct {
|
||||
// Whether or not to use SASL authentication when connecting to the broker
|
||||
// (defaults to false).
|
||||
Enable bool
|
||||
// Whether or not to send the Kafka SASL handshake first if enabled
|
||||
// (defaults to true). You should only set this to false if you're using
|
||||
// a non-Kafka SASL proxy.
|
||||
Handshake bool
|
||||
// Username and password for SASL/PLAIN authentication.
|
||||
User string
|
||||
Password string
|
||||
}
|
||||
|
||||
// KeepAlive specifies the keep-alive period for an active network connection.
|
||||
// If zero, keep-alives are disabled. (default is 0: disabled).
|
||||
KeepAlive time.Duration
|
||||
}
|
||||
|
||||
// Metadata is the namespace for metadata management properties used by the
|
||||
// Client, and shared by the Producer/Consumer.
|
||||
Metadata struct {
|
||||
Retry struct {
|
||||
// The total number of times to retry a metadata request when the
|
||||
// cluster is in the middle of a leader election (default 3).
|
||||
Max int
|
||||
// How long to wait for leader election to occur before retrying
|
||||
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
|
||||
Backoff time.Duration
|
||||
}
|
||||
// How frequently to refresh the cluster metadata in the background.
|
||||
// Defaults to 10 minutes. Set to 0 to disable. Similar to
|
||||
// `topic.metadata.refresh.interval.ms` in the JVM version.
|
||||
RefreshFrequency time.Duration
|
||||
|
||||
// Whether to maintain a full set of metadata for all topics, or just
|
||||
// the minimal set that has been necessary so far. The full set is simpler
|
||||
// and usually more convenient, but can take up a substantial amount of
|
||||
// memory if you have many topics and partitions. Defaults to true.
|
||||
Full bool
|
||||
}
|
||||
|
||||
// Producer is the namespace for configuration related to producing messages,
|
||||
// used by the Producer.
|
||||
Producer struct {
|
||||
// The maximum permitted size of a message (defaults to 1000000). Should be
|
||||
// set equal to or smaller than the broker's `message.max.bytes`.
|
||||
MaxMessageBytes int
|
||||
// The level of acknowledgement reliability needed from the broker (defaults
|
||||
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
|
||||
// JVM producer.
|
||||
RequiredAcks RequiredAcks
|
||||
// The maximum duration the broker will wait for receipt of the number of
|
||||
// RequiredAcks (defaults to 10 seconds). This is only relevant when
|
||||
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
|
||||
// millisecond resolution, nanoseconds will be truncated. Equivalent to
|
||||
// the JVM producer's `request.timeout.ms` setting.
|
||||
Timeout time.Duration
|
||||
// The type of compression to use on messages (defaults to no compression).
|
||||
// Similar to `compression.codec` setting of the JVM producer.
|
||||
Compression CompressionCodec
|
||||
// Generates partitioners for choosing the partition to send messages to
|
||||
// (defaults to hashing the message key). Similar to the `partitioner.class`
|
||||
// setting for the JVM producer.
|
||||
Partitioner PartitionerConstructor
|
||||
|
||||
// Return specifies what channels will be populated. If they are set to true,
|
||||
// you must read from the respective channels to prevent deadlock. If,
|
||||
// however, this config is used to create a `SyncProducer`, both must be set
|
||||
// to true and you shall not read from the channels since the producer does
|
||||
// this internally.
|
||||
Return struct {
|
||||
// If enabled, successfully delivered messages will be returned on the
|
||||
// Successes channel (default disabled).
|
||||
Successes bool
|
||||
|
||||
// If enabled, messages that failed to deliver will be returned on the
|
||||
// Errors channel, including error (default enabled).
|
||||
Errors bool
|
||||
}
|
||||
|
||||
// The following config options control how often messages are batched up and
|
||||
// sent to the broker. By default, messages are sent as fast as possible, and
|
||||
// all messages received while the current batch is in-flight are placed
|
||||
// into the subsequent batch.
|
||||
Flush struct {
|
||||
// The best-effort number of bytes needed to trigger a flush. Use the
|
||||
// global sarama.MaxRequestSize to set a hard upper limit.
|
||||
Bytes int
|
||||
// The best-effort number of messages needed to trigger a flush. Use
|
||||
// `MaxMessages` to set a hard upper limit.
|
||||
Messages int
|
||||
// The best-effort frequency of flushes. Equivalent to
|
||||
// `queue.buffering.max.ms` setting of JVM producer.
|
||||
Frequency time.Duration
|
||||
// The maximum number of messages the producer will send in a single
|
||||
// broker request. Defaults to 0 for unlimited. Similar to
|
||||
// `queue.buffering.max.messages` in the JVM producer.
|
||||
MaxMessages int
|
||||
}
|
||||
|
||||
Retry struct {
|
||||
// The total number of times to retry sending a message (default 3).
|
||||
// Similar to the `message.send.max.retries` setting of the JVM producer.
|
||||
Max int
|
||||
// How long to wait for the cluster to settle between retries
|
||||
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
|
||||
// JVM producer.
|
||||
Backoff time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
// Consumer is the namespace for configuration related to consuming messages,
|
||||
// used by the Consumer.
|
||||
//
|
||||
// Note that Sarama's Consumer type does not currently support automatic
|
||||
// consumer-group rebalancing and offset tracking. For Zookeeper-based
|
||||
// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
|
||||
// library builds on Sarama to add this support. For Kafka-based tracking
|
||||
// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
|
||||
// builds on Sarama to add this support.
|
||||
Consumer struct {
|
||||
Retry struct {
|
||||
// How long to wait after a failing to read from a partition before
|
||||
// trying again (default 2s).
|
||||
Backoff time.Duration
|
||||
}
|
||||
|
||||
// Fetch is the namespace for controlling how many bytes are retrieved by any
|
||||
// given request.
|
||||
Fetch struct {
|
||||
// The minimum number of message bytes to fetch in a request - the broker
|
||||
// will wait until at least this many are available. The default is 1,
|
||||
// as 0 causes the consumer to spin when no messages are available.
|
||||
// Equivalent to the JVM's `fetch.min.bytes`.
|
||||
Min int32
|
||||
// The default number of message bytes to fetch from the broker in each
|
||||
// request (default 32768). This should be larger than the majority of
|
||||
// your messages, or else the consumer will spend a lot of time
|
||||
// negotiating sizes and not actually consuming. Similar to the JVM's
|
||||
// `fetch.message.max.bytes`.
|
||||
Default int32
|
||||
// The maximum number of message bytes to fetch from the broker in a
|
||||
// single request. Messages larger than this will return
|
||||
// ErrMessageTooLarge and will not be consumable, so you must be sure
|
||||
// this is at least as large as your largest message. Defaults to 0
|
||||
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
|
||||
// global `sarama.MaxResponseSize` still applies.
|
||||
Max int32
|
||||
}
|
||||
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
|
||||
// bytes to become available before it returns fewer than that anyway. The
|
||||
// default is 250ms, since 0 causes the consumer to spin when no events are
|
||||
// available. 100-500ms is a reasonable range for most cases. Kafka only
|
||||
// supports precision up to milliseconds; nanoseconds will be truncated.
|
||||
// Equivalent to the JVM's `fetch.wait.max.ms`.
|
||||
MaxWaitTime time.Duration
|
||||
|
||||
// The maximum amount of time the consumer expects a message takes to
|
||||
// process for the user. If writing to the Messages channel takes longer
|
||||
// than this, that partition will stop fetching more messages until it
|
||||
// can proceed again.
|
||||
// Note that, since the Messages channel is buffered, the actual grace time is
|
||||
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
|
||||
// If a message is not written to the Messages channel between two ticks
|
||||
// of the expiryTicker then a timeout is detected.
|
||||
// Using a ticker instead of a timer to detect timeouts should typically
|
||||
// result in many fewer calls to Timer functions which may result in a
|
||||
// significant performance improvement if many messages are being sent
|
||||
// and timeouts are infrequent.
|
||||
// The disadvantage of using a ticker instead of a timer is that
|
||||
// timeouts will be less accurate. That is, the effective timeout could
|
||||
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
|
||||
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
|
||||
// between two messages being sent may not be recognized as a timeout.
|
||||
MaxProcessingTime time.Duration
|
||||
|
||||
// Return specifies what channels will be populated. If they are set to true,
|
||||
// you must read from them to prevent deadlock.
|
||||
Return struct {
|
||||
// If enabled, any errors that occurred while consuming are returned on
|
||||
// the Errors channel (default disabled).
|
||||
Errors bool
|
||||
}
|
||||
|
||||
// Offsets specifies configuration for how and when to commit consumed
|
||||
// offsets. This currently requires the manual use of an OffsetManager
|
||||
// but will eventually be automated.
|
||||
Offsets struct {
|
||||
// How frequently to commit updated offsets. Defaults to 1s.
|
||||
CommitInterval time.Duration
|
||||
|
||||
// The initial offset to use if no offset was previously committed.
|
||||
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
|
||||
Initial int64
|
||||
|
||||
// The retention duration for committed offsets. If zero, disabled
|
||||
// (in which case the `offsets.retention.minutes` option on the
|
||||
// broker will be used). Kafka only supports precision up to
|
||||
// milliseconds; nanoseconds will be truncated. Requires Kafka
|
||||
// broker version 0.9.0 or later.
|
||||
// (default is 0: disabled).
|
||||
Retention time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
// A user-provided string sent with every request to the brokers for logging,
|
||||
// debugging, and auditing purposes. Defaults to "sarama", but you should
|
||||
// probably set it to something specific to your application.
|
||||
ClientID string
|
||||
// The number of events to buffer in internal and external channels. This
|
||||
// permits the producer and consumer to continue processing some messages
|
||||
// in the background while user code is working, greatly improving throughput.
|
||||
// Defaults to 256.
|
||||
ChannelBufferSize int
|
||||
// The version of Kafka that Sarama will assume it is running against.
|
||||
// Defaults to the oldest supported stable version. Since Kafka provides
|
||||
// backwards-compatibility, setting it to a version older than you have
|
||||
// will not break anything, although it may prevent you from using the
|
||||
// latest features. Setting it to a version greater than you are actually
|
||||
// running may lead to random breakage.
|
||||
Version KafkaVersion
|
||||
// The registry to define metrics into.
|
||||
// Defaults to a local registry.
|
||||
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
|
||||
// prior to starting Sarama.
|
||||
// See Examples on how to use the metrics registry
|
||||
MetricRegistry metrics.Registry
|
||||
}
|
||||
|
||||
// NewConfig returns a new configuration instance with sane defaults.
|
||||
func NewConfig() *Config {
|
||||
c := &Config{}
|
||||
|
||||
c.Net.MaxOpenRequests = 5
|
||||
c.Net.DialTimeout = 30 * time.Second
|
||||
c.Net.ReadTimeout = 30 * time.Second
|
||||
c.Net.WriteTimeout = 30 * time.Second
|
||||
c.Net.SASL.Handshake = true
|
||||
|
||||
c.Metadata.Retry.Max = 3
|
||||
c.Metadata.Retry.Backoff = 250 * time.Millisecond
|
||||
c.Metadata.RefreshFrequency = 10 * time.Minute
|
||||
c.Metadata.Full = true
|
||||
|
||||
c.Producer.MaxMessageBytes = 1000000
|
||||
c.Producer.RequiredAcks = WaitForLocal
|
||||
c.Producer.Timeout = 10 * time.Second
|
||||
c.Producer.Partitioner = NewHashPartitioner
|
||||
c.Producer.Retry.Max = 3
|
||||
c.Producer.Retry.Backoff = 100 * time.Millisecond
|
||||
c.Producer.Return.Errors = true
|
||||
|
||||
c.Consumer.Fetch.Min = 1
|
||||
c.Consumer.Fetch.Default = 32768
|
||||
c.Consumer.Retry.Backoff = 2 * time.Second
|
||||
c.Consumer.MaxWaitTime = 250 * time.Millisecond
|
||||
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
|
||||
c.Consumer.Return.Errors = false
|
||||
c.Consumer.Offsets.CommitInterval = 1 * time.Second
|
||||
c.Consumer.Offsets.Initial = OffsetNewest
|
||||
|
||||
c.ClientID = defaultClientID
|
||||
c.ChannelBufferSize = 256
|
||||
c.Version = minVersion
|
||||
c.MetricRegistry = metrics.NewRegistry()
|
||||
|
||||
return c
|
||||
}
|
||||
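A sketch of customizing and validating a config built from these defaults; the specific values chosen are illustrative assumptions:

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.ClientID = "my-app"                       // avoid the default "sarama"
	cfg.Version = sarama.V0_10_0_0                // match (or stay below) the broker version
	cfg.Producer.RequiredAcks = sarama.WaitForAll // wait for the full ISR
	cfg.Producer.Return.Successes = true          // required when using a SyncProducer
	cfg.Producer.Flush.Frequency = 100 * time.Millisecond

	if err := cfg.Validate(); err != nil {
		log.Fatal(err) // a ConfigurationError for out-of-range values
	}
}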
|
||||
// Validate checks a Config instance. It will return a
|
||||
// ConfigurationError if the specified values don't make sense.
|
||||
func (c *Config) Validate() error {
|
||||
// some configuration values should be warned on but not fail completely, do those first
|
||||
if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
|
||||
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
|
||||
}
|
||||
if c.Net.SASL.Enable == false {
|
||||
if c.Net.SASL.User != "" {
|
||||
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
|
||||
}
|
||||
if c.Net.SASL.Password != "" {
|
||||
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
|
||||
}
|
||||
}
|
||||
if c.Producer.RequiredAcks > 1 {
|
||||
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
|
||||
}
|
||||
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
|
||||
Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
|
||||
}
|
||||
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
|
||||
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
|
||||
}
|
||||
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
|
||||
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
|
||||
}
|
||||
if c.Producer.Timeout%time.Millisecond != 0 {
|
||||
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
|
||||
}
|
||||
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
|
||||
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
|
||||
}
|
||||
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
|
||||
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
|
||||
}
|
||||
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
|
||||
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
|
||||
}
|
||||
if c.ClientID == defaultClientID {
|
||||
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
|
||||
}
|
||||
|
||||
// validate Net values
|
||||
switch {
|
||||
case c.Net.MaxOpenRequests <= 0:
|
||||
return ConfigurationError("Net.MaxOpenRequests must be > 0")
|
||||
case c.Net.DialTimeout <= 0:
|
||||
return ConfigurationError("Net.DialTimeout must be > 0")
|
||||
case c.Net.ReadTimeout <= 0:
|
||||
return ConfigurationError("Net.ReadTimeout must be > 0")
|
||||
case c.Net.WriteTimeout <= 0:
|
||||
return ConfigurationError("Net.WriteTimeout must be > 0")
|
||||
case c.Net.KeepAlive < 0:
|
||||
return ConfigurationError("Net.KeepAlive must be >= 0")
|
||||
case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
|
||||
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
|
||||
case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
|
||||
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
|
||||
}
|
||||
|
||||
// validate the Metadata values
|
||||
switch {
|
||||
case c.Metadata.Retry.Max < 0:
|
||||
return ConfigurationError("Metadata.Retry.Max must be >= 0")
|
||||
case c.Metadata.Retry.Backoff < 0:
|
||||
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
|
||||
case c.Metadata.RefreshFrequency < 0:
|
||||
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
|
||||
}
|
||||
|
||||
// validate the Producer values
|
||||
switch {
|
||||
case c.Producer.MaxMessageBytes <= 0:
|
||||
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
|
||||
case c.Producer.RequiredAcks < -1:
|
||||
return ConfigurationError("Producer.RequiredAcks must be >= -1")
|
||||
case c.Producer.Timeout <= 0:
|
||||
return ConfigurationError("Producer.Timeout must be > 0")
|
||||
case c.Producer.Partitioner == nil:
|
||||
return ConfigurationError("Producer.Partitioner must not be nil")
|
||||
case c.Producer.Flush.Bytes < 0:
|
||||
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
|
||||
case c.Producer.Flush.Messages < 0:
|
||||
return ConfigurationError("Producer.Flush.Messages must be >= 0")
|
||||
case c.Producer.Flush.Frequency < 0:
|
||||
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
|
||||
case c.Producer.Flush.MaxMessages < 0:
|
||||
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
|
||||
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
|
||||
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
|
||||
case c.Producer.Retry.Max < 0:
|
||||
return ConfigurationError("Producer.Retry.Max must be >= 0")
|
||||
case c.Producer.Retry.Backoff < 0:
|
||||
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
|
||||
}
|
||||
|
||||
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
|
||||
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
|
||||
}
|
||||
|
||||
// validate the Consumer values
|
||||
switch {
|
||||
case c.Consumer.Fetch.Min <= 0:
|
||||
return ConfigurationError("Consumer.Fetch.Min must be > 0")
|
||||
case c.Consumer.Fetch.Default <= 0:
|
||||
return ConfigurationError("Consumer.Fetch.Default must be > 0")
|
||||
case c.Consumer.Fetch.Max < 0:
|
||||
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
|
||||
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
|
||||
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
|
||||
case c.Consumer.MaxProcessingTime <= 0:
|
||||
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
|
||||
case c.Consumer.Retry.Backoff < 0:
|
||||
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
|
||||
case c.Consumer.Offsets.CommitInterval <= 0:
|
||||
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
|
||||
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
|
||||
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
|
||||
|
||||
}
|
||||
|
||||
// validate misc shared values
|
||||
switch {
|
||||
case c.ChannelBufferSize < 0:
|
||||
return ConfigurationError("ChannelBufferSize must be >= 0")
|
||||
case !validID.MatchString(c.ClientID):
|
||||
return ConfigurationError("ClientID is invalid")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
749
vendor/github.com/Shopify/sarama/consumer.go
generated
vendored
Normal file
749
vendor/github.com/Shopify/sarama/consumer.go
generated
vendored
Normal file
|
@@ -0,0 +1,749 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
|
||||
type ConsumerMessage struct {
|
||||
Key, Value []byte
|
||||
Topic string
|
||||
Partition int32
|
||||
Offset int64
|
||||
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
|
||||
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
|
||||
}
|
||||
|
||||
// ConsumerError is what is provided to the user when an error occurs.
|
||||
// It wraps an error and includes the topic and partition.
|
||||
type ConsumerError struct {
|
||||
Topic string
|
||||
Partition int32
|
||||
Err error
|
||||
}
|
||||
|
||||
func (ce ConsumerError) Error() string {
|
||||
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
|
||||
}
|
||||
|
||||
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
|
||||
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
|
||||
// when stopping.
|
||||
type ConsumerErrors []*ConsumerError
|
||||
|
||||
func (ce ConsumerErrors) Error() string {
|
||||
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
|
||||
}
|
||||
|
||||
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
|
||||
// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
|
||||
// scope.
|
||||
//
|
||||
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
|
||||
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
|
||||
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
|
||||
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
|
||||
type Consumer interface {
|
||||
|
||||
// Topics returns the set of available topics as retrieved from the cluster
|
||||
// metadata. This method is the same as Client.Topics(), and is provided for
|
||||
// convenience.
|
||||
Topics() ([]string, error)
|
||||
|
||||
// Partitions returns the sorted list of all partition IDs for the given topic.
|
||||
// This method is the same as Client.Partitions(), and is provided for convenience.
|
||||
Partitions(topic string) ([]int32, error)
|
||||
|
||||
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
|
||||
// the given offset. It will return an error if this Consumer is already consuming
|
||||
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
|
||||
// or OffsetOldest
|
||||
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
|
||||
|
||||
// HighWaterMarks returns the current high water marks for each topic and partition.
|
||||
// Consistency between partitions is not guaranteed since high water marks are updated separately.
|
||||
HighWaterMarks() map[string]map[int32]int64
|
||||
|
||||
// Close shuts down the consumer. It must be called after all child
|
||||
// PartitionConsumers have already been closed.
|
||||
Close() error
|
||||
}
|
||||
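A minimal sketch of driving this interface end to end; the broker address, topic, partition, and fixed message count are assumptions:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// broker address is an assumption; a nil config falls back to NewConfig()
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	// read a fixed number of messages so the sketch terminates
	for i := 0; i < 10; i++ {
		msg := <-pc.Messages()
		log.Printf("offset %d: %s", msg.Offset, msg.Value)
	}
}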
|
||||
type consumer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
|
||||
lock sync.Mutex
|
||||
children map[string]map[int32]*partitionConsumer
|
||||
brokerConsumers map[*Broker]*brokerConsumer
|
||||
}
|
||||
|
||||
// NewConsumer creates a new consumer using the given broker addresses and configuration.
|
||||
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
|
||||
client, err := NewClient(addrs, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err := NewConsumerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.(*consumer).ownClient = true
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NewConsumerFromClient creates a new consumer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this consumer.
|
||||
func NewConsumerFromClient(client Client) (Consumer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
c := &consumer{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
children: make(map[string]map[int32]*partitionConsumer),
|
||||
brokerConsumers: make(map[*Broker]*brokerConsumer),
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *consumer) Close() error {
|
||||
if c.ownClient {
|
||||
return c.client.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *consumer) Topics() ([]string, error) {
|
||||
return c.client.Topics()
|
||||
}
|
||||
|
||||
func (c *consumer) Partitions(topic string) ([]int32, error) {
|
||||
return c.client.Partitions(topic)
|
||||
}
|
||||
|
||||
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
|
||||
child := &partitionConsumer{
|
||||
consumer: c,
|
||||
conf: c.conf,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
|
||||
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
|
||||
feeder: make(chan *FetchResponse, 1),
|
||||
trigger: make(chan none, 1),
|
||||
dying: make(chan none),
|
||||
fetchSize: c.conf.Consumer.Fetch.Default,
|
||||
}
|
||||
|
||||
if err := child.chooseStartingOffset(offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var leader *Broker
|
||||
var err error
|
||||
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.addChild(child); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go withRecover(child.dispatcher)
|
||||
go withRecover(child.responseFeeder)
|
||||
|
||||
child.broker = c.refBrokerConsumer(leader)
|
||||
child.broker.input <- child
|
||||
|
||||
return child, nil
|
||||
}
|
||||
|
||||
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
hwms := make(map[string]map[int32]int64)
|
||||
for topic, p := range c.children {
|
||||
hwm := make(map[int32]int64, len(p))
|
||||
for partition, pc := range p {
|
||||
hwm[partition] = pc.HighWaterMarkOffset()
|
||||
}
|
||||
hwms[topic] = hwm
|
||||
}
|
||||
|
||||
return hwms
|
||||
}
|
||||
|
||||
func (c *consumer) addChild(child *partitionConsumer) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
topicChildren := c.children[child.topic]
|
||||
if topicChildren == nil {
|
||||
topicChildren = make(map[int32]*partitionConsumer)
|
||||
c.children[child.topic] = topicChildren
|
||||
}
|
||||
|
||||
if topicChildren[child.partition] != nil {
|
||||
return ConfigurationError("That topic/partition is already being consumed")
|
||||
}
|
||||
|
||||
topicChildren[child.partition] = child
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *consumer) removeChild(child *partitionConsumer) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
delete(c.children[child.topic], child.partition)
|
||||
}
|
||||
|
||||
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
bc := c.brokerConsumers[broker]
|
||||
if bc == nil {
|
||||
bc = c.newBrokerConsumer(broker)
|
||||
c.brokerConsumers[broker] = bc
|
||||
}
|
||||
|
||||
bc.refs++
|
||||
|
||||
return bc
|
||||
}
|
||||
|
||||
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
brokerWorker.refs--
|
||||
|
||||
if brokerWorker.refs == 0 {
|
||||
close(brokerWorker.input)
|
||||
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
|
||||
delete(c.brokerConsumers, brokerWorker.broker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
delete(c.brokerConsumers, brokerWorker.broker)
|
||||
}
|
||||
|
||||
// PartitionConsumer

// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {

	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
	// function, or Close, before a consumer object passes out of scope, as it will otherwise leak memory. You must call
	// this before calling Close on the underlying client.
	AsyncClose()

	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
	// the Messages channel when this function is called, you will be competing with Close for messages; consider
	// calling AsyncClose instead. It is required to call this function (or AsyncClose) before a consumer object passes
	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
	Close() error

	// Messages returns the read channel for the messages that are returned by
	// the broker.
	Messages() <-chan *ConsumerMessage

	// Errors returns a read channel of errors that occurred during consuming, if
	// enabled. By default, errors are logged and not returned over this channel.
	// If you want to implement any custom error handling, set your config's
	// Consumer.Return.Errors setting to true, and read from this channel.
	Errors() <-chan *ConsumerError

	// HighWaterMarkOffset returns the high water mark offset of the partition,
	// i.e. the offset that will be used for the next message that will be produced.
	// You can use this to determine how far behind the processing is.
	HighWaterMarkOffset() int64
}
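
// A minimal consumption-loop sketch matching the pattern described above
// (the topic, partition, and starting offset are placeholders, `consumer` is an
// existing Consumer, and Consumer.Return.Errors is assumed to be enabled in the
// config so the Errors channel is worth servicing):
//
//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetOldest)
//	if err != nil {
//		panic(err)
//	}
//
//	go func() {
//		for consumeErr := range pc.Errors() {
//			log.Println(consumeErr)
//		}
//	}()
//
//	for msg := range pc.Messages() {
//		process(msg) // placeholder for application logic
//	}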
|
||||
|
||||
type partitionConsumer struct {
|
||||
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
consumer *consumer
|
||||
conf *Config
|
||||
topic string
|
||||
partition int32
|
||||
|
||||
broker *brokerConsumer
|
||||
messages chan *ConsumerMessage
|
||||
errors chan *ConsumerError
|
||||
feeder chan *FetchResponse
|
||||
|
||||
trigger, dying chan none
|
||||
responseResult error
|
||||
|
||||
fetchSize int32
|
||||
offset int64
|
||||
}
|
||||
|
||||
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
|
||||
|
||||
func (child *partitionConsumer) sendError(err error) {
|
||||
cErr := &ConsumerError{
|
||||
Topic: child.topic,
|
||||
Partition: child.partition,
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if child.conf.Consumer.Return.Errors {
|
||||
child.errors <- cErr
|
||||
} else {
|
||||
Logger.Println(cErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) dispatcher() {
|
||||
for range child.trigger {
|
||||
select {
|
||||
case <-child.dying:
|
||||
close(child.trigger)
|
||||
case <-time.After(child.conf.Consumer.Retry.Backoff):
|
||||
if child.broker != nil {
|
||||
child.consumer.unrefBrokerConsumer(child.broker)
|
||||
child.broker = nil
|
||||
}
|
||||
|
||||
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
|
||||
if err := child.dispatch(); err != nil {
|
||||
child.sendError(err)
|
||||
child.trigger <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if child.broker != nil {
|
||||
child.consumer.unrefBrokerConsumer(child.broker)
|
||||
}
|
||||
child.consumer.removeChild(child)
|
||||
close(child.feeder)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) dispatch() error {
|
||||
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var leader *Broker
|
||||
var err error
|
||||
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
child.broker = child.consumer.refBrokerConsumer(leader)
|
||||
|
||||
child.broker.input <- child
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
|
||||
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case offset == OffsetNewest:
|
||||
child.offset = newestOffset
|
||||
case offset == OffsetOldest:
|
||||
child.offset = oldestOffset
|
||||
case offset >= oldestOffset && offset <= newestOffset:
|
||||
child.offset = offset
|
||||
default:
|
||||
return ErrOffsetOutOfRange
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
|
||||
return child.messages
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
|
||||
return child.errors
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) AsyncClose() {
|
||||
	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
	// also just close itself)
|
||||
close(child.dying)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) Close() error {
|
||||
child.AsyncClose()
|
||||
|
||||
go withRecover(func() {
|
||||
for range child.messages {
|
||||
// drain
|
||||
}
|
||||
})
|
||||
|
||||
var errors ConsumerErrors
|
||||
for err := range child.errors {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
|
||||
return atomic.LoadInt64(&child.highWaterMarkOffset)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) responseFeeder() {
|
||||
var msgs []*ConsumerMessage
|
||||
msgSent := false
|
||||
|
||||
feederLoop:
|
||||
for response := range child.feeder {
|
||||
msgs, child.responseResult = child.parseResponse(response)
|
||||
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
|
||||
|
||||
for i, msg := range msgs {
|
||||
messageSelect:
|
||||
select {
|
||||
case child.messages <- msg:
|
||||
msgSent = true
|
||||
case <-expiryTicker.C:
|
||||
if !msgSent {
|
||||
child.responseResult = errTimedOut
|
||||
child.broker.acks.Done()
|
||||
for _, msg = range msgs[i:] {
|
||||
child.messages <- msg
|
||||
}
|
||||
child.broker.input <- child
|
||||
continue feederLoop
|
||||
} else {
|
||||
					// current message has already been sent, return to select
					// statement
|
||||
msgSent = false
|
||||
goto messageSelect
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
expiryTicker.Stop()
|
||||
child.broker.acks.Done()
|
||||
}
|
||||
|
||||
close(child.messages)
|
||||
close(child.errors)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
|
||||
block := response.GetBlock(child.topic, child.partition)
|
||||
if block == nil {
|
||||
return nil, ErrIncompleteResponse
|
||||
}
|
||||
|
||||
if block.Err != ErrNoError {
|
||||
return nil, block.Err
|
||||
}
|
||||
|
||||
if len(block.MsgSet.Messages) == 0 {
|
||||
// We got no messages. If we got a trailing one then we need to ask for more data.
|
||||
// Otherwise we just poll again and wait for one to be produced...
|
||||
if block.MsgSet.PartialTrailingMessage {
|
||||
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
|
||||
// we can't ask for more data, we've hit the configured limit
|
||||
child.sendError(ErrMessageTooLarge)
|
||||
child.offset++ // skip this one so we can keep processing future messages
|
||||
} else {
|
||||
child.fetchSize *= 2
|
||||
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
|
||||
child.fetchSize = child.conf.Consumer.Fetch.Max
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// we got messages, reset our fetch size in case it was increased for a previous request
|
||||
child.fetchSize = child.conf.Consumer.Fetch.Default
|
||||
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
|
||||
|
||||
incomplete := false
|
||||
prelude := true
|
||||
var messages []*ConsumerMessage
|
||||
for _, msgBlock := range block.MsgSet.Messages {
|
||||
|
||||
for _, msg := range msgBlock.Messages() {
|
||||
offset := msg.Offset
|
||||
if msg.Msg.Version >= 1 {
|
||||
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
|
||||
offset += baseOffset
|
||||
}
|
||||
if prelude && offset < child.offset {
|
||||
continue
|
||||
}
|
||||
prelude = false
|
||||
|
||||
if offset >= child.offset {
|
||||
messages = append(messages, &ConsumerMessage{
|
||||
Topic: child.topic,
|
||||
Partition: child.partition,
|
||||
Key: msg.Msg.Key,
|
||||
Value: msg.Msg.Value,
|
||||
Offset: offset,
|
||||
Timestamp: msg.Msg.Timestamp,
|
||||
BlockTimestamp: msgBlock.Msg.Timestamp,
|
||||
})
|
||||
child.offset = offset + 1
|
||||
} else {
|
||||
incomplete = true
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if incomplete || len(messages) == 0 {
|
||||
return nil, ErrIncompleteResponse
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// brokerConsumer
|
||||
|
||||
type brokerConsumer struct {
|
||||
consumer *consumer
|
||||
broker *Broker
|
||||
input chan *partitionConsumer
|
||||
newSubscriptions chan []*partitionConsumer
|
||||
wait chan none
|
||||
subscriptions map[*partitionConsumer]none
|
||||
acks sync.WaitGroup
|
||||
refs int
|
||||
}
|
||||
|
||||
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
|
||||
bc := &brokerConsumer{
|
||||
consumer: c,
|
||||
broker: broker,
|
||||
input: make(chan *partitionConsumer),
|
||||
newSubscriptions: make(chan []*partitionConsumer),
|
||||
wait: make(chan none),
|
||||
subscriptions: make(map[*partitionConsumer]none),
|
||||
refs: 0,
|
||||
}
|
||||
|
||||
go withRecover(bc.subscriptionManager)
|
||||
go withRecover(bc.subscriptionConsumer)
|
||||
|
||||
return bc
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) subscriptionManager() {
|
||||
var buffer []*partitionConsumer
|
||||
|
||||
	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
	// so the main goroutine can block waiting for work if it has none.
|
||||
for {
|
||||
if len(buffer) > 0 {
|
||||
select {
|
||||
case event, ok := <-bc.input:
|
||||
if !ok {
|
||||
goto done
|
||||
}
|
||||
buffer = append(buffer, event)
|
||||
case bc.newSubscriptions <- buffer:
|
||||
buffer = nil
|
||||
case bc.wait <- none{}:
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case event, ok := <-bc.input:
|
||||
if !ok {
|
||||
goto done
|
||||
}
|
||||
buffer = append(buffer, event)
|
||||
case bc.newSubscriptions <- nil:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
close(bc.wait)
|
||||
if len(buffer) > 0 {
|
||||
bc.newSubscriptions <- buffer
|
||||
}
|
||||
close(bc.newSubscriptions)
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) subscriptionConsumer() {
|
||||
<-bc.wait // wait for our first piece of work
|
||||
|
||||
	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
|
||||
for newSubscriptions := range bc.newSubscriptions {
|
||||
bc.updateSubscriptions(newSubscriptions)
|
||||
|
||||
if len(bc.subscriptions) == 0 {
|
||||
// We're about to be shut down or we're about to receive more subscriptions.
|
||||
// Either way, the signal just hasn't propagated to our goroutine yet.
|
||||
<-bc.wait
|
||||
continue
|
||||
}
|
||||
|
||||
response, err := bc.fetchNewMessages()
|
||||
|
||||
if err != nil {
|
||||
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
|
||||
bc.abort(err)
|
||||
return
|
||||
}
|
||||
|
||||
bc.acks.Add(len(bc.subscriptions))
|
||||
for child := range bc.subscriptions {
|
||||
child.feeder <- response
|
||||
}
|
||||
bc.acks.Wait()
|
||||
bc.handleResponses()
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
|
||||
for _, child := range newSubscriptions {
|
||||
bc.subscriptions[child] = none{}
|
||||
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
|
||||
}
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
select {
|
||||
case <-child.dying:
|
||||
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
|
||||
close(child.trigger)
|
||||
delete(bc.subscriptions, child)
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) handleResponses() {
|
||||
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
|
||||
for child := range bc.subscriptions {
|
||||
result := child.responseResult
|
||||
child.responseResult = nil
|
||||
|
||||
switch result {
|
||||
case nil:
|
||||
break
|
||||
case errTimedOut:
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
|
||||
bc.broker.ID(), child.topic, child.partition)
|
||||
delete(bc.subscriptions, child)
|
||||
case ErrOffsetOutOfRange:
|
||||
			// there's no point in retrying this; it will just fail the same way again,
			// so shut it down and force the user to choose what to do
|
||||
child.sendError(result)
|
||||
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
|
||||
close(child.trigger)
|
||||
delete(bc.subscriptions, child)
|
||||
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
|
||||
// not an error, but does need redispatching
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
|
||||
bc.broker.ID(), child.topic, child.partition, result)
|
||||
child.trigger <- none{}
|
||||
delete(bc.subscriptions, child)
|
||||
default:
|
||||
// dunno, tell the user and try redispatching
|
||||
child.sendError(result)
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
|
||||
bc.broker.ID(), child.topic, child.partition, result)
|
||||
child.trigger <- none{}
|
||||
delete(bc.subscriptions, child)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) abort(err error) {
|
||||
bc.consumer.abandonBrokerConsumer(bc)
|
||||
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
child.sendError(err)
|
||||
child.trigger <- none{}
|
||||
}
|
||||
|
||||
for newSubscriptions := range bc.newSubscriptions {
|
||||
if len(newSubscriptions) == 0 {
|
||||
<-bc.wait
|
||||
continue
|
||||
}
|
||||
for _, child := range newSubscriptions {
|
||||
child.sendError(err)
|
||||
child.trigger <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
|
||||
request := &FetchRequest{
|
||||
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
|
||||
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
request.Version = 2
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
|
||||
request.Version = 3
|
||||
request.MaxBytes = MaxResponseSize
|
||||
}
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
|
||||
}
|
||||
|
||||
return bc.broker.Fetch(request)
|
||||
}
|
94
vendor/github.com/Shopify/sarama/consumer_group_members.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
package sarama
|
||||
|
||||
type ConsumerGroupMemberMetadata struct {
|
||||
Version int16
|
||||
Topics []string
|
||||
UserData []byte
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
|
||||
pe.putInt16(m.Version)
|
||||
|
||||
if err := pe.putStringArray(m.Topics); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putBytes(m.UserData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
|
||||
if m.Version, err = pd.getInt16(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m.Topics, err = pd.getStringArray(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m.UserData, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ConsumerGroupMemberAssignment struct {
|
||||
Version int16
|
||||
Topics map[string][]int32
|
||||
UserData []byte
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
|
||||
pe.putInt16(m.Version)
|
||||
|
||||
if err := pe.putArrayLength(len(m.Topics)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, partitions := range m.Topics {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putInt32Array(partitions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := pe.putBytes(m.UserData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
|
||||
if m.Version, err = pd.getInt16(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var topicLen int
|
||||
if topicLen, err = pd.getArrayLength(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.Topics = make(map[string][]int32, topicLen)
|
||||
for i := 0; i < topicLen; i++ {
|
||||
var topic string
|
||||
if topic, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if m.UserData, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
26
vendor/github.com/Shopify/sarama/consumer_metadata_request.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
package sarama
|
||||
|
||||
type ConsumerMetadataRequest struct {
|
||||
ConsumerGroup string
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
|
||||
return pe.putString(r.ConsumerGroup)
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.ConsumerGroup, err = pd.getString()
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) key() int16 {
|
||||
return 10
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
|
||||
return V0_8_2_0
|
||||
}
|
85
vendor/github.com/Shopify/sarama/consumer_metadata_response.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
package sarama
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type ConsumerMetadataResponse struct {
|
||||
Err KError
|
||||
Coordinator *Broker
|
||||
CoordinatorID int32 // deprecated: use Coordinator.ID()
|
||||
CoordinatorHost string // deprecated: use Coordinator.Addr()
|
||||
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Err = KError(tmp)
|
||||
|
||||
coordinator := new(Broker)
|
||||
if err := coordinator.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
if coordinator.addr == ":0" {
|
||||
return nil
|
||||
}
|
||||
r.Coordinator = coordinator
|
||||
|
||||
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
|
||||
// backwards compatibility
|
||||
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port, err := strconv.ParseInt(portstr, 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.CoordinatorID = r.Coordinator.ID()
|
||||
r.CoordinatorHost = host
|
||||
r.CoordinatorPort = int32(port)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
if r.Coordinator != nil {
|
||||
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port, err := strconv.ParseInt(portstr, 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(r.Coordinator.ID())
|
||||
if err := pe.putString(host); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(int32(port))
|
||||
return nil
|
||||
}
|
||||
pe.putInt32(r.CoordinatorID)
|
||||
if err := pe.putString(r.CoordinatorHost); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(r.CoordinatorPort)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) key() int16 {
|
||||
return 10
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
|
||||
return V0_8_2_0
|
||||
}
|
37
vendor/github.com/Shopify/sarama/crc32_field.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
package sarama
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
)
|
||||
|
||||
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
|
||||
type crc32Field struct {
|
||||
startOffset int
|
||||
}
|
||||
|
||||
func (c *crc32Field) saveOffset(in int) {
|
||||
c.startOffset = in
|
||||
}
|
||||
|
||||
func (c *crc32Field) reserveLength() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
func (c *crc32Field) run(curOffset int, buf []byte) error {
|
||||
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
|
||||
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *crc32Field) check(curOffset int, buf []byte) error {
|
||||
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
|
||||
|
||||
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
|
||||
if crc != expected {
|
||||
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
30
vendor/github.com/Shopify/sarama/describe_groups_request.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
package sarama
|
||||
|
||||
type DescribeGroupsRequest struct {
|
||||
Groups []string
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
|
||||
return pe.putStringArray(r.Groups)
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Groups, err = pd.getStringArray()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) key() int16 {
|
||||
return 15
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) AddGroup(group string) {
|
||||
r.Groups = append(r.Groups, group)
|
||||
}
|
187
vendor/github.com/Shopify/sarama/describe_groups_response.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
package sarama
|
||||
|
||||
type DescribeGroupsResponse struct {
|
||||
Groups []*GroupDescription
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Groups)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, groupDescription := range r.Groups {
|
||||
if err := groupDescription.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Groups = make([]*GroupDescription, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r.Groups[i] = new(GroupDescription)
|
||||
if err := r.Groups[i].decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) key() int16 {
|
||||
return 15
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
||||
type GroupDescription struct {
|
||||
Err KError
|
||||
GroupId string
|
||||
State string
|
||||
ProtocolType string
|
||||
Protocol string
|
||||
Members map[string]*GroupMemberDescription
|
||||
}
|
||||
|
||||
func (gd *GroupDescription) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(gd.Err))
|
||||
|
||||
if err := pe.putString(gd.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gd.State); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gd.ProtocolType); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gd.Protocol); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(gd.Members)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for memberId, groupMemberDescription := range gd.Members {
|
||||
if err := pe.putString(memberId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := groupMemberDescription.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gd.Err = KError(kerr)
|
||||
|
||||
if gd.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gd.State, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gd.ProtocolType, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gd.Protocol, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
gd.Members = make(map[string]*GroupMemberDescription)
|
||||
for i := 0; i < n; i++ {
|
||||
memberId, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gd.Members[memberId] = new(GroupMemberDescription)
|
||||
if err := gd.Members[memberId].decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type GroupMemberDescription struct {
|
||||
ClientId string
|
||||
ClientHost string
|
||||
MemberMetadata []byte
|
||||
MemberAssignment []byte
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(gmd.ClientId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gmd.ClientHost); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
|
||||
if gmd.ClientId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gmd.ClientHost, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
|
||||
assignment := new(ConsumerGroupMemberAssignment)
|
||||
err := decode(gmd.MemberAssignment, assignment)
|
||||
return assignment, err
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
|
||||
metadata := new(ConsumerGroupMemberMetadata)
|
||||
err := decode(gmd.MemberMetadata, metadata)
|
||||
return metadata, err
|
||||
}
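
// A sketch of how the helpers above are typically used once a
// DescribeGroupsResponse has been obtained from a broker (the `response`
// variable is a placeholder for the decoded response; error handling is
// deliberately minimal):
//
//	for _, group := range response.Groups {
//		for memberId, member := range group.Members {
//			assignment, err := member.GetMemberAssignment()
//			if err != nil {
//				continue
//			}
//			log.Printf("member %s owns partitions %v", memberId, assignment.Topics)
//		}
//	}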
|
89
vendor/github.com/Shopify/sarama/encoder_decoder.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// Encoder is the interface that wraps the basic Encode method.
|
||||
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
|
||||
type encoder interface {
|
||||
encode(pe packetEncoder) error
|
||||
}
|
||||
|
||||
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
|
||||
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
|
||||
if e == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var prepEnc prepEncoder
|
||||
var realEnc realEncoder
|
||||
|
||||
err := e.encode(&prepEnc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
|
||||
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
|
||||
}
|
||||
|
||||
realEnc.raw = make([]byte, prepEnc.length)
|
||||
realEnc.registry = metricRegistry
|
||||
err = e.encode(&realEnc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return realEnc.raw, nil
|
||||
}
|
||||
|
||||
// Decoder is the interface that wraps the basic Decode method.
|
||||
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
|
||||
type decoder interface {
|
||||
decode(pd packetDecoder) error
|
||||
}
|
||||
|
||||
type versionedDecoder interface {
|
||||
decode(pd packetDecoder, version int16) error
|
||||
}
|
||||
|
||||
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
|
||||
// interpreted using Kafka's encoding rules.
|
||||
func decode(buf []byte, in decoder) error {
|
||||
if buf == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
helper := realDecoder{raw: buf}
|
||||
err := in.decode(&helper)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helper.off != len(buf) {
|
||||
return PacketDecodingError{"invalid length"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
|
||||
if buf == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
helper := realDecoder{raw: buf}
|
||||
err := in.decode(&helper, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helper.off != len(buf) {
|
||||
return PacketDecodingError{"invalid length"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
221
vendor/github.com/Shopify/sarama/errors.go
generated
vendored
Normal file
@ -0,0 +1,221 @@
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
|
||||
// or otherwise failed to respond.
|
||||
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
|
||||
|
||||
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
|
||||
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
|
||||
|
||||
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
|
||||
// not contain the expected information.
|
||||
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
|
||||
|
||||
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
|
||||
// (meaning one outside of the range [0...numPartitions-1]).
|
||||
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
|
||||
|
||||
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
|
||||
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
|
||||
|
||||
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
|
||||
var ErrNotConnected = errors.New("kafka: broker not connected")
|
||||
|
||||
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
|
||||
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
|
||||
// of the message set.
|
||||
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
|
||||
|
||||
// ErrShuttingDown is returned when a producer receives a message during shutdown.
|
||||
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
|
||||
|
||||
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
|
||||
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
|
||||
|
||||
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
|
||||
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
|
||||
type PacketEncodingError struct {
|
||||
Info string
|
||||
}
|
||||
|
||||
func (err PacketEncodingError) Error() string {
|
||||
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
|
||||
}
|
||||
|
||||
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
|
||||
// This can be a bad CRC or length field, or any other invalid value.
|
||||
type PacketDecodingError struct {
|
||||
Info string
|
||||
}
|
||||
|
||||
func (err PacketDecodingError) Error() string {
|
||||
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
|
||||
}
|
||||
|
||||
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
|
||||
// when the specified configuration is invalid.
|
||||
type ConfigurationError string
|
||||
|
||||
func (err ConfigurationError) Error() string {
|
||||
return "kafka: invalid configuration (" + string(err) + ")"
|
||||
}
|
||||
|
||||
// KError is the type of error that can be returned directly by the Kafka broker.
|
||||
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
|
||||
type KError int16
|
||||
|
||||
// Numeric error codes returned by the Kafka server.
|
||||
const (
|
||||
ErrNoError KError = 0
|
||||
ErrUnknown KError = -1
|
||||
ErrOffsetOutOfRange KError = 1
|
||||
ErrInvalidMessage KError = 2
|
||||
ErrUnknownTopicOrPartition KError = 3
|
||||
ErrInvalidMessageSize KError = 4
|
||||
ErrLeaderNotAvailable KError = 5
|
||||
ErrNotLeaderForPartition KError = 6
|
||||
ErrRequestTimedOut KError = 7
|
||||
ErrBrokerNotAvailable KError = 8
|
||||
ErrReplicaNotAvailable KError = 9
|
||||
ErrMessageSizeTooLarge KError = 10
|
||||
ErrStaleControllerEpochCode KError = 11
|
||||
ErrOffsetMetadataTooLarge KError = 12
|
||||
ErrNetworkException KError = 13
|
||||
ErrOffsetsLoadInProgress KError = 14
|
||||
ErrConsumerCoordinatorNotAvailable KError = 15
|
||||
ErrNotCoordinatorForConsumer KError = 16
|
||||
ErrInvalidTopic KError = 17
|
||||
ErrMessageSetSizeTooLarge KError = 18
|
||||
ErrNotEnoughReplicas KError = 19
|
||||
ErrNotEnoughReplicasAfterAppend KError = 20
|
||||
ErrInvalidRequiredAcks KError = 21
|
||||
ErrIllegalGeneration KError = 22
|
||||
ErrInconsistentGroupProtocol KError = 23
|
||||
ErrInvalidGroupId KError = 24
|
||||
ErrUnknownMemberId KError = 25
|
||||
ErrInvalidSessionTimeout KError = 26
|
||||
ErrRebalanceInProgress KError = 27
|
||||
ErrInvalidCommitOffsetSize KError = 28
|
||||
ErrTopicAuthorizationFailed KError = 29
|
||||
ErrGroupAuthorizationFailed KError = 30
|
||||
ErrClusterAuthorizationFailed KError = 31
|
||||
ErrInvalidTimestamp KError = 32
|
||||
ErrUnsupportedSASLMechanism KError = 33
|
||||
ErrIllegalSASLState KError = 34
|
||||
ErrUnsupportedVersion KError = 35
|
||||
ErrTopicAlreadyExists KError = 36
|
||||
ErrInvalidPartitions KError = 37
|
||||
ErrInvalidReplicationFactor KError = 38
|
||||
ErrInvalidReplicaAssignment KError = 39
|
||||
ErrInvalidConfig KError = 40
|
||||
ErrNotController KError = 41
|
||||
ErrInvalidRequest KError = 42
|
||||
ErrUnsupportedForMessageFormat KError = 43
|
||||
ErrPolicyViolation KError = 44
|
||||
)
|
||||
|
||||
func (err KError) Error() string {
|
||||
// Error messages stolen/adapted from
|
||||
// https://kafka.apache.org/protocol#protocol_error_codes
|
||||
switch err {
|
||||
case ErrNoError:
|
||||
return "kafka server: Not an error, why are you printing me?"
|
||||
case ErrUnknown:
|
||||
return "kafka server: Unexpected (unknown?) server error."
|
||||
case ErrOffsetOutOfRange:
|
||||
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
|
||||
case ErrInvalidMessage:
|
||||
return "kafka server: Message contents does not match its CRC."
|
||||
case ErrUnknownTopicOrPartition:
|
||||
return "kafka server: Request was for a topic or partition that does not exist on this broker."
|
||||
case ErrInvalidMessageSize:
|
||||
return "kafka server: The message has a negative size."
|
||||
case ErrLeaderNotAvailable:
|
||||
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
|
||||
case ErrNotLeaderForPartition:
|
||||
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
|
||||
case ErrRequestTimedOut:
|
||||
return "kafka server: Request exceeded the user-specified time limit in the request."
|
||||
case ErrBrokerNotAvailable:
|
||||
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
|
||||
case ErrReplicaNotAvailable:
|
||||
return "kafka server: Replica information not available, one or more brokers are down."
|
||||
case ErrMessageSizeTooLarge:
|
||||
return "kafka server: Message was too large, server rejected it to avoid allocation error."
|
||||
case ErrStaleControllerEpochCode:
|
||||
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
|
||||
case ErrOffsetMetadataTooLarge:
|
||||
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
|
||||
case ErrNetworkException:
|
||||
return "kafka server: The server disconnected before a response was received."
|
||||
case ErrOffsetsLoadInProgress:
|
||||
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
|
||||
case ErrConsumerCoordinatorNotAvailable:
|
||||
return "kafka server: Offset's topic has not yet been created."
|
||||
case ErrNotCoordinatorForConsumer:
|
||||
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
|
||||
case ErrInvalidTopic:
|
||||
return "kafka server: The request attempted to perform an operation on an invalid topic."
|
||||
case ErrMessageSetSizeTooLarge:
|
||||
return "kafka server: The request included message batch larger than the configured segment size on the server."
|
||||
case ErrNotEnoughReplicas:
|
||||
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
|
||||
case ErrNotEnoughReplicasAfterAppend:
|
||||
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
|
||||
case ErrInvalidRequiredAcks:
|
||||
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
|
||||
case ErrIllegalGeneration:
|
||||
return "kafka server: The provided generation id is not the current generation."
|
||||
case ErrInconsistentGroupProtocol:
|
||||
return "kafka server: The provider group protocol type is incompatible with the other members."
|
||||
case ErrInvalidGroupId:
|
||||
return "kafka server: The provided group id was empty."
|
||||
case ErrUnknownMemberId:
|
||||
return "kafka server: The provided member is not known in the current generation."
|
||||
case ErrInvalidSessionTimeout:
|
||||
return "kafka server: The provided session timeout is outside the allowed range."
|
||||
case ErrRebalanceInProgress:
|
||||
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
|
||||
case ErrInvalidCommitOffsetSize:
|
||||
return "kafka server: The provided commit metadata was too large."
|
||||
case ErrTopicAuthorizationFailed:
|
||||
return "kafka server: The client is not authorized to access this topic."
|
||||
case ErrGroupAuthorizationFailed:
|
||||
return "kafka server: The client is not authorized to access this group."
|
||||
case ErrClusterAuthorizationFailed:
|
||||
return "kafka server: The client is not authorized to send this request type."
|
||||
case ErrInvalidTimestamp:
|
||||
return "kafka server: The timestamp of the message is out of acceptable range."
|
||||
case ErrUnsupportedSASLMechanism:
|
||||
return "kafka server: The broker does not support the requested SASL mechanism."
|
||||
case ErrIllegalSASLState:
|
||||
return "kafka server: Request is not valid given the current SASL state."
|
||||
case ErrUnsupportedVersion:
|
||||
return "kafka server: The version of API is not supported."
|
||||
case ErrTopicAlreadyExists:
|
||||
return "kafka server: Topic with this name already exists."
|
||||
case ErrInvalidPartitions:
|
||||
return "kafka server: Number of partitions is invalid."
|
||||
case ErrInvalidReplicationFactor:
|
||||
return "kafka server: Replication-factor is invalid."
|
||||
case ErrInvalidReplicaAssignment:
|
||||
return "kafka server: Replica assignment is invalid."
|
||||
case ErrInvalidConfig:
|
||||
return "kafka server: Configuration is invalid."
|
||||
case ErrNotController:
|
||||
return "kafka server: This is not the correct controller for this cluster."
|
||||
case ErrInvalidRequest:
|
||||
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
|
||||
case ErrUnsupportedForMessageFormat:
|
||||
return "kafka server: The requested operation is not supported by the message format version."
|
||||
case ErrPolicyViolation:
|
||||
return "kafka server: Request parameters do not satisfy the configured policy."
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
|
||||
}
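
// Since KError implements the error interface, broker-side errors returned by
// Sarama calls can be compared directly against the constants above. A small
// sketch (the err value is assumed to come from any call that talks to a broker):
//
//	switch err {
//	case ErrNoError:
//		// success
//	case ErrOffsetOutOfRange:
//		// pick a new offset, e.g. OffsetOldest, and retry
//	default:
//		log.Println("kafka error:", err)
//	}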
|
150
vendor/github.com/Shopify/sarama/fetch_request.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
package sarama
|
||||
|
||||
type fetchRequestBlock struct {
|
||||
fetchOffset int64
|
||||
maxBytes int32
|
||||
}
|
||||
|
||||
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
|
||||
pe.putInt64(b.fetchOffset)
|
||||
pe.putInt32(b.maxBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
|
||||
if b.fetchOffset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.maxBytes, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
|
||||
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
|
||||
type FetchRequest struct {
|
||||
MaxWaitTime int32
|
||||
MinBytes int32
|
||||
MaxBytes int32
|
||||
Version int16
|
||||
blocks map[string]map[int32]*fetchRequestBlock
|
||||
}
|
||||
|
||||
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt32(-1) // replica ID is always -1 for clients
|
||||
pe.putInt32(r.MaxWaitTime)
|
||||
pe.putInt32(r.MinBytes)
|
||||
if r.Version == 3 {
|
||||
pe.putInt32(r.MaxBytes)
|
||||
}
|
||||
err = pe.putArrayLength(len(r.blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, blocks := range r.blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range blocks {
|
||||
pe.putInt32(partition)
|
||||
err = block.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
if _, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.MinBytes, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.Version == 3 {
|
||||
if r.MaxBytes, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fetchBlock := &fetchRequestBlock{}
|
||||
if err = fetchBlock.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = fetchBlock
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchRequest) key() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *FetchRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *FetchRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
case 3:
|
||||
return V0_10_1_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
|
||||
}
|
||||
|
||||
tmp := new(fetchRequestBlock)
|
||||
tmp.maxBytes = maxBytes
|
||||
tmp.fetchOffset = fetchOffset
|
||||
|
||||
r.blocks[topic][partitionID] = tmp
|
||||
}
|
210
vendor/github.com/Shopify/sarama/fetch_response.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
package sarama
|
||||
|
||||
import "time"
|
||||
|
||||
type FetchResponseBlock struct {
|
||||
Err KError
|
||||
HighWaterMarkOffset int64
|
||||
MsgSet MessageSet
|
||||
}
|
||||
|
||||
func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
b.HighWaterMarkOffset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgSetSize, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgSetDecoder, err := pd.getSubset(int(msgSetSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = (&b.MsgSet).decode(msgSetDecoder)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt16(int16(b.Err))
|
||||
|
||||
pe.putInt64(b.HighWaterMarkOffset)
|
||||
|
||||
pe.push(&lengthField{})
|
||||
err = b.MsgSet.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
type FetchResponse struct {
|
||||
Blocks map[string]map[int32]*FetchResponseBlock
|
||||
ThrottleTime time.Duration
|
||||
Version int16 // v1 requires 0.9+, v2 requires 0.10+
|
||||
}
|
||||
|
||||
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.Version >= 1 {
|
||||
throttle, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
|
||||
}
|
||||
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(FetchResponseBlock)
|
||||
err = block.decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
|
||||
if r.Version >= 1 {
|
||||
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(r.Blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, partitions := range r.Blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for id, block := range partitions {
|
||||
pe.putInt32(id)
|
||||
err = block.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchResponse) key() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *FetchResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *FetchResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
|
||||
}
|
||||
partitions, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
partitions = make(map[int32]*FetchResponseBlock)
|
||||
r.Blocks[topic] = partitions
|
||||
}
|
||||
frb, ok := partitions[partition]
|
||||
if !ok {
|
||||
frb = new(FetchResponseBlock)
|
||||
partitions[partition] = frb
|
||||
}
|
||||
frb.Err = err
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
|
||||
}
|
||||
partitions, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
partitions = make(map[int32]*FetchResponseBlock)
|
||||
r.Blocks[topic] = partitions
|
||||
}
|
||||
frb, ok := partitions[partition]
|
||||
if !ok {
|
||||
frb = new(FetchResponseBlock)
|
||||
partitions[partition] = frb
|
||||
}
|
||||
var kb []byte
|
||||
var vb []byte
|
||||
if key != nil {
|
||||
kb, _ = key.Encode()
|
||||
}
|
||||
if value != nil {
|
||||
vb, _ = value.Encode()
|
||||
}
|
||||
msg := &Message{Key: kb, Value: vb}
|
||||
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
|
||||
frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
|
||||
}
|
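The AddMessage, AddError and GetBlock methods above make up the testing API of FetchResponse. As a rough sketch of how a test outside this package might pre-build a response by hand (the topic names, offsets and StringEncoder payloads are illustrative, not taken from this commit; StringEncoder is the sarama helper that satisfies the Encoder interface expected by AddMessage):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	res := new(sarama.FetchResponse)
	// Append two messages and one error block using the helpers defined above.
	res.AddMessage("my-topic", 0, nil, sarama.StringEncoder("hello"), 0)
	res.AddMessage("my-topic", 0, nil, sarama.StringEncoder("world"), 1)
	res.AddError("other-topic", 0, sarama.ErrUnknownTopicOrPartition)

	block := res.GetBlock("my-topic", 0)
	fmt.Println(len(block.MsgSet.Messages)) // 2
}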
47
vendor/github.com/Shopify/sarama/heartbeat_request.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
package sarama
|
||||
|
||||
type HeartbeatRequest struct {
|
||||
GroupId string
|
||||
GenerationId int32
|
||||
MemberId string
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt32(r.GenerationId)
|
||||
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
if r.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if r.GenerationId, err = pd.getInt32(); err != nil {
|
||||
return
|
||||
}
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) key() int16 {
|
||||
return 12
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
32
vendor/github.com/Shopify/sarama/heartbeat_response.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
package sarama
|
||||
|
||||
type HeartbeatResponse struct {
|
||||
Err KError
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Err = KError(kerr)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) key() int16 {
|
||||
return 12
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
143
vendor/github.com/Shopify/sarama/join_group_request.go
generated
vendored
Normal file
|
@ -0,0 +1,143 @@
|
|||
package sarama
|
||||
|
||||
type GroupProtocol struct {
|
||||
Name string
|
||||
Metadata []byte
|
||||
}
|
||||
|
||||
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
|
||||
p.Name, err = pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Metadata, err = pd.getBytes()
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
|
||||
if err := pe.putString(p.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(p.Metadata); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type JoinGroupRequest struct {
|
||||
GroupId string
|
||||
SessionTimeout int32
|
||||
MemberId string
|
||||
ProtocolType string
|
||||
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
|
||||
OrderedGroupProtocols []*GroupProtocol
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(r.SessionTimeout)
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.ProtocolType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(r.GroupProtocols) > 0 {
|
||||
if len(r.OrderedGroupProtocols) > 0 {
|
||||
return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
|
||||
return err
|
||||
}
|
||||
for name, metadata := range r.GroupProtocols {
|
||||
if err := pe.putString(name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(metadata); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, protocol := range r.OrderedGroupProtocols {
|
||||
if err := protocol.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
if r.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.SessionTimeout, err = pd.getInt32(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ProtocolType, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.GroupProtocols = make(map[string][]byte)
|
||||
for i := 0; i < n; i++ {
|
||||
protocol := &GroupProtocol{}
|
||||
if err := protocol.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
r.GroupProtocols[protocol.Name] = protocol.Metadata
|
||||
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) key() int16 {
|
||||
return 11
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
|
||||
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
|
||||
Name: name,
|
||||
Metadata: metadata,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
|
||||
bin, err := encode(metadata, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.AddGroupProtocol(name, bin)
|
||||
return nil
|
||||
}
|
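AddGroupProtocol and AddGroupProtocolMetadata above append entries to OrderedGroupProtocols, which the encoder prefers over the deprecated GroupProtocols map. A minimal sketch of building a request this way (the group name, timeout and topic are illustrative; ConsumerGroupMemberMetadata is the sarama struct that AddGroupProtocolMetadata encodes into the metadata bytes):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		GroupId:        "example-group",
		SessionTimeout: 30000, // milliseconds
		ProtocolType:   "consumer",
	}
	// Encodes the metadata struct and appends it as an ordered protocol entry.
	err := req.AddGroupProtocolMetadata("range", &sarama.ConsumerGroupMemberMetadata{
		Topics: []string{"my-topic"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(req.OrderedGroupProtocols)) // 1
}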
115
vendor/github.com/Shopify/sarama/join_group_response.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
package sarama
|
||||
|
||||
type JoinGroupResponse struct {
|
||||
Err KError
|
||||
GenerationId int32
|
||||
GroupProtocol string
|
||||
LeaderId string
|
||||
MemberId string
|
||||
Members map[string][]byte
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
|
||||
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
|
||||
for id, bin := range r.Members {
|
||||
meta := new(ConsumerGroupMemberMetadata)
|
||||
if err := decode(bin, meta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
members[id] = *meta
|
||||
}
|
||||
return members, nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
pe.putInt32(r.GenerationId)
|
||||
|
||||
if err := pe.putString(r.GroupProtocol); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.LeaderId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.Members)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for memberId, memberMetadata := range r.Members {
|
||||
if err := pe.putString(memberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putBytes(memberMetadata); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Err = KError(kerr)
|
||||
|
||||
if r.GenerationId, err = pd.getInt32(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.GroupProtocol, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.LeaderId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Members = make(map[string][]byte)
|
||||
for i := 0; i < n; i++ {
|
||||
memberId, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
memberMetadata, err := pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Members[memberId] = memberMetadata
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) key() int16 {
|
||||
return 11
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
40
vendor/github.com/Shopify/sarama/leave_group_request.go
generated
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
package sarama
|
||||
|
||||
type LeaveGroupRequest struct {
|
||||
GroupId string
|
||||
MemberId string
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
if r.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) key() int16 {
|
||||
return 13
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
32
vendor/github.com/Shopify/sarama/leave_group_response.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
package sarama
|
||||
|
||||
type LeaveGroupResponse struct {
|
||||
Err KError
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Err = KError(kerr)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) key() int16 {
|
||||
return 13
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
29
vendor/github.com/Shopify/sarama/length_field.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
|||
package sarama
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
|
||||
type lengthField struct {
|
||||
startOffset int
|
||||
}
|
||||
|
||||
func (l *lengthField) saveOffset(in int) {
|
||||
l.startOffset = in
|
||||
}
|
||||
|
||||
func (l *lengthField) reserveLength() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
func (l *lengthField) run(curOffset int, buf []byte) error {
|
||||
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lengthField) check(curOffset int, buf []byte) error {
|
||||
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
|
||||
return PacketDecodingError{"length field invalid"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
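The run step above back-fills the 4-byte length prefix once the wrapped payload has been written: the value stored at startOffset is curOffset - startOffset - 4. A standalone sketch of the same arithmetic using only encoding/binary (the buffer layout and payload are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello")
	buf := make([]byte, 4+len(payload)) // 4 reserved prefix bytes, then the payload
	copy(buf[4:], payload)

	startOffset := 0
	curOffset := len(buf)
	// Same formula as lengthField.run: length of everything after the prefix.
	binary.BigEndian.PutUint32(buf[startOffset:], uint32(curOffset-startOffset-4))

	fmt.Println(binary.BigEndian.Uint32(buf)) // 5
}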
24
vendor/github.com/Shopify/sarama/list_groups_request.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
package sarama
|
||||
|
||||
type ListGroupsRequest struct {
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) encode(pe packetEncoder) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) key() int16 {
|
||||
return 16
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
69
vendor/github.com/Shopify/sarama/list_groups_response.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
|||
package sarama
|
||||
|
||||
type ListGroupsResponse struct {
|
||||
Err KError
|
||||
Groups map[string]string
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
|
||||
if err := pe.putArrayLength(len(r.Groups)); err != nil {
|
||||
return err
|
||||
}
|
||||
for groupId, protocolType := range r.Groups {
|
||||
if err := pe.putString(groupId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(protocolType); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Err = KError(kerr)
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Groups = make(map[string]string)
|
||||
for i := 0; i < n; i++ {
|
||||
groupId, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protocolType, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Groups[groupId] = protocolType
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) key() int16 {
|
||||
return 16
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
216
vendor/github.com/Shopify/sarama/message.go
generated
vendored
Normal file
|
@ -0,0 +1,216 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/eapache/go-xerial-snappy"
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
|
||||
type CompressionCodec int8
|
||||
|
||||
// only the last two bits are really used
|
||||
const compressionCodecMask int8 = 0x03
|
||||
|
||||
const (
|
||||
CompressionNone CompressionCodec = 0
|
||||
CompressionGZIP CompressionCodec = 1
|
||||
CompressionSnappy CompressionCodec = 2
|
||||
CompressionLZ4 CompressionCodec = 3
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
Codec CompressionCodec // codec used to compress the message contents
|
||||
Key []byte // the message key, may be nil
|
||||
Value []byte // the message contents
|
||||
Set *MessageSet // the message set a message might wrap
|
||||
Version int8 // v1 requires Kafka 0.10
|
||||
Timestamp time.Time // the timestamp of the message (version 1+ only)
|
||||
|
||||
compressedCache []byte
|
||||
compressedSize int // used for computing the compression ratio metrics
|
||||
}
|
||||
|
||||
func (m *Message) encode(pe packetEncoder) error {
|
||||
pe.push(&crc32Field{})
|
||||
|
||||
pe.putInt8(m.Version)
|
||||
|
||||
attributes := int8(m.Codec) & compressionCodecMask
|
||||
pe.putInt8(attributes)
|
||||
|
||||
if m.Version >= 1 {
|
||||
timestamp := int64(-1)
|
||||
|
||||
if !m.Timestamp.Before(time.Unix(0, 0)) {
|
||||
timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
|
||||
} else if !m.Timestamp.IsZero() {
|
||||
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
|
||||
}
|
||||
|
||||
pe.putInt64(timestamp)
|
||||
}
|
||||
|
||||
err := pe.putBytes(m.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var payload []byte
|
||||
|
||||
if m.compressedCache != nil {
|
||||
payload = m.compressedCache
|
||||
m.compressedCache = nil
|
||||
} else if m.Value != nil {
|
||||
switch m.Codec {
|
||||
case CompressionNone:
|
||||
payload = m.Value
|
||||
case CompressionGZIP:
|
||||
var buf bytes.Buffer
|
||||
writer := gzip.NewWriter(&buf)
|
||||
if _, err = writer.Write(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = buf.Bytes()
|
||||
payload = m.compressedCache
|
||||
case CompressionSnappy:
|
||||
tmp := snappy.Encode(m.Value)
|
||||
m.compressedCache = tmp
|
||||
payload = m.compressedCache
|
||||
case CompressionLZ4:
|
||||
var buf bytes.Buffer
|
||||
writer := lz4.NewWriter(&buf)
|
||||
if _, err = writer.Write(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = buf.Bytes()
|
||||
payload = m.compressedCache
|
||||
|
||||
default:
|
||||
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
|
||||
}
|
||||
// Keep in mind the compressed payload size for metric gathering
|
||||
m.compressedSize = len(payload)
|
||||
}
|
||||
|
||||
if err = pe.putBytes(payload); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
func (m *Message) decode(pd packetDecoder) (err error) {
|
||||
err = pd.push(&crc32Field{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Version, err = pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.Version > 1 {
|
||||
return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
|
||||
}
|
||||
|
||||
attribute, err := pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Codec = CompressionCodec(attribute & compressionCodecMask)
|
||||
|
||||
if m.Version == 1 {
|
||||
millis, err := pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// negative timestamps are invalid; in these cases we should return
|
||||
// a zero time
|
||||
timestamp := time.Time{}
|
||||
if millis >= 0 {
|
||||
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
|
||||
}
|
||||
|
||||
m.Timestamp = timestamp
|
||||
}
|
||||
|
||||
m.Key, err = pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Value, err = pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Required for deep equal assertion during tests but might be useful
|
||||
// for future metrics about the compression ratio in fetch requests
|
||||
m.compressedSize = len(m.Value)
|
||||
|
||||
switch m.Codec {
|
||||
case CompressionNone:
|
||||
// nothing to do
|
||||
case CompressionGZIP:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if m.Value, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionSnappy:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
if m.Value, err = snappy.Decode(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionLZ4:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
reader := lz4.NewReader(bytes.NewReader(m.Value))
|
||||
if m.Value, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
|
||||
}
|
||||
|
||||
return pd.pop()
|
||||
}
|
||||
|
||||
// decodes a message set from a previously encoded bulk message
|
||||
func (m *Message) decodeSet() (err error) {
|
||||
pd := realDecoder{raw: m.Value}
|
||||
m.Set = &MessageSet{}
|
||||
return m.Set.decode(&pd)
|
||||
}
|
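The CompressionGZIP branch of encode above compresses m.Value with compress/gzip and caches the result, while decode reverses it with gzip.NewReader and ioutil.ReadAll before decoding the nested message set. A self-contained sketch of just that round trip, outside the codec switch (the payload is illustrative):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	value := []byte("some message value")

	// Compress, as the CompressionGZIP case of Message.encode does.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(value); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decompress, as the CompressionGZIP case of Message.decode does.
	r, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	round, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(value, round)) // true
}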
89
vendor/github.com/Shopify/sarama/message_set.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
|||
package sarama
|
||||
|
||||
type MessageBlock struct {
|
||||
Offset int64
|
||||
Msg *Message
|
||||
}
|
||||
|
||||
// Messages is a convenience helper which returns either all the messages
|
||||
// wrapped in this block, or the block itself if there is no nested set
|
||||
func (msb *MessageBlock) Messages() []*MessageBlock {
|
||||
if msb.Msg.Set != nil {
|
||||
return msb.Msg.Set.Messages
|
||||
}
|
||||
return []*MessageBlock{msb}
|
||||
}
|
||||
|
||||
func (msb *MessageBlock) encode(pe packetEncoder) error {
|
||||
pe.putInt64(msb.Offset)
|
||||
pe.push(&lengthField{})
|
||||
err := msb.Msg.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
|
||||
if msb.Offset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pd.push(&lengthField{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msb.Msg = new(Message)
|
||||
if err = msb.Msg.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pd.pop(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type MessageSet struct {
|
||||
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
|
||||
Messages []*MessageBlock
|
||||
}
|
||||
|
||||
func (ms *MessageSet) encode(pe packetEncoder) error {
|
||||
for i := range ms.Messages {
|
||||
err := ms.Messages[i].encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
|
||||
ms.Messages = nil
|
||||
|
||||
for pd.remaining() > 0 {
|
||||
msb := new(MessageBlock)
|
||||
err = msb.decode(pd)
|
||||
switch err {
|
||||
case nil:
|
||||
ms.Messages = append(ms.Messages, msb)
|
||||
case ErrInsufficientData:
|
||||
// As an optimization the server is allowed to return a partial message at the
|
||||
// end of the message set. Clients should handle this case, so we just note the partial trailing message and stop.
|
||||
ms.PartialTrailingMessage = true
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) addMessage(msg *Message) {
|
||||
block := new(MessageBlock)
|
||||
block.Msg = msg
|
||||
ms.Messages = append(ms.Messages, block)
|
||||
}
|
52
vendor/github.com/Shopify/sarama/metadata_request.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
package sarama
|
||||
|
||||
type MetadataRequest struct {
|
||||
Topics []string
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) encode(pe packetEncoder) error {
|
||||
err := pe.putArrayLength(len(r.Topics))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range r.Topics {
|
||||
err = pe.putString(r.Topics[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Topics = make([]string, topicCount)
|
||||
for i := range r.Topics {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Topics[i] = topic
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) key() int16 {
|
||||
return 3
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
239
vendor/github.com/Shopify/sarama/metadata_response.go
generated
vendored
Normal file
|
@ -0,0 +1,239 @@
|
|||
package sarama
|
||||
|
||||
type PartitionMetadata struct {
|
||||
Err KError
|
||||
ID int32
|
||||
Leader int32
|
||||
Replicas []int32
|
||||
Isr []int32
|
||||
}
|
||||
|
||||
func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pm.Err = KError(tmp)
|
||||
|
||||
pm.ID, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.Leader, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.Replicas, err = pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.Isr, err = pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt16(int16(pm.Err))
|
||||
pe.putInt32(pm.ID)
|
||||
pe.putInt32(pm.Leader)
|
||||
|
||||
err = pe.putInt32Array(pm.Replicas)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putInt32Array(pm.Isr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type TopicMetadata struct {
|
||||
Err KError
|
||||
Name string
|
||||
Partitions []*PartitionMetadata
|
||||
}
|
||||
|
||||
func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tm.Err = KError(tmp)
|
||||
|
||||
tm.Name, err = pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tm.Partitions = make([]*PartitionMetadata, n)
|
||||
for i := 0; i < n; i++ {
|
||||
tm.Partitions[i] = new(PartitionMetadata)
|
||||
err = tm.Partitions[i].decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt16(int16(tm.Err))
|
||||
|
||||
err = pe.putString(tm.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(tm.Partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pm := range tm.Partitions {
|
||||
err = pm.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type MetadataResponse struct {
|
||||
Brokers []*Broker
|
||||
Topics []*TopicMetadata
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Brokers = make([]*Broker, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r.Brokers[i] = new(Broker)
|
||||
err = r.Brokers[i].decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
n, err = pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Topics = make([]*TopicMetadata, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r.Topics[i] = new(TopicMetadata)
|
||||
err = r.Topics[i].decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) encode(pe packetEncoder) error {
|
||||
err := pe.putArrayLength(len(r.Brokers))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, broker := range r.Brokers {
|
||||
err = broker.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(r.Topics))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, tm := range r.Topics {
|
||||
err = tm.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) key() int16 {
|
||||
return 3
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
||||
|
||||
// testing API
|
||||
|
||||
func (r *MetadataResponse) AddBroker(addr string, id int32) {
|
||||
r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
|
||||
var tmatch *TopicMetadata
|
||||
|
||||
for _, tm := range r.Topics {
|
||||
if tm.Name == topic {
|
||||
tmatch = tm
|
||||
goto foundTopic
|
||||
}
|
||||
}
|
||||
|
||||
tmatch = new(TopicMetadata)
|
||||
tmatch.Name = topic
|
||||
r.Topics = append(r.Topics, tmatch)
|
||||
|
||||
foundTopic:
|
||||
|
||||
tmatch.Err = err
|
||||
return tmatch
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
|
||||
tmatch := r.AddTopic(topic, ErrNoError)
|
||||
var pmatch *PartitionMetadata
|
||||
|
||||
for _, pm := range tmatch.Partitions {
|
||||
if pm.ID == partition {
|
||||
pmatch = pm
|
||||
goto foundPartition
|
||||
}
|
||||
}
|
||||
|
||||
pmatch = new(PartitionMetadata)
|
||||
pmatch.ID = partition
|
||||
tmatch.Partitions = append(tmatch.Partitions, pmatch)
|
||||
|
||||
foundPartition:
|
||||
|
||||
pmatch.Leader = brokerID
|
||||
pmatch.Replicas = replicas
|
||||
pmatch.Isr = isr
|
||||
pmatch.Err = err
|
||||
|
||||
}
|
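AddBroker, AddTopic and AddTopicPartition above form the testing API used to assemble a MetadataResponse by hand. A small sketch of that (the address, topic and IDs are illustrative):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	res := new(sarama.MetadataResponse)
	res.AddBroker("localhost:9092", 0)
	// Registers the topic if needed and records the partition's leader, replicas and ISR.
	res.AddTopicPartition("my-topic", 0, 0, []int32{0}, []int32{0}, sarama.ErrNoError)

	fmt.Println(len(res.Brokers), len(res.Topics)) // 1 1
}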
51
vendor/github.com/Shopify/sarama/metrics.go
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
|
||||
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
|
||||
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
|
||||
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
|
||||
const (
|
||||
metricsReservoirSize = 1028
|
||||
metricsAlphaFactor = 0.015
|
||||
)
|
||||
|
||||
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
|
||||
return r.GetOrRegister(name, func() metrics.Histogram {
|
||||
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
|
||||
}).(metrics.Histogram)
|
||||
}
|
||||
|
||||
func getMetricNameForBroker(name string, broker *Broker) string {
|
||||
// Use broker id like the Java client as it does not contain '.' or ':' characters that
|
||||
// can be interpreted as special characters by monitoring tools (e.g. Graphite)
|
||||
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
|
||||
}
|
||||
|
||||
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
|
||||
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
|
||||
}
|
||||
|
||||
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
|
||||
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
|
||||
}
|
||||
|
||||
func getMetricNameForTopic(name string, topic string) string {
|
||||
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
|
||||
// cf. KAFKA-1902 and KAFKA-2337
|
||||
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
|
||||
}
|
||||
|
||||
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
|
||||
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
|
||||
}
|
||||
|
||||
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
|
||||
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
|
||||
}
|
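getOrRegisterHistogram above lazily registers a histogram backed by an exponentially decaying reservoir using the constants defined at the top of this file. The same pattern can be reproduced directly against go-metrics; the registry and metric name below are illustrative:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// Same reservoir parameters as metricsReservoirSize / metricsAlphaFactor above.
	h := r.GetOrRegister("request-latency-in-ms", func() metrics.Histogram {
		return metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	}).(metrics.Histogram)

	h.Update(42)
	h.Update(40)
	fmt.Println(h.Count(), h.Mean()) // 2 41
}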
324
vendor/github.com/Shopify/sarama/mockbroker.go
generated
vendored
Normal file
|
@ -0,0 +1,324 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
const (
|
||||
expectationTimeout = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
type requestHandlerFunc func(req *request) (res encoder)
|
||||
|
||||
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
|
||||
// and provides the number of bytes read and written.
|
||||
type RequestNotifierFunc func(bytesRead, bytesWritten int)
|
||||
|
||||
// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
|
||||
// to facilitate testing of higher level or specialized consumers and producers
|
||||
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
|
||||
// but rather provides a facility to do that. It takes care of the TCP
|
||||
// transport, request unmarshaling, response marshaling, and makes it the test
|
||||
// writer's responsibility to program MockBroker behaviour that is correct
|
||||
// according to the Kafka API protocol.
|
||||
//
|
||||
// MockBroker is implemented as a TCP server listening on a kernel-selected
|
||||
// localhost port that can accept many connections. It reads Kafka requests
|
||||
// from that connection and returns responses programmed by the SetHandlerByMap
|
||||
// function. If a MockBroker receives a request that it has no programmed
|
||||
// response for, then it returns nothing and the request times out.
|
||||
//
|
||||
// A set of MockRequest builders to define mappings used by MockBroker is
|
||||
// provided by Sarama. But users can develop MockRequests of their own and use
|
||||
// them along with or instead of the standard ones.
|
||||
//
|
||||
// When running tests with MockBroker it is strongly recommended to specify
|
||||
// a timeout to `go test` so that if the broker hangs waiting for a response,
|
||||
// the test panics.
|
||||
//
|
||||
// It is not necessary to prefix message length or correlation ID to your
|
||||
// response bytes; the server does that automatically as a convenience.
|
||||
type MockBroker struct {
|
||||
brokerID int32
|
||||
port int32
|
||||
closing chan none
|
||||
stopper chan none
|
||||
expectations chan encoder
|
||||
listener net.Listener
|
||||
t TestReporter
|
||||
latency time.Duration
|
||||
handler requestHandlerFunc
|
||||
notifier RequestNotifierFunc
|
||||
history []RequestResponse
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// RequestResponse represents a Request/Response pair processed by MockBroker.
|
||||
type RequestResponse struct {
|
||||
Request protocolBody
|
||||
Response encoder
|
||||
}
|
||||
|
||||
// SetLatency makes broker pause for the specified period every time before
|
||||
// replying.
|
||||
func (b *MockBroker) SetLatency(latency time.Duration) {
|
||||
b.latency = latency
|
||||
}
|
||||
|
||||
// SetHandlerByMap defines mapping of Request types to MockResponses. When a
|
||||
// request is received by the broker, it looks up the request type in the map
|
||||
// and uses the found MockResponse instance to generate an appropriate reply.
|
||||
// If the request type is not found in the map then nothing is sent.
|
||||
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
|
||||
b.setHandler(func(req *request) (res encoder) {
|
||||
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
|
||||
mockResponse := handlerMap[reqTypeName]
|
||||
if mockResponse == nil {
|
||||
return nil
|
||||
}
|
||||
return mockResponse.For(req.body)
|
||||
})
|
||||
}
|
||||
|
||||
// SetNotifier sets a function that will get invoked whenever a request has been
|
||||
// processed successfully and will provide the number of bytes read and written.
|
||||
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
|
||||
b.lock.Lock()
|
||||
b.notifier = notifier
|
||||
b.lock.Unlock()
|
||||
}
|
||||
|
||||
// BrokerID returns broker ID assigned to the broker.
|
||||
func (b *MockBroker) BrokerID() int32 {
|
||||
return b.brokerID
|
||||
}
|
||||
|
||||
// History returns a slice of RequestResponse pairs in the order they were
|
||||
// processed by the broker. Note that in case of multiple connections to the
|
||||
// broker the order expected by a test can be different from the order recorded
|
||||
// in the history, unless some synchronization is implemented in the test.
|
||||
func (b *MockBroker) History() []RequestResponse {
|
||||
b.lock.Lock()
|
||||
history := make([]RequestResponse, len(b.history))
|
||||
copy(history, b.history)
|
||||
b.lock.Unlock()
|
||||
return history
|
||||
}
|
||||
|
||||
// Port returns the TCP port number the broker is listening for requests on.
|
||||
func (b *MockBroker) Port() int32 {
|
||||
return b.port
|
||||
}
|
||||
|
||||
// Addr returns the broker connection string in the form "<address>:<port>".
|
||||
func (b *MockBroker) Addr() string {
|
||||
return b.listener.Addr().String()
|
||||
}
|
||||
|
||||
// Close terminates the broker blocking until it stops internal goroutines and
|
||||
// releases all resources.
|
||||
func (b *MockBroker) Close() {
|
||||
close(b.expectations)
|
||||
if len(b.expectations) > 0 {
|
||||
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
|
||||
for e := range b.expectations {
|
||||
_, _ = buf.WriteString(spew.Sdump(e))
|
||||
}
|
||||
b.t.Error(buf.String())
|
||||
}
|
||||
close(b.closing)
|
||||
<-b.stopper
|
||||
}
|
||||
|
||||
// setHandler sets the specified function as the request handler. Whenever
|
||||
// a mock broker reads a request from the wire it passes the request to the
|
||||
// function and sends back whatever the handler function returns.
|
||||
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
|
||||
b.lock.Lock()
|
||||
b.handler = handler
|
||||
b.lock.Unlock()
|
||||
}
|
||||
|
||||
func (b *MockBroker) serverLoop() {
|
||||
defer close(b.stopper)
|
||||
var err error
|
||||
var conn net.Conn
|
||||
|
||||
go func() {
|
||||
<-b.closing
|
||||
err := b.listener.Close()
|
||||
if err != nil {
|
||||
b.t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
i := 0
|
||||
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
|
||||
wg.Add(1)
|
||||
go b.handleRequests(conn, i, wg)
|
||||
i++
|
||||
}
|
||||
wg.Wait()
|
||||
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
|
||||
}
|
||||
|
||||
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
defer func() {
|
||||
_ = conn.Close()
|
||||
}()
|
||||
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
|
||||
var err error
|
||||
|
||||
abort := make(chan none)
|
||||
defer close(abort)
|
||||
go func() {
|
||||
select {
|
||||
case <-b.closing:
|
||||
_ = conn.Close()
|
||||
case <-abort:
|
||||
}
|
||||
}()
|
||||
|
||||
resHeader := make([]byte, 8)
|
||||
for {
|
||||
req, bytesRead, err := decodeRequest(conn)
|
||||
if err != nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
|
||||
if b.latency > 0 {
|
||||
time.Sleep(b.latency)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
res := b.handler(req)
|
||||
b.history = append(b.history, RequestResponse{req.body, res})
|
||||
b.lock.Unlock()
|
||||
|
||||
if res == nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
|
||||
continue
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
|
||||
|
||||
encodedRes, err := encode(res, nil)
|
||||
if err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if len(encodedRes) == 0 {
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, 0)
|
||||
}
|
||||
b.lock.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
|
||||
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
|
||||
if _, err = conn.Write(resHeader); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if _, err = conn.Write(encodedRes); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
|
||||
}
|
||||
b.lock.Unlock()
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
|
||||
}
|
||||
|
||||
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
|
||||
select {
|
||||
case res, ok := <-b.expectations:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return res
|
||||
case <-time.After(expectationTimeout):
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (b *MockBroker) serverError(err error) {
|
||||
isConnectionClosedError := false
|
||||
if _, ok := err.(*net.OpError); ok {
|
||||
isConnectionClosedError = true
|
||||
} else if err == io.EOF {
|
||||
isConnectionClosedError = true
|
||||
} else if err.Error() == "use of closed network connection" {
|
||||
isConnectionClosedError = true
|
||||
}
|
||||
|
||||
if isConnectionClosedError {
|
||||
return
|
||||
}
|
||||
|
||||
b.t.Errorf(err.Error())
|
||||
}
|
||||
|
||||
// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
|
||||
// test framework and a channel of responses to use. If an error occurs it is
|
||||
// simply logged to the TestReporter and the broker exits.
|
||||
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
|
||||
return NewMockBrokerAddr(t, brokerID, "localhost:0")
|
||||
}
|
||||
|
||||
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
|
||||
// it rather than just some ephemeral port.
|
||||
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
|
||||
var err error
|
||||
|
||||
broker := &MockBroker{
|
||||
closing: make(chan none),
|
||||
stopper: make(chan none),
|
||||
t: t,
|
||||
brokerID: brokerID,
|
||||
expectations: make(chan encoder, 512),
|
||||
}
|
||||
broker.handler = broker.defaultRequestHandler
|
||||
|
||||
broker.listener, err = net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
|
||||
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp, err := strconv.ParseInt(portStr, 10, 32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
broker.port = int32(tmp)
|
||||
|
||||
go broker.serverLoop()
|
||||
|
||||
return broker
|
||||
}
|
||||
|
||||
func (b *MockBroker) Returns(e encoder) {
|
||||
b.expectations <- e
|
||||
}
|
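As a hedged example of how the broker above is typically driven from a test: the MockResponse builders referenced here are defined in mockresponses.go below, the topic and test names are illustrative, and sarama.NewClient is the ordinary client constructor rather than part of the mock API.

package kafka_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestAgainstMockBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Answer metadata requests with this broker as the leader of one partition.
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my-topic", 0, broker.BrokerID()),
	})

	client, err := sarama.NewClient([]string{broker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}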
463
vendor/github.com/Shopify/sarama/mockresponses.go
generated
vendored
Normal file
|
@ -0,0 +1,463 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// TestReporter has methods matching go's testing.T to avoid importing
|
||||
// `testing` in the main part of the library.
|
||||
type TestReporter interface {
|
||||
Error(...interface{})
|
||||
Errorf(string, ...interface{})
|
||||
Fatal(...interface{})
|
||||
Fatalf(string, ...interface{})
|
||||
}
|
||||
|
||||
// MockResponse is a response builder interface; it defines one method that
|
||||
// allows generating a response based on a request body. MockResponses are used
|
||||
// to program behavior of MockBroker in tests.
|
||||
type MockResponse interface {
|
||||
For(reqBody versionedDecoder) (res encoder)
|
||||
}
|
||||
|
||||
// MockWrapper is a mock response builder that returns a particular concrete
|
||||
// response regardless of the actual request passed to the `For` method.
|
||||
type MockWrapper struct {
|
||||
res encoder
|
||||
}
|
||||
|
||||
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
|
||||
return mw.res
|
||||
}
|
||||
|
||||
func NewMockWrapper(res encoder) *MockWrapper {
|
||||
return &MockWrapper{res: res}
|
||||
}
|
||||
|
||||
// MockSequence is a mock response builder that is created from a sequence of
|
||||
// concrete responses. Every time a `MockBroker` calls its `For` method
|
||||
// the next response from the sequence is returned. When the end of the
|
||||
// sequence is reached the last element from the sequence is returned.
|
||||
type MockSequence struct {
|
||||
responses []MockResponse
|
||||
}
|
||||
|
||||
func NewMockSequence(responses ...interface{}) *MockSequence {
|
||||
ms := &MockSequence{}
|
||||
ms.responses = make([]MockResponse, len(responses))
|
||||
for i, res := range responses {
|
||||
switch res := res.(type) {
|
||||
case MockResponse:
|
||||
ms.responses[i] = res
|
||||
case encoder:
|
||||
ms.responses[i] = NewMockWrapper(res)
|
||||
default:
|
||||
panic(fmt.Sprintf("Unexpected response type: %T", res))
|
||||
}
|
||||
}
|
||||
return ms
|
||||
}
|
||||
|
||||
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
|
||||
res = mc.responses[0].For(reqBody)
|
||||
if len(mc.responses) > 1 {
|
||||
mc.responses = mc.responses[1:]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// MockMetadataResponse is a `MetadataResponse` builder.
|
||||
type MockMetadataResponse struct {
|
||||
leaders map[string]map[int32]int32
|
||||
brokers map[string]int32
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
|
||||
return &MockMetadataResponse{
|
||||
leaders: make(map[string]map[int32]int32),
|
||||
brokers: make(map[string]int32),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
|
||||
partitions := mmr.leaders[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]int32)
|
||||
mmr.leaders[topic] = partitions
|
||||
}
|
||||
partitions[partition] = brokerID
|
||||
return mmr
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
|
||||
mmr.brokers[addr] = brokerID
|
||||
return mmr
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
|
||||
metadataRequest := reqBody.(*MetadataRequest)
|
||||
metadataResponse := &MetadataResponse{}
|
||||
for addr, brokerID := range mmr.brokers {
|
||||
metadataResponse.AddBroker(addr, brokerID)
|
||||
}
|
||||
if len(metadataRequest.Topics) == 0 {
|
||||
for topic, partitions := range mmr.leaders {
|
||||
for partition, brokerID := range partitions {
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
|
||||
}
|
||||
}
|
||||
return metadataResponse
|
||||
}
|
||||
for _, topic := range metadataRequest.Topics {
|
||||
for partition, brokerID := range mmr.leaders[topic] {
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
|
||||
}
|
||||
}
|
||||
return metadataResponse
|
||||
}
|
||||
|
||||
// MockOffsetResponse is an `OffsetResponse` builder.
|
||||
type MockOffsetResponse struct {
|
||||
offsets map[string]map[int32]map[int64]int64
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
|
||||
return &MockOffsetResponse{
|
||||
offsets: make(map[string]map[int32]map[int64]int64),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
|
||||
partitions := mor.offsets[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]map[int64]int64)
|
||||
mor.offsets[topic] = partitions
|
||||
}
|
||||
times := partitions[partition]
|
||||
if times == nil {
|
||||
times = make(map[int64]int64)
|
||||
partitions[partition] = times
|
||||
}
|
||||
times[time] = offset
|
||||
return mor
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
|
||||
offsetRequest := reqBody.(*OffsetRequest)
|
||||
offsetResponse := &OffsetResponse{}
|
||||
for topic, partitions := range offsetRequest.blocks {
|
||||
for partition, block := range partitions {
|
||||
offset := mor.getOffset(topic, partition, block.time)
|
||||
offsetResponse.AddTopicPartition(topic, partition, offset)
|
||||
}
|
||||
}
|
||||
return offsetResponse
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
|
||||
partitions := mor.offsets[topic]
|
||||
if partitions == nil {
|
||||
mor.t.Errorf("missing topic: %s", topic)
|
||||
}
|
||||
times := partitions[partition]
|
||||
if times == nil {
|
||||
mor.t.Errorf("missing partition: %d", partition)
|
||||
}
|
||||
offset, ok := times[time]
|
||||
if !ok {
|
||||
mor.t.Errorf("missing time: %d", time)
|
||||
}
|
||||
return offset
|
||||
}
|
||||
|
||||
// MockFetchResponse is a `FetchResponse` builder.
|
||||
type MockFetchResponse struct {
|
||||
messages map[string]map[int32]map[int64]Encoder
|
||||
highWaterMarks map[string]map[int32]int64
|
||||
t TestReporter
|
||||
batchSize int
|
||||
version int16
|
||||
}
|
||||
|
||||
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
|
||||
return &MockFetchResponse{
|
||||
messages: make(map[string]map[int32]map[int64]Encoder),
|
||||
highWaterMarks: make(map[string]map[int32]int64),
|
||||
t: t,
|
||||
batchSize: batchSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
|
||||
mfr.version = version
|
||||
return mfr
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
|
||||
partitions := mfr.messages[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]map[int64]Encoder)
|
||||
mfr.messages[topic] = partitions
|
||||
}
|
||||
messages := partitions[partition]
|
||||
if messages == nil {
|
||||
messages = make(map[int64]Encoder)
|
||||
partitions[partition] = messages
|
||||
}
|
||||
messages[offset] = msg
|
||||
return mfr
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
|
||||
partitions := mfr.highWaterMarks[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]int64)
|
||||
mfr.highWaterMarks[topic] = partitions
|
||||
}
|
||||
partitions[partition] = offset
|
||||
return mfr
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
|
||||
fetchRequest := reqBody.(*FetchRequest)
|
||||
res := &FetchResponse{
|
||||
Version: mfr.version,
|
||||
}
|
||||
for topic, partitions := range fetchRequest.blocks {
|
||||
for partition, block := range partitions {
|
||||
initialOffset := block.fetchOffset
|
||||
offset := initialOffset
|
||||
maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
|
||||
for i := 0; i < mfr.batchSize && offset < maxOffset; {
|
||||
msg := mfr.getMessage(topic, partition, offset)
|
||||
if msg != nil {
|
||||
res.AddMessage(topic, partition, nil, msg, offset)
|
||||
i++
|
||||
}
|
||||
offset++
|
||||
}
|
||||
fb := res.GetBlock(topic, partition)
|
||||
if fb == nil {
|
||||
res.AddError(topic, partition, ErrNoError)
|
||||
fb = res.GetBlock(topic, partition)
|
||||
}
|
||||
fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
|
||||
partitions := mfr.messages[topic]
|
||||
if partitions == nil {
|
||||
return nil
|
||||
}
|
||||
messages := partitions[partition]
|
||||
if messages == nil {
|
||||
return nil
|
||||
}
|
||||
return messages[offset]
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
|
||||
partitions := mfr.messages[topic]
|
||||
if partitions == nil {
|
||||
return 0
|
||||
}
|
||||
messages := partitions[partition]
|
||||
if messages == nil {
|
||||
return 0
|
||||
}
|
||||
return len(messages)
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
|
||||
partitions := mfr.highWaterMarks[topic]
|
||||
if partitions == nil {
|
||||
return 0
|
||||
}
|
||||
return partitions[partition]
|
||||
}
|
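A short usage sketch for the fetch builder above: it is registered on a MockBroker together with metadata and offset builders so a real consumer can run against canned responses. The MockBroker, MockMetadataResponse and MockOffsetResponse helpers, and the request-name keys passed to SetHandlerByMap, are assumed from the rest of this vendored package; treat the exact names as illustrative rather than verified.

package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

// TestConsumeFromMockFetch feeds one canned message through a MockBroker and
// reads it back with a real consumer.
func TestConsumeFromMockFetch(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1) // assumed helper, broker id 1
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, sarama.OffsetOldest, 0).
			SetOffset("my_topic", 0, sarama.OffsetNewest, 1),
		"FetchRequest": sarama.NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 0, sarama.StringEncoder("hello")),
	})

	consumer, err := sarama.NewConsumer([]string{broker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	defer pc.Close()

	if msg := <-pc.Messages(); string(msg.Value) != "hello" {
		t.Fatalf("unexpected message: %q", msg.Value)
	}
}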
||||
|
||||
// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
|
||||
type MockConsumerMetadataResponse struct {
|
||||
coordinators map[string]interface{}
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
|
||||
return &MockConsumerMetadataResponse{
|
||||
coordinators: make(map[string]interface{}),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
|
||||
mr.coordinators[group] = broker
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
|
||||
mr.coordinators[group] = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*ConsumerMetadataRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &ConsumerMetadataResponse{}
|
||||
v := mr.coordinators[group]
|
||||
switch v := v.(type) {
|
||||
case *MockBroker:
|
||||
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
|
||||
case KError:
|
||||
res.Err = v
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
|
||||
type MockOffsetCommitResponse struct {
|
||||
errors map[string]map[string]map[int32]KError
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
|
||||
return &MockOffsetCommitResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
|
||||
if mr.errors == nil {
|
||||
mr.errors = make(map[string]map[string]map[int32]KError)
|
||||
}
|
||||
topics := mr.errors[group]
|
||||
if topics == nil {
|
||||
topics = make(map[string]map[int32]KError)
|
||||
mr.errors[group] = topics
|
||||
}
|
||||
partitions := topics[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]KError)
|
||||
topics[topic] = partitions
|
||||
}
|
||||
partitions[partition] = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*OffsetCommitRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &OffsetCommitResponse{}
|
||||
for topic, partitions := range req.blocks {
|
||||
for partition := range partitions {
|
||||
res.AddError(topic, partition, mr.getError(group, topic, partition))
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
|
||||
topics := mr.errors[group]
|
||||
if topics == nil {
|
||||
return ErrNoError
|
||||
}
|
||||
partitions := topics[topic]
|
||||
if partitions == nil {
|
||||
return ErrNoError
|
||||
}
|
||||
kerror, ok := partitions[partition]
|
||||
if !ok {
|
||||
return ErrNoError
|
||||
}
|
||||
return kerror
|
||||
}
|
||||
|
||||
// MockProduceResponse is a `ProduceResponse` builder.
|
||||
type MockProduceResponse struct {
|
||||
errors map[string]map[int32]KError
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
|
||||
return &MockProduceResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
|
||||
if mr.errors == nil {
|
||||
mr.errors = make(map[string]map[int32]KError)
|
||||
}
|
||||
partitions := mr.errors[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]KError)
|
||||
mr.errors[topic] = partitions
|
||||
}
|
||||
partitions[partition] = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*ProduceRequest)
|
||||
res := &ProduceResponse{}
|
||||
for topic, partitions := range req.msgSets {
|
||||
for partition := range partitions {
|
||||
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
|
||||
partitions := mr.errors[topic]
|
||||
if partitions == nil {
|
||||
return ErrNoError
|
||||
}
|
||||
kerror, ok := partitions[partition]
|
||||
if !ok {
|
||||
return ErrNoError
|
||||
}
|
||||
return kerror
|
||||
}
|
||||
|
||||
// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
|
||||
type MockOffsetFetchResponse struct {
|
||||
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
|
||||
return &MockOffsetFetchResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
|
||||
if mr.offsets == nil {
|
||||
mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
|
||||
}
|
||||
topics := mr.offsets[group]
|
||||
if topics == nil {
|
||||
topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
|
||||
mr.offsets[group] = topics
|
||||
}
|
||||
partitions := topics[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*OffsetFetchResponseBlock)
|
||||
topics[topic] = partitions
|
||||
}
|
||||
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*OffsetFetchRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &OffsetFetchResponse{}
|
||||
for topic, partitions := range mr.offsets[group] {
|
||||
for partition, block := range partitions {
|
||||
res.AddBlock(topic, partition, block)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
190
vendor/github.com/Shopify/sarama/offset_commit_request.go
generated
vendored
Normal file
|
@ -0,0 +1,190 @@
|
|||
package sarama
|
||||
|
||||
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
|
||||
// tells the broker to set the timestamp to the time at which the request was received.
|
||||
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
|
||||
const ReceiveTime int64 = -1
|
||||
|
||||
// GroupGenerationUndefined is a special value for the group generation field of
|
||||
// Offset Commit Requests that should be used when a consumer group does not rely
|
||||
// on Kafka for partition management.
|
||||
const GroupGenerationUndefined = -1
|
||||
|
||||
type offsetCommitRequestBlock struct {
|
||||
offset int64
|
||||
timestamp int64
|
||||
metadata string
|
||||
}
|
||||
|
||||
func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt64(b.offset)
|
||||
if version == 1 {
|
||||
pe.putInt64(b.timestamp)
|
||||
} else if b.timestamp != 0 {
|
||||
Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
|
||||
}
|
||||
|
||||
return pe.putString(b.metadata)
|
||||
}
|
||||
|
||||
func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
if b.offset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if version == 1 {
|
||||
if b.timestamp, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b.metadata, err = pd.getString()
|
||||
return err
|
||||
}
|
||||
|
||||
type OffsetCommitRequest struct {
|
||||
ConsumerGroup string
|
||||
ConsumerGroupGeneration int32 // v1 or later
|
||||
ConsumerID string // v1 or later
|
||||
RetentionTime int64 // v2 or later
|
||||
|
||||
// Version can be:
|
||||
// - 0 (kafka 0.8.1 and later)
|
||||
// - 1 (kafka 0.8.2 and later)
|
||||
// - 2 (kafka 0.9.0 and later)
|
||||
Version int16
|
||||
blocks map[string]map[int32]*offsetCommitRequestBlock
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
|
||||
if r.Version < 0 || r.Version > 2 {
|
||||
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
|
||||
}
|
||||
|
||||
if err := pe.putString(r.ConsumerGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.Version >= 1 {
|
||||
pe.putInt32(r.ConsumerGroupGeneration)
|
||||
if err := pe.putString(r.ConsumerID); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if r.ConsumerGroupGeneration != 0 {
|
||||
Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
|
||||
}
|
||||
if r.ConsumerID != "" {
|
||||
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 2 {
|
||||
pe.putInt64(r.RetentionTime)
|
||||
} else if r.RetentionTime != 0 {
|
||||
Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.blocks)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.blocks {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err := block.encode(pe, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.ConsumerGroup, err = pd.getString(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.Version >= 1 {
|
||||
if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.ConsumerID, err = pd.getString(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 2 {
|
||||
if r.RetentionTime, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block := &offsetCommitRequestBlock{}
|
||||
if err := block.decode(pd, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = block
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) key() int16 {
|
||||
return 8
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_8_2_0
|
||||
case 2:
|
||||
return V0_9_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
|
||||
}
|
||||
|
||||
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
|
||||
}
|
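For illustration, the request above can also be built and sent by hand against the group's coordinator broker. Client.Coordinator and Broker.CommitOffset are assumed from the client and broker code elsewhere in this package, and the broker address, group and topic are placeholders; most applications should prefer the OffsetManager defined later in this vendored package.

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, nil) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	coordinator, err := client.Coordinator("my-group") // broker that owns the group's offsets
	if err != nil {
		log.Fatal(err)
	}

	req := &sarama.OffsetCommitRequest{
		Version:                 1, // v1 needs Kafka 0.8.2 or later
		ConsumerGroup:           "my-group",
		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
	}
	// ReceiveTime asks the broker to stamp the commit with its own receive time (v1 only).
	req.AddBlock("my_topic", 0, 42, sarama.ReceiveTime, "")

	res, err := coordinator.CommitOffset(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Errors["my_topic"][0]) // sarama.ErrNoError on success
}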
85
vendor/github.com/Shopify/sarama/offset_commit_response.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
|||
package sarama
|
||||
|
||||
type OffsetCommitResponse struct {
|
||||
Errors map[string]map[int32]KError
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
|
||||
if r.Errors == nil {
|
||||
r.Errors = make(map[string]map[int32]KError)
|
||||
}
|
||||
partitions := r.Errors[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]KError)
|
||||
r.Errors[topic] = partitions
|
||||
}
|
||||
partitions[partition] = kerror
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Errors)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.Errors {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, kerror := range partitions {
|
||||
pe.putInt32(partition)
|
||||
pe.putInt16(int16(kerror))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil || numTopics == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Errors = make(map[string]map[int32]KError, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numErrors, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Errors[name] = make(map[int32]KError, numErrors)
|
||||
|
||||
for j := 0; j < numErrors; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Errors[name][id] = KError(tmp)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) key() int16 {
|
||||
return 8
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
81
vendor/github.com/Shopify/sarama/offset_fetch_request.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
package sarama
|
||||
|
||||
type OffsetFetchRequest struct {
|
||||
ConsumerGroup string
|
||||
Version int16
|
||||
partitions map[string][]int32
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
|
||||
if r.Version < 0 || r.Version > 1 {
|
||||
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
|
||||
}
|
||||
|
||||
if err = pe.putString(r.ConsumerGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pe.putArrayLength(len(r.partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.partitions {
|
||||
if err = pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pe.putInt32Array(partitions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
if r.ConsumerGroup, err = pd.getString(); err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if partitionCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.partitions = make(map[string][]int32)
|
||||
for i := 0; i < partitionCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitions, err := pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.partitions[topic] = partitions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) key() int16 {
|
||||
return 9
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_8_2_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
|
||||
if r.partitions == nil {
|
||||
r.partitions = make(map[string][]int32)
|
||||
}
|
||||
|
||||
r.partitions[topic] = append(r.partitions[topic], partitionID)
|
||||
}
|
143
vendor/github.com/Shopify/sarama/offset_fetch_response.go
generated
vendored
Normal file
|
@ -0,0 +1,143 @@
|
|||
package sarama
|
||||
|
||||
type OffsetFetchResponseBlock struct {
|
||||
Offset int64
|
||||
Metadata string
|
||||
Err KError
|
||||
}
|
||||
|
||||
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
|
||||
b.Offset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.Metadata, err = pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt64(b.Offset)
|
||||
|
||||
err = pe.putString(b.Metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt16(int16(b.Err))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type OffsetFetchResponse struct {
|
||||
Blocks map[string]map[int32]*OffsetFetchResponseBlock
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.Blocks {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err := block.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil || numTopics == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if numBlocks == 0 {
|
||||
r.Blocks[name] = nil
|
||||
continue
|
||||
}
|
||||
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(OffsetFetchResponseBlock)
|
||||
err = block.decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) key() int16 {
|
||||
return 9
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
|
||||
}
|
||||
partitions := r.Blocks[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*OffsetFetchResponseBlock)
|
||||
r.Blocks[topic] = partitions
|
||||
}
|
||||
partitions[partition] = block
|
||||
}
|
560
vendor/github.com/Shopify/sarama/offset_manager.go
generated
vendored
Normal file
|
@ -0,0 +1,560 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Offset Manager
|
||||
|
||||
// OffsetManager uses Kafka to store and fetch consumed partition offsets.
|
||||
type OffsetManager interface {
|
||||
// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
|
||||
// It will return an error if this OffsetManager is already managing the given
|
||||
// topic/partition.
|
||||
ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
|
||||
|
||||
// Close stops the OffsetManager from managing offsets. It is required to call
|
||||
// this function before an OffsetManager object passes out of scope, as it
|
||||
// will otherwise leak memory. You must call this after all the
|
||||
// PartitionOffsetManagers are closed.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type offsetManager struct {
|
||||
client Client
|
||||
conf *Config
|
||||
group string
|
||||
|
||||
lock sync.Mutex
|
||||
poms map[string]map[int32]*partitionOffsetManager
|
||||
boms map[*Broker]*brokerOffsetManager
|
||||
}
|
||||
|
||||
// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
|
||||
// It is still necessary to call Close() on the underlying client when finished with the partition manager.
|
||||
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
om := &offsetManager{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
group: group,
|
||||
poms: make(map[string]map[int32]*partitionOffsetManager),
|
||||
boms: make(map[*Broker]*brokerOffsetManager),
|
||||
}
|
||||
|
||||
return om, nil
|
||||
}
|
||||
|
||||
func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
|
||||
pom, err := om.newPartitionOffsetManager(topic, partition)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
topicManagers := om.poms[topic]
|
||||
if topicManagers == nil {
|
||||
topicManagers = make(map[int32]*partitionOffsetManager)
|
||||
om.poms[topic] = topicManagers
|
||||
}
|
||||
|
||||
if topicManagers[partition] != nil {
|
||||
return nil, ConfigurationError("That topic/partition is already being managed")
|
||||
}
|
||||
|
||||
topicManagers[partition] = pom
|
||||
return pom, nil
|
||||
}
|
||||
|
||||
func (om *offsetManager) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
bom := om.boms[broker]
|
||||
if bom == nil {
|
||||
bom = om.newBrokerOffsetManager(broker)
|
||||
om.boms[broker] = bom
|
||||
}
|
||||
|
||||
bom.refs++
|
||||
|
||||
return bom
|
||||
}
|
||||
|
||||
func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
bom.refs--
|
||||
|
||||
if bom.refs == 0 {
|
||||
close(bom.updateSubscriptions)
|
||||
if om.boms[bom.broker] == bom {
|
||||
delete(om.boms, bom.broker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
delete(om.boms, bom.broker)
|
||||
}
|
||||
|
||||
func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
delete(om.poms[pom.topic], pom.partition)
|
||||
if len(om.poms[pom.topic]) == 0 {
|
||||
delete(om.poms, pom.topic)
|
||||
}
|
||||
}
|
||||
|
||||
// Partition Offset Manager
|
||||
|
||||
// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
|
||||
// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
|
||||
// out of scope.
|
||||
type PartitionOffsetManager interface {
|
||||
// NextOffset returns the next offset that should be consumed for the managed
|
||||
// partition, accompanied by metadata which can be used to reconstruct the state
|
||||
// of the partition consumer when it resumes. NextOffset() will return
|
||||
// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
|
||||
// was committed for this partition yet.
|
||||
NextOffset() (int64, string)
|
||||
|
||||
// MarkOffset marks the provided offset, alongside a metadata string
|
||||
// that represents the state of the partition consumer at that point in time. The
|
||||
// metadata string can be used by another consumer to restore that state, so it
|
||||
// can resume consumption.
|
||||
//
|
||||
// To follow upstream conventions, you are expected to mark the offset of the
|
||||
// next message to read, not the last message read. Thus, when calling `MarkOffset`
|
||||
// you should typically add one to the offset of the last consumed message.
|
||||
//
|
||||
// Note: calling MarkOffset does not necessarily commit the offset to the backend
|
||||
// store immediately for efficiency reasons, and it may never be committed if
|
||||
// your application crashes. This means that you may end up processing the same
|
||||
// message twice, and your processing should ideally be idempotent.
|
||||
MarkOffset(offset int64, metadata string)
|
||||
|
||||
// ResetOffset resets to the provided offset, alongside a metadata string that
|
||||
// represents the state of the partition consumer at that point in time. Reset
|
||||
// acts as a counterpart to MarkOffset, the difference being that it allows you to
|
||||
// reset an offset to an earlier or smaller value, where MarkOffset only
|
||||
// allows incrementing the offset. See MarkOffset for more details.
|
||||
ResetOffset(offset int64, metadata string)
|
||||
|
||||
// Errors returns a read channel of errors that occur during offset management, if
|
||||
// enabled. By default, errors are logged and not returned over this channel. If
|
||||
// you want to implement any custom error handling, set your config's
|
||||
// Consumer.Return.Errors setting to true, and read from this channel.
|
||||
Errors() <-chan *ConsumerError
|
||||
|
||||
// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
|
||||
// return immediately, after which you should wait until the 'errors' channel has
|
||||
// been drained and closed. It is required to call this function, or Close, before
|
||||
// a PartitionOffsetManager object passes out of scope, as it will otherwise leak memory. You
|
||||
// must call this before calling Close on the underlying client.
|
||||
AsyncClose()
|
||||
|
||||
// Close stops the PartitionOffsetManager from managing offsets. It is required to
|
||||
// call this function (or AsyncClose) before a PartitionOffsetManager object
|
||||
// passes out of scope, as it will otherwise leak memory. You must call this
|
||||
// before calling Close on the underlying client.
|
||||
Close() error
|
||||
}
|
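A minimal end-to-end sketch of the two interfaces above, using only calls that appear in this file plus a client constructor assumed from elsewhere in the package; the broker address, group and topic are placeholders. Note the close order required by the comments: partition managers first, then the OffsetManager, then the client.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, nil) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close()

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	// NextOffset returns the next offset to consume, or Consumer.Offsets.Initial
	// if nothing has been committed for this group/partition yet.
	offset, metadata := pom.NextOffset()
	log.Printf("resuming at offset %d (metadata %q)", offset, metadata)

	// ... consume the message at `offset` ...

	// Per the MarkOffset contract, mark the offset of the *next* message to read.
	pom.MarkOffset(offset+1, "")
}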
||||
|
||||
type partitionOffsetManager struct {
|
||||
parent *offsetManager
|
||||
topic string
|
||||
partition int32
|
||||
|
||||
lock sync.Mutex
|
||||
offset int64
|
||||
metadata string
|
||||
dirty bool
|
||||
clean sync.Cond
|
||||
broker *brokerOffsetManager
|
||||
|
||||
errors chan *ConsumerError
|
||||
rebalance chan none
|
||||
dying chan none
|
||||
}
|
||||
|
||||
func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
|
||||
pom := &partitionOffsetManager{
|
||||
parent: om,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
|
||||
rebalance: make(chan none, 1),
|
||||
dying: make(chan none),
|
||||
}
|
||||
pom.clean.L = &pom.lock
|
||||
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pom.broker.updateSubscriptions <- pom
|
||||
|
||||
go withRecover(pom.mainLoop)
|
||||
|
||||
return pom, nil
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) mainLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-pom.rebalance:
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
pom.handleError(err)
|
||||
pom.rebalance <- none{}
|
||||
} else {
|
||||
pom.broker.updateSubscriptions <- pom
|
||||
}
|
||||
case <-pom.dying:
|
||||
if pom.broker != nil {
|
||||
select {
|
||||
case <-pom.rebalance:
|
||||
case pom.broker.updateSubscriptions <- pom:
|
||||
}
|
||||
pom.parent.unrefBrokerOffsetManager(pom.broker)
|
||||
}
|
||||
pom.parent.abandonPartitionOffsetManager(pom)
|
||||
close(pom.errors)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) selectBroker() error {
|
||||
if pom.broker != nil {
|
||||
pom.parent.unrefBrokerOffsetManager(pom.broker)
|
||||
pom.broker = nil
|
||||
}
|
||||
|
||||
var broker *Broker
|
||||
var err error
|
||||
|
||||
if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pom.broker = pom.parent.refBrokerOffsetManager(broker)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
|
||||
request := new(OffsetFetchRequest)
|
||||
request.Version = 1
|
||||
request.ConsumerGroup = pom.parent.group
|
||||
request.AddPartition(pom.topic, pom.partition)
|
||||
|
||||
response, err := pom.broker.broker.FetchOffset(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := response.GetBlock(pom.topic, pom.partition)
|
||||
if block == nil {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
||||
switch block.Err {
|
||||
case ErrNoError:
|
||||
pom.offset = block.Offset
|
||||
pom.metadata = block.Metadata
|
||||
return nil
|
||||
case ErrNotCoordinatorForConsumer:
|
||||
if retries <= 0 {
|
||||
return block.Err
|
||||
}
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
return err
|
||||
}
|
||||
return pom.fetchInitialOffset(retries - 1)
|
||||
case ErrOffsetsLoadInProgress:
|
||||
if retries <= 0 {
|
||||
return block.Err
|
||||
}
|
||||
time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
|
||||
return pom.fetchInitialOffset(retries - 1)
|
||||
default:
|
||||
return block.Err
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) handleError(err error) {
|
||||
cErr := &ConsumerError{
|
||||
Topic: pom.topic,
|
||||
Partition: pom.partition,
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if pom.parent.conf.Consumer.Return.Errors {
|
||||
pom.errors <- cErr
|
||||
} else {
|
||||
Logger.Println(cErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
|
||||
return pom.errors
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if offset > pom.offset {
|
||||
pom.offset = offset
|
||||
pom.metadata = metadata
|
||||
pom.dirty = true
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if offset <= pom.offset {
|
||||
pom.offset = offset
|
||||
pom.metadata = metadata
|
||||
pom.dirty = true
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if pom.offset == offset && pom.metadata == metadata {
|
||||
pom.dirty = false
|
||||
pom.clean.Signal()
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) NextOffset() (int64, string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if pom.offset >= 0 {
|
||||
return pom.offset, pom.metadata
|
||||
}
|
||||
|
||||
return pom.parent.conf.Consumer.Offsets.Initial, ""
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) AsyncClose() {
|
||||
go func() {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
for pom.dirty {
|
||||
pom.clean.Wait()
|
||||
}
|
||||
|
||||
close(pom.dying)
|
||||
}()
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) Close() error {
|
||||
pom.AsyncClose()
|
||||
|
||||
var errors ConsumerErrors
|
||||
for err := range pom.errors {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Broker Offset Manager
|
||||
|
||||
type brokerOffsetManager struct {
|
||||
parent *offsetManager
|
||||
broker *Broker
|
||||
timer *time.Ticker
|
||||
updateSubscriptions chan *partitionOffsetManager
|
||||
subscriptions map[*partitionOffsetManager]none
|
||||
refs int
|
||||
}
|
||||
|
||||
func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
|
||||
bom := &brokerOffsetManager{
|
||||
parent: om,
|
||||
broker: broker,
|
||||
timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
|
||||
updateSubscriptions: make(chan *partitionOffsetManager),
|
||||
subscriptions: make(map[*partitionOffsetManager]none),
|
||||
}
|
||||
|
||||
go withRecover(bom.mainLoop)
|
||||
|
||||
return bom
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) mainLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-bom.timer.C:
|
||||
if len(bom.subscriptions) > 0 {
|
||||
bom.flushToBroker()
|
||||
}
|
||||
case s, ok := <-bom.updateSubscriptions:
|
||||
if !ok {
|
||||
bom.timer.Stop()
|
||||
return
|
||||
}
|
||||
if _, ok := bom.subscriptions[s]; ok {
|
||||
delete(bom.subscriptions, s)
|
||||
} else {
|
||||
bom.subscriptions[s] = none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) flushToBroker() {
|
||||
request := bom.constructRequest()
|
||||
if request == nil {
|
||||
return
|
||||
}
|
||||
|
||||
response, err := bom.broker.CommitOffset(request)
|
||||
|
||||
if err != nil {
|
||||
bom.abort(err)
|
||||
return
|
||||
}
|
||||
|
||||
for s := range bom.subscriptions {
|
||||
if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var err KError
|
||||
var ok bool
|
||||
|
||||
if response.Errors[s.topic] == nil {
|
||||
s.handleError(ErrIncompleteResponse)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
continue
|
||||
}
|
||||
if err, ok = response.Errors[s.topic][s.partition]; !ok {
|
||||
s.handleError(ErrIncompleteResponse)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
continue
|
||||
}
|
||||
|
||||
switch err {
|
||||
case ErrNoError:
|
||||
block := request.blocks[s.topic][s.partition]
|
||||
s.updateCommitted(block.offset, block.metadata)
|
||||
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
|
||||
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
|
||||
// not a critical error, we just need to redispatch
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
|
||||
// nothing we can do about this, just tell the user and carry on
|
||||
s.handleError(err)
|
||||
case ErrOffsetsLoadInProgress:
|
||||
// nothing wrong but we didn't commit, we'll get it next time round
|
||||
break
|
||||
case ErrUnknownTopicOrPartition:
|
||||
// let the user know *and* try redispatching - if topic-auto-create is
|
||||
// enabled, redispatching should trigger a metadata request and create the
|
||||
// topic; if not then re-dispatching won't help, but we've let the user
|
||||
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
|
||||
fallthrough
|
||||
default:
|
||||
// dunno, tell the user and try redispatching
|
||||
s.handleError(err)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
|
||||
var r *OffsetCommitRequest
|
||||
var perPartitionTimestamp int64
|
||||
if bom.parent.conf.Consumer.Offsets.Retention == 0 {
|
||||
perPartitionTimestamp = ReceiveTime
|
||||
r = &OffsetCommitRequest{
|
||||
Version: 1,
|
||||
ConsumerGroup: bom.parent.group,
|
||||
ConsumerGroupGeneration: GroupGenerationUndefined,
|
||||
}
|
||||
} else {
|
||||
r = &OffsetCommitRequest{
|
||||
Version: 2,
|
||||
RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
|
||||
ConsumerGroup: bom.parent.group,
|
||||
ConsumerGroupGeneration: GroupGenerationUndefined,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for s := range bom.subscriptions {
|
||||
s.lock.Lock()
|
||||
if s.dirty {
|
||||
r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
|
||||
}
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(r.blocks) > 0 {
|
||||
return r
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) abort(err error) {
|
||||
_ = bom.broker.Close() // we don't care about the error this might return, we already have one
|
||||
bom.parent.abandonBroker(bom)
|
||||
|
||||
for pom := range bom.subscriptions {
|
||||
pom.handleError(err)
|
||||
pom.rebalance <- none{}
|
||||
}
|
||||
|
||||
for s := range bom.updateSubscriptions {
|
||||
if _, ok := bom.subscriptions[s]; !ok {
|
||||
s.handleError(err)
|
||||
s.rebalance <- none{}
|
||||
}
|
||||
}
|
||||
|
||||
bom.subscriptions = make(map[*partitionOffsetManager]none)
|
||||
}
|
132
vendor/github.com/Shopify/sarama/offset_request.go
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
|||
package sarama
|
||||
|
||||
type offsetRequestBlock struct {
|
||||
time int64
|
||||
maxOffsets int32 // Only used in version 0
|
||||
}
|
||||
|
||||
func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt64(int64(b.time))
|
||||
if version == 0 {
|
||||
pe.putInt32(b.maxOffsets)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
if b.time, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if version == 0 {
|
||||
if b.maxOffsets, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type OffsetRequest struct {
|
||||
Version int16
|
||||
blocks map[string]map[int32]*offsetRequestBlock
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) encode(pe packetEncoder) error {
|
||||
pe.putInt32(-1) // replica ID is always -1 for clients
|
||||
err := pe.putArrayLength(len(r.blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err = block.encode(pe, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
|
||||
r.Version = version
|
||||
|
||||
// Ignore replica ID
|
||||
if _, err := pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
blockCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
|
||||
for i := 0; i < blockCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block := &offsetRequestBlock{}
|
||||
if err := block.decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = block
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) key() int16 {
|
||||
return 2
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_10_1_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
|
||||
}
|
||||
|
||||
tmp := new(offsetRequestBlock)
|
||||
tmp.time = time
|
||||
if r.Version == 0 {
|
||||
tmp.maxOffsets = maxOffsets
|
||||
}
|
||||
|
||||
r.blocks[topic][partitionID] = tmp
|
||||
}
|
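In practice this request is usually issued indirectly: the Client API elsewhere in this package exposes GetOffset, which builds an OffsetRequest with one of the special times OffsetNewest (-1) or OffsetOldest (-2). A hedged sketch, with a placeholder broker address and topic:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, nil) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	oldest, err := client.GetOffset("my_topic", 0, sarama.OffsetOldest) // log start offset
	if err != nil {
		log.Fatal(err)
	}
	newest, err := client.GetOffset("my_topic", 0, sarama.OffsetNewest) // next offset to be produced
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("partition 0 currently holds offsets [%d, %d)\n", oldest, newest)
}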
174
vendor/github.com/Shopify/sarama/offset_response.go
generated
vendored
Normal file
|
@ -0,0 +1,174 @@
|
|||
package sarama
|
||||
|
||||
type OffsetResponseBlock struct {
|
||||
Err KError
|
||||
Offsets []int64 // Version 0
|
||||
Offset int64 // Version 1
|
||||
Timestamp int64 // Version 1
|
||||
}
|
||||
|
||||
func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
if version == 0 {
|
||||
b.Offsets, err = pd.getInt64Array()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
b.Timestamp, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.Offset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For backwards compatibility put the offset in the offsets array too
|
||||
b.Offsets = []int64{b.Offset}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
|
||||
pe.putInt16(int16(b.Err))
|
||||
|
||||
if version == 0 {
|
||||
return pe.putInt64Array(b.Offsets)
|
||||
}
|
||||
|
||||
pe.putInt64(b.Timestamp)
|
||||
pe.putInt64(b.Offset)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type OffsetResponse struct {
|
||||
Version int16
|
||||
Blocks map[string]map[int32]*OffsetResponseBlock
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(OffsetResponseBlock)
|
||||
err = block.decode(pd, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
/*
|
||||
// [0 0 0 1 ntopics
|
||||
0 8 109 121 95 116 111 112 105 99 topic
|
||||
0 0 0 1 npartitions
|
||||
0 0 0 0 id
|
||||
0 0
|
||||
|
||||
0 0 0 1 0 0 0 0
|
||||
0 1 1 1 0 0 0 1
|
||||
0 8 109 121 95 116 111 112
|
||||
105 99 0 0 0 1 0 0
|
||||
0 0 0 0 0 0 0 1
|
||||
0 0 0 0 0 1 1 1] <nil>
|
||||
|
||||
*/
|
||||
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
|
||||
if err = pe.putArrayLength(len(r.Blocks)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, partitions := range r.Blocks {
|
||||
if err = pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err = block.encode(pe, r.version()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) key() int16 {
|
||||
return 2
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_10_1_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
// testing API
|
||||
|
||||
func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
|
||||
}
|
||||
byTopic, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
byTopic = make(map[int32]*OffsetResponseBlock)
|
||||
r.Blocks[topic] = byTopic
|
||||
}
|
||||
byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
|
||||
}
|
45
vendor/github.com/Shopify/sarama/packet_decoder.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
package sarama
|
||||
|
||||
// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
|
||||
// Types implementing Decoder only need to worry about calling methods like GetString,
|
||||
// not about how a string is represented in Kafka.
|
||||
type packetDecoder interface {
|
||||
// Primitives
|
||||
getInt8() (int8, error)
|
||||
getInt16() (int16, error)
|
||||
getInt32() (int32, error)
|
||||
getInt64() (int64, error)
|
||||
getArrayLength() (int, error)
|
||||
|
||||
// Collections
|
||||
getBytes() ([]byte, error)
|
||||
getString() (string, error)
|
||||
getInt32Array() ([]int32, error)
|
||||
getInt64Array() ([]int64, error)
|
||||
getStringArray() ([]string, error)
|
||||
|
||||
// Subsets
|
||||
remaining() int
|
||||
getSubset(length int) (packetDecoder, error)
|
||||
|
||||
// Stacks, see PushDecoder
|
||||
push(in pushDecoder) error
|
||||
pop() error
|
||||
}
|
||||
|
||||
// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
|
||||
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
|
||||
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
|
||||
// depend upon have been decoded.
|
||||
type pushDecoder interface {
|
||||
// Saves the offset into the input buffer as the location to actually read the calculated value when able.
|
||||
saveOffset(in int)
|
||||
|
||||
// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
|
||||
reserveLength() int
|
||||
|
||||
// Indicates that all required data is now available to calculate and check the field.
|
||||
// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
|
||||
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
|
||||
check(curOffset int, buf []byte) error
|
||||
}
|
50
vendor/github.com/Shopify/sarama/packet_encoder.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
package sarama
|
||||
|
||||
import "github.com/rcrowley/go-metrics"
|
||||
|
||||
// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
|
||||
// Types implementing Encoder only need to worry about calling methods like PutString,
|
||||
// not about how a string is represented in Kafka.
|
||||
type packetEncoder interface {
|
||||
// Primitives
|
||||
putInt8(in int8)
|
||||
putInt16(in int16)
|
||||
putInt32(in int32)
|
||||
putInt64(in int64)
|
||||
putArrayLength(in int) error
|
||||
|
||||
// Collections
|
||||
putBytes(in []byte) error
|
||||
putRawBytes(in []byte) error
|
||||
putString(in string) error
|
||||
putStringArray(in []string) error
|
||||
putInt32Array(in []int32) error
|
||||
putInt64Array(in []int64) error
|
||||
|
||||
// Provide the current offset to record the batch size metric
|
||||
offset() int
|
||||
|
||||
// Stacks, see PushEncoder
|
||||
push(in pushEncoder)
|
||||
pop() error
|
||||
|
||||
// To record metrics when provided
|
||||
metricRegistry() metrics.Registry
|
||||
}
|
||||
|
||||
// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
|
||||
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
|
||||
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
|
||||
// depend upon have been written.
|
||||
type pushEncoder interface {
|
||||
// Saves the offset into the input buffer as the location to actually write the calculated value when able.
|
||||
saveOffset(in int)
|
||||
|
||||
// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
|
||||
reserveLength() int
|
||||
|
||||
// Indicates that all required data is now available to calculate and write the field.
|
||||
// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
|
||||
// of data to the saved offset, based on the data between the saved offset and curOffset.
|
||||
run(curOffset int, buf []byte) error
|
||||
}
|
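Since pushEncoder is unexported, the standalone sketch below only mirrors its shape to illustrate the push/pop idea: reserve the bytes for a field up front, encode everything the field depends on, then go back and fill the reservation in. Here the field is a 4-byte big-endian length prefix; a CRC field would work the same way with a checksum.

package main

import (
	"encoding/binary"
	"fmt"
)

// lengthField mimics the pushEncoder shape: remember where the reserved bytes
// live, report how many to reserve, and fill them in once the payload is known.
type lengthField struct{ startOffset int }

func (l *lengthField) saveOffset(in int)  { l.startOffset = in }
func (l *lengthField) reserveLength() int { return 4 }
func (l *lengthField) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}

func main() {
	payload := []byte("hello")
	buf := make([]byte, 4+len(payload))

	lf := &lengthField{}
	lf.saveOffset(0)          // "push": reserve 4 bytes at the start
	copy(buf[4:], payload)    // encode the bytes the length depends on
	_ = lf.run(len(buf), buf) // "pop": compute and write the length
	fmt.Println(buf)          // [0 0 0 5 104 101 108 108 111]
}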
135
vendor/github.com/Shopify/sarama/partitioner.go
generated
vendored
Normal file
|
@ -0,0 +1,135 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
|
||||
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
|
||||
// as simple default implementations.
|
||||
type Partitioner interface {
|
||||
// Partition takes a message and partition count and chooses a partition
|
||||
Partition(message *ProducerMessage, numPartitions int32) (int32, error)
|
||||
|
||||
// RequiresConsistency indicates to the user of the partitioner whether the
|
||||
// mapping of key->partition is consistent or not. Specifically, if a
|
||||
// partitioner requires consistency then it must be allowed to choose from all
|
||||
// partitions (even ones known to be unavailable), and its choice must be
|
||||
// respected by the caller. The obvious example is the HashPartitioner.
|
||||
RequiresConsistency() bool
|
||||
}
|
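As a concrete illustration of the interface, the hypothetical partitioner below always returns the first partition; none of it is part of the library, it only shows the shape a user implementation takes and how it pairs with the PartitionerConstructor type defined next.

package example

import "github.com/Shopify/sarama"

// zeroPartitioner is a hypothetical Partitioner: every message goes to the
// first partition. It exists only to demonstrate the interface.
type zeroPartitioner struct{}

// NewZeroPartitioner matches the PartitionerConstructor signature, so it can be
// assigned to a producer config's Partitioner field.
func NewZeroPartitioner(topic string) sarama.Partitioner {
	return zeroPartitioner{}
}

func (zeroPartitioner) Partition(message *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	return 0, nil
}

// No key-to-partition mapping has to be preserved, so the producer may restrict
// the choice to partitions that are currently available.
func (zeroPartitioner) RequiresConsistency() bool {
	return false
}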
||||
|
||||
// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
|
||||
type PartitionerConstructor func(topic string) Partitioner
|
||||
|
||||
type manualPartitioner struct{}
|
||||
|
||||
// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
|
||||
// ProducerMessage's Partition field as the partition to produce to.
|
||||
func NewManualPartitioner(topic string) Partitioner {
|
||||
return new(manualPartitioner)
|
||||
}
|
||||
|
||||
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
return message.Partition, nil
|
||||
}
|
||||
|
||||
func (p *manualPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type randomPartitioner struct {
|
||||
generator *rand.Rand
|
||||
}
|
||||
|
||||
// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
|
||||
func NewRandomPartitioner(topic string) Partitioner {
|
||||
p := new(randomPartitioner)
|
||||
p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
return int32(p.generator.Intn(int(numPartitions))), nil
|
||||
}
|
||||
|
||||
func (p *randomPartitioner) RequiresConsistency() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type roundRobinPartitioner struct {
|
||||
partition int32
|
||||
}
|
||||
|
||||
// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
|
||||
func NewRoundRobinPartitioner(topic string) Partitioner {
|
||||
return &roundRobinPartitioner{}
|
||||
}
|
||||
|
||||
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
if p.partition >= numPartitions {
|
||||
p.partition = 0
|
||||
}
|
||||
ret := p.partition
|
||||
p.partition++
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (p *roundRobinPartitioner) RequiresConsistency() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type hashPartitioner struct {
|
||||
random Partitioner
|
||||
hasher hash.Hash32
|
||||
}
|
||||
|
||||
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
|
||||
// The argument is a function that returns an instance implementing the hash.Hash32 interface. This is to ensure that
|
||||
// each partition dispatcher gets its own hasher, avoiding the concurrency issues of sharing an instance.
|
||||
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
|
||||
return func(topic string) Partitioner {
|
||||
p := new(hashPartitioner)
|
||||
p.random = NewRandomPartitioner(topic)
|
||||
p.hasher = hasher()
|
||||
return p
|
||||
}
|
||||
}
|
||||
|
||||
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
|
||||
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
|
||||
// modulus the number of partitions. This ensures that messages with the same key always end up on the
|
||||
// same partition.
|
||||
func NewHashPartitioner(topic string) Partitioner {
|
||||
p := new(hashPartitioner)
|
||||
p.random = NewRandomPartitioner(topic)
|
||||
p.hasher = fnv.New32a()
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
if message.Key == nil {
|
||||
return p.random.Partition(message, numPartitions)
|
||||
}
|
||||
bytes, err := message.Key.Encode()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
p.hasher.Reset()
|
||||
_, err = p.hasher.Write(bytes)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
partition := int32(p.hasher.Sum32()) % numPartitions
|
||||
if partition < 0 {
|
||||
partition = -partition
|
||||
}
|
||||
return partition, nil
|
||||
}
|
||||
|
||||
func (p *hashPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
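A usage sketch for the hash partitioners above, wired into a producer. The Config.Producer fields, NewSyncProducer and SendMessage are assumed from the rest of this package (they are not part of this file), and the broker address and topic are placeholders; the default config already uses NewHashPartitioner, so the explicit assignment only matters when swapping in a custom hasher.

package main

import (
	"hash/crc32"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by the sync producer

	// Hash keys with CRC-32 instead of the default FNV-1a; each partition
	// dispatcher gets its own hash.Hash32 instance via the constructor.
	cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(crc32.NewIEEE)

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Key:   sarama.StringEncoder("user-42"), // same key -> same partition
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}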
121
vendor/github.com/Shopify/sarama/prep_encoder.go
generated
vendored
Normal file
121
vendor/github.com/Shopify/sarama/prep_encoder.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
|||
package sarama

import (
	"fmt"
	"math"

	"github.com/rcrowley/go-metrics"
)

type prepEncoder struct {
	length int
}

// primitives

func (pe *prepEncoder) putInt8(in int8) {
	pe.length++
}

func (pe *prepEncoder) putInt16(in int16) {
	pe.length += 2
}

func (pe *prepEncoder) putInt32(in int32) {
	pe.length += 4
}

func (pe *prepEncoder) putInt64(in int64) {
	pe.length += 8
}

func (pe *prepEncoder) putArrayLength(in int) error {
	if in > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
	}
	pe.length += 4
	return nil
}

// arrays

func (pe *prepEncoder) putBytes(in []byte) error {
	pe.length += 4
	if in == nil {
		return nil
	}
	if len(in) > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
	}
	pe.length += len(in)
	return nil
}

func (pe *prepEncoder) putRawBytes(in []byte) error {
	if len(in) > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
	}
	pe.length += len(in)
	return nil
}

func (pe *prepEncoder) putString(in string) error {
	pe.length += 2
	if len(in) > math.MaxInt16 {
		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
	}
	pe.length += len(in)
	return nil
}

func (pe *prepEncoder) putStringArray(in []string) error {
	err := pe.putArrayLength(len(in))
	if err != nil {
		return err
	}

	for _, str := range in {
		if err := pe.putString(str); err != nil {
			return err
		}
	}

	return nil
}

func (pe *prepEncoder) putInt32Array(in []int32) error {
	err := pe.putArrayLength(len(in))
	if err != nil {
		return err
	}
	pe.length += 4 * len(in)
	return nil
}

func (pe *prepEncoder) putInt64Array(in []int64) error {
	err := pe.putArrayLength(len(in))
	if err != nil {
		return err
	}
	pe.length += 8 * len(in)
	return nil
}

func (pe *prepEncoder) offset() int {
	return pe.length
}

// stackable

func (pe *prepEncoder) push(in pushEncoder) {
	pe.length += in.reserveLength()
}

func (pe *prepEncoder) pop() error {
	return nil
}

// we do not record metrics during the prep encoder pass
func (pe *prepEncoder) metricRegistry() metrics.Registry {
	return nil
}
209
vendor/github.com/Shopify/sarama/produce_request.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
package sarama
|
||||
|
||||
import "github.com/rcrowley/go-metrics"
|
||||
|
||||
// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
|
||||
// it must see before responding. Any of the constants defined here are valid. On broker versions
|
||||
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
|
||||
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
|
||||
// by setting the `min.isr` value in the brokers configuration).
|
||||
type RequiredAcks int16
|
||||
|
||||
const (
|
||||
// NoResponse doesn't send any response, the TCP ACK is all you get.
|
||||
NoResponse RequiredAcks = 0
|
||||
// WaitForLocal waits for only the local commit to succeed before responding.
|
||||
WaitForLocal RequiredAcks = 1
|
||||
// WaitForAll waits for all in-sync replicas to commit before responding.
|
||||
// The minimum number of in-sync replicas is configured on the broker via
|
||||
// the `min.insync.replicas` configuration key.
|
||||
WaitForAll RequiredAcks = -1
|
||||
)
|
||||
|
||||
type ProduceRequest struct {
|
||||
RequiredAcks RequiredAcks
|
||||
Timeout int32
|
||||
Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
|
||||
msgSets map[string]map[int32]*MessageSet
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.RequiredAcks))
|
||||
pe.putInt32(r.Timeout)
|
||||
err := pe.putArrayLength(len(r.msgSets))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metricRegistry := pe.metricRegistry()
|
||||
var batchSizeMetric metrics.Histogram
|
||||
var compressionRatioMetric metrics.Histogram
|
||||
if metricRegistry != nil {
|
||||
batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
|
||||
compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
|
||||
}
|
||||
|
||||
totalRecordCount := int64(0)
|
||||
for topic, partitions := range r.msgSets {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
topicRecordCount := int64(0)
|
||||
var topicCompressionRatioMetric metrics.Histogram
|
||||
if metricRegistry != nil {
|
||||
topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
|
||||
}
|
||||
for id, msgSet := range partitions {
|
||||
startOffset := pe.offset()
|
||||
pe.putInt32(id)
|
||||
pe.push(&lengthField{})
|
||||
err = msgSet.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.pop()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if metricRegistry != nil {
|
||||
for _, messageBlock := range msgSet.Messages {
|
||||
// Is this a fake "message" wrapping real messages?
|
||||
if messageBlock.Msg.Set != nil {
|
||||
topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
|
||||
} else {
|
||||
// A single uncompressed message
|
||||
topicRecordCount++
|
||||
}
|
||||
// Better be safe than sorry when computing the compression ratio
|
||||
if messageBlock.Msg.compressedSize != 0 {
|
||||
compressionRatio := float64(len(messageBlock.Msg.Value)) /
|
||||
float64(messageBlock.Msg.compressedSize)
|
||||
// Histogram do not support decimal values, let's multiple it by 100 for better precision
|
||||
intCompressionRatio := int64(100 * compressionRatio)
|
||||
compressionRatioMetric.Update(intCompressionRatio)
|
||||
topicCompressionRatioMetric.Update(intCompressionRatio)
|
||||
}
|
||||
}
|
||||
batchSize := int64(pe.offset() - startOffset)
|
||||
batchSizeMetric.Update(batchSize)
|
||||
getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
|
||||
}
|
||||
}
|
||||
if topicRecordCount > 0 {
|
||||
getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
|
||||
getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
|
||||
totalRecordCount += topicRecordCount
|
||||
}
|
||||
}
|
||||
if totalRecordCount > 0 {
|
||||
metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
|
||||
getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
|
||||
requiredAcks, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.RequiredAcks = RequiredAcks(requiredAcks)
|
||||
if r.Timeout, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.msgSets = make(map[string]map[int32]*MessageSet)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.msgSets[topic] = make(map[int32]*MessageSet)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
messageSetSize, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgSetDecoder, err := pd.getSubset(int(messageSetSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgSet := &MessageSet{}
|
||||
err = msgSet.decode(msgSetDecoder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.msgSets[topic][partition] = msgSet
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) key() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
|
||||
if r.msgSets == nil {
|
||||
r.msgSets = make(map[string]map[int32]*MessageSet)
|
||||
}
|
||||
|
||||
if r.msgSets[topic] == nil {
|
||||
r.msgSets[topic] = make(map[int32]*MessageSet)
|
||||
}
|
||||
|
||||
set := r.msgSets[topic][partition]
|
||||
|
||||
if set == nil {
|
||||
set = new(MessageSet)
|
||||
r.msgSets[topic][partition] = set
|
||||
}
|
||||
|
||||
set.addMessage(msg)
|
||||
}
|
||||
|
||||
func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
|
||||
if r.msgSets == nil {
|
||||
r.msgSets = make(map[string]map[int32]*MessageSet)
|
||||
}
|
||||
|
||||
if r.msgSets[topic] == nil {
|
||||
r.msgSets[topic] = make(map[int32]*MessageSet)
|
||||
}
|
||||
|
||||
r.msgSets[topic][partition] = set
|
||||
}
159
vendor/github.com/Shopify/sarama/produce_response.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
package sarama
|
||||
|
||||
import "time"
|
||||
|
||||
type ProduceResponseBlock struct {
|
||||
Err KError
|
||||
Offset int64
|
||||
// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
b.Offset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if version >= 2 {
|
||||
if millis, err := pd.getInt64(); err != nil {
|
||||
return err
|
||||
} else if millis != -1 {
|
||||
b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ProduceResponse struct {
|
||||
Blocks map[string]map[int32]*ProduceResponseBlock
|
||||
Version int16
|
||||
ThrottleTime time.Duration // only provided if Version >= 1
|
||||
}
|
||||
|
||||
func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(ProduceResponseBlock)
|
||||
err = block.decode(pd, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 1 {
|
||||
millis, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.ThrottleTime = time.Duration(millis) * time.Millisecond
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ProduceResponse) encode(pe packetEncoder) error {
|
||||
err := pe.putArrayLength(len(r.Blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.Blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for id, prb := range partitions {
|
||||
pe.putInt32(id)
|
||||
pe.putInt16(int16(prb.Err))
|
||||
pe.putInt64(prb.Offset)
|
||||
}
|
||||
}
|
||||
if r.Version >= 1 {
|
||||
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ProduceResponse) key() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ProduceResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *ProduceResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
// Testing API
|
||||
|
||||
func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
|
||||
}
|
||||
byTopic, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
byTopic = make(map[int32]*ProduceResponseBlock)
|
||||
r.Blocks[topic] = byTopic
|
||||
}
|
||||
byTopic[partition] = &ProduceResponseBlock{Err: err}
|
||||
}
176
vendor/github.com/Shopify/sarama/produce_set.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
package sarama
|
||||
|
||||
import "time"
|
||||
|
||||
type partitionSet struct {
|
||||
msgs []*ProducerMessage
|
||||
setToSend *MessageSet
|
||||
bufferBytes int
|
||||
}
|
||||
|
||||
type produceSet struct {
|
||||
parent *asyncProducer
|
||||
msgs map[string]map[int32]*partitionSet
|
||||
|
||||
bufferBytes int
|
||||
bufferCount int
|
||||
}
|
||||
|
||||
func newProduceSet(parent *asyncProducer) *produceSet {
|
||||
return &produceSet{
|
||||
msgs: make(map[string]map[int32]*partitionSet),
|
||||
parent: parent,
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *produceSet) add(msg *ProducerMessage) error {
|
||||
var err error
|
||||
var key, val []byte
|
||||
|
||||
if msg.Key != nil {
|
||||
if key, err = msg.Key.Encode(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if msg.Value != nil {
|
||||
if val, err = msg.Value.Encode(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
partitions := ps.msgs[msg.Topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*partitionSet)
|
||||
ps.msgs[msg.Topic] = partitions
|
||||
}
|
||||
|
||||
set := partitions[msg.Partition]
|
||||
if set == nil {
|
||||
set = &partitionSet{setToSend: new(MessageSet)}
|
||||
partitions[msg.Partition] = set
|
||||
}
|
||||
|
||||
set.msgs = append(set.msgs, msg)
|
||||
msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
|
||||
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
if msg.Timestamp.IsZero() {
|
||||
msgToSend.Timestamp = time.Now()
|
||||
} else {
|
||||
msgToSend.Timestamp = msg.Timestamp
|
||||
}
|
||||
msgToSend.Version = 1
|
||||
}
|
||||
set.setToSend.addMessage(msgToSend)
|
||||
|
||||
size := producerMessageOverhead + len(key) + len(val)
|
||||
set.bufferBytes += size
|
||||
ps.bufferBytes += size
|
||||
ps.bufferCount++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ps *produceSet) buildRequest() *ProduceRequest {
|
||||
req := &ProduceRequest{
|
||||
RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
|
||||
Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
|
||||
}
|
||||
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
req.Version = 2
|
||||
}
|
||||
|
||||
for topic, partitionSet := range ps.msgs {
|
||||
for partition, set := range partitionSet {
|
||||
if ps.parent.conf.Producer.Compression == CompressionNone {
|
||||
req.AddSet(topic, partition, set.setToSend)
|
||||
} else {
|
||||
// When compression is enabled, the entire set for each partition is compressed
|
||||
// and sent as the payload of a single fake "message" with the appropriate codec
|
||||
// set and no key. When the server sees a message with a compression codec, it
|
||||
// decompresses the payload and treats the result as its message set.
|
||||
payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
|
||||
if err != nil {
|
||||
Logger.Println(err) // if this happens, it's basically our fault.
|
||||
panic(err)
|
||||
}
|
||||
compMsg := &Message{
|
||||
Codec: ps.parent.conf.Producer.Compression,
|
||||
Key: nil,
|
||||
Value: payload,
|
||||
Set: set.setToSend, // Provide the underlying message set for accurate metrics
|
||||
}
|
||||
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
compMsg.Version = 1
|
||||
compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
|
||||
}
|
||||
req.AddMessage(topic, partition, compMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
|
||||
for topic, partitionSet := range ps.msgs {
|
||||
for partition, set := range partitionSet {
|
||||
cb(topic, partition, set.msgs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
|
||||
if ps.msgs[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
set := ps.msgs[topic][partition]
|
||||
if set == nil {
|
||||
return nil
|
||||
}
|
||||
ps.bufferBytes -= set.bufferBytes
|
||||
ps.bufferCount -= len(set.msgs)
|
||||
delete(ps.msgs[topic], partition)
|
||||
return set.msgs
|
||||
}
|
||||
|
||||
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
|
||||
switch {
|
||||
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
|
||||
case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
|
||||
return true
|
||||
// Would we overflow the size-limit of a compressed message-batch for this partition?
|
||||
case ps.parent.conf.Producer.Compression != CompressionNone &&
|
||||
ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
|
||||
ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
|
||||
return true
|
||||
// Would we overflow simply in number of messages?
|
||||
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *produceSet) readyToFlush() bool {
|
||||
switch {
|
||||
// If we don't have any messages, nothing else matters
|
||||
case ps.empty():
|
||||
return false
|
||||
// If all three config values are 0, we always flush as-fast-as-possible
|
||||
case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
|
||||
return true
|
||||
// If we've passed the message trigger-point
|
||||
case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
|
||||
return true
|
||||
// If we've passed the byte trigger-point
|
||||
case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *produceSet) empty() bool {
|
||||
return ps.bufferCount == 0
|
||||
}
260
vendor/github.com/Shopify/sarama/real_decoder.go
generated
vendored
Normal file
@@ -0,0 +1,260 @@
package sarama
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
)
|
||||
|
||||
var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
|
||||
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
|
||||
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
|
||||
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
|
||||
|
||||
type realDecoder struct {
|
||||
raw []byte
|
||||
off int
|
||||
stack []pushDecoder
|
||||
}
|
||||
|
||||
// primitives
|
||||
|
||||
func (rd *realDecoder) getInt8() (int8, error) {
|
||||
if rd.remaining() < 1 {
|
||||
rd.off = len(rd.raw)
|
||||
return -1, ErrInsufficientData
|
||||
}
|
||||
tmp := int8(rd.raw[rd.off])
|
||||
rd.off++
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getInt16() (int16, error) {
|
||||
if rd.remaining() < 2 {
|
||||
rd.off = len(rd.raw)
|
||||
return -1, ErrInsufficientData
|
||||
}
|
||||
tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
|
||||
rd.off += 2
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getInt32() (int32, error) {
|
||||
if rd.remaining() < 4 {
|
||||
rd.off = len(rd.raw)
|
||||
return -1, ErrInsufficientData
|
||||
}
|
||||
tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
|
||||
rd.off += 4
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getInt64() (int64, error) {
|
||||
if rd.remaining() < 8 {
|
||||
rd.off = len(rd.raw)
|
||||
return -1, ErrInsufficientData
|
||||
}
|
||||
tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
|
||||
rd.off += 8
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getArrayLength() (int, error) {
|
||||
if rd.remaining() < 4 {
|
||||
rd.off = len(rd.raw)
|
||||
return -1, ErrInsufficientData
|
||||
}
|
||||
tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
|
||||
rd.off += 4
|
||||
if tmp > rd.remaining() {
|
||||
rd.off = len(rd.raw)
|
||||
return -1, ErrInsufficientData
|
||||
} else if tmp > 2*math.MaxUint16 {
|
||||
return -1, errInvalidArrayLength
|
||||
}
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
// collections
|
||||
|
||||
func (rd *realDecoder) getBytes() ([]byte, error) {
|
||||
tmp, err := rd.getInt32()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n := int(tmp)
|
||||
|
||||
switch {
|
||||
case n < -1:
|
||||
return nil, errInvalidByteSliceLength
|
||||
case n == -1:
|
||||
return nil, nil
|
||||
case n == 0:
|
||||
return make([]byte, 0), nil
|
||||
case n > rd.remaining():
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
|
||||
tmpStr := rd.raw[rd.off : rd.off+n]
|
||||
rd.off += n
|
||||
return tmpStr, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getString() (string, error) {
|
||||
tmp, err := rd.getInt16()
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
n := int(tmp)
|
||||
|
||||
switch {
|
||||
case n < -1:
|
||||
return "", errInvalidStringLength
|
||||
case n == -1:
|
||||
return "", nil
|
||||
case n == 0:
|
||||
return "", nil
|
||||
case n > rd.remaining():
|
||||
rd.off = len(rd.raw)
|
||||
return "", ErrInsufficientData
|
||||
}
|
||||
|
||||
tmpStr := string(rd.raw[rd.off : rd.off+n])
|
||||
rd.off += n
|
||||
return tmpStr, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getInt32Array() ([]int32, error) {
|
||||
if rd.remaining() < 4 {
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
|
||||
rd.off += 4
|
||||
|
||||
if rd.remaining() < 4*n {
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
return nil, errInvalidArrayLength
|
||||
}
|
||||
|
||||
ret := make([]int32, n)
|
||||
for i := range ret {
|
||||
ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
|
||||
rd.off += 4
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getInt64Array() ([]int64, error) {
|
||||
if rd.remaining() < 4 {
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
|
||||
rd.off += 4
|
||||
|
||||
if rd.remaining() < 8*n {
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
return nil, errInvalidArrayLength
|
||||
}
|
||||
|
||||
ret := make([]int64, n)
|
||||
for i := range ret {
|
||||
ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
|
||||
rd.off += 8
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getStringArray() ([]string, error) {
|
||||
if rd.remaining() < 4 {
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
|
||||
rd.off += 4
|
||||
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
return nil, errInvalidArrayLength
|
||||
}
|
||||
|
||||
ret := make([]string, n)
|
||||
for i := range ret {
|
||||
str, err := rd.getString()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret[i] = str
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// subsets
|
||||
|
||||
func (rd *realDecoder) remaining() int {
|
||||
return len(rd.raw) - rd.off
|
||||
}
|
||||
|
||||
func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
|
||||
if length < 0 {
|
||||
return nil, errInvalidSubsetSize
|
||||
} else if length > rd.remaining() {
|
||||
rd.off = len(rd.raw)
|
||||
return nil, ErrInsufficientData
|
||||
}
|
||||
|
||||
start := rd.off
|
||||
rd.off += length
|
||||
return &realDecoder{raw: rd.raw[start:rd.off]}, nil
|
||||
}
|
||||
|
||||
// stacks
|
||||
|
||||
func (rd *realDecoder) push(in pushDecoder) error {
|
||||
in.saveOffset(rd.off)
|
||||
|
||||
reserve := in.reserveLength()
|
||||
if rd.remaining() < reserve {
|
||||
rd.off = len(rd.raw)
|
||||
return ErrInsufficientData
|
||||
}
|
||||
|
||||
rd.stack = append(rd.stack, in)
|
||||
|
||||
rd.off += reserve
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rd *realDecoder) pop() error {
|
||||
// this is go's ugly pop pattern (the inverse of append)
|
||||
in := rd.stack[len(rd.stack)-1]
|
||||
rd.stack = rd.stack[:len(rd.stack)-1]
|
||||
|
||||
return in.check(rd.off, rd.raw)
|
||||
}
129
vendor/github.com/Shopify/sarama/real_encoder.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
package sarama

import (
	"encoding/binary"

	"github.com/rcrowley/go-metrics"
)

type realEncoder struct {
	raw      []byte
	off      int
	stack    []pushEncoder
	registry metrics.Registry
}

// primitives

func (re *realEncoder) putInt8(in int8) {
	re.raw[re.off] = byte(in)
	re.off++
}

func (re *realEncoder) putInt16(in int16) {
	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
	re.off += 2
}

func (re *realEncoder) putInt32(in int32) {
	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
	re.off += 4
}

func (re *realEncoder) putInt64(in int64) {
	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
	re.off += 8
}

func (re *realEncoder) putArrayLength(in int) error {
	re.putInt32(int32(in))
	return nil
}

// collection

func (re *realEncoder) putRawBytes(in []byte) error {
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return nil
}

func (re *realEncoder) putBytes(in []byte) error {
	if in == nil {
		re.putInt32(-1)
		return nil
	}
	re.putInt32(int32(len(in)))
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return nil
}

func (re *realEncoder) putString(in string) error {
	re.putInt16(int16(len(in)))
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return nil
}

func (re *realEncoder) putStringArray(in []string) error {
	err := re.putArrayLength(len(in))
	if err != nil {
		return err
	}

	for _, val := range in {
		if err := re.putString(val); err != nil {
			return err
		}
	}

	return nil
}

func (re *realEncoder) putInt32Array(in []int32) error {
	err := re.putArrayLength(len(in))
	if err != nil {
		return err
	}
	for _, val := range in {
		re.putInt32(val)
	}
	return nil
}

func (re *realEncoder) putInt64Array(in []int64) error {
	err := re.putArrayLength(len(in))
	if err != nil {
		return err
	}
	for _, val := range in {
		re.putInt64(val)
	}
	return nil
}

func (re *realEncoder) offset() int {
	return re.off
}

// stacks

func (re *realEncoder) push(in pushEncoder) {
	in.saveOffset(re.off)
	re.off += in.reserveLength()
	re.stack = append(re.stack, in)
}

func (re *realEncoder) pop() error {
	// this is go's ugly pop pattern (the inverse of append)
	in := re.stack[len(re.stack)-1]
	re.stack = re.stack[:len(re.stack)-1]

	return in.run(re.off, re.raw)
}

// we do record metrics during the real encoder pass
func (re *realEncoder) metricRegistry() metrics.Registry {
	return re.registry
}
119
vendor/github.com/Shopify/sarama/request.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
package sarama

import (
	"encoding/binary"
	"fmt"
	"io"
)

type protocolBody interface {
	encoder
	versionedDecoder
	key() int16
	version() int16
	requiredVersion() KafkaVersion
}

type request struct {
	correlationID int32
	clientID      string
	body          protocolBody
}

func (r *request) encode(pe packetEncoder) (err error) {
	pe.push(&lengthField{})
	pe.putInt16(r.body.key())
	pe.putInt16(r.body.version())
	pe.putInt32(r.correlationID)
	err = pe.putString(r.clientID)
	if err != nil {
		return err
	}
	err = r.body.encode(pe)
	if err != nil {
		return err
	}
	return pe.pop()
}

func (r *request) decode(pd packetDecoder) (err error) {
	var key int16
	if key, err = pd.getInt16(); err != nil {
		return err
	}
	var version int16
	if version, err = pd.getInt16(); err != nil {
		return err
	}
	if r.correlationID, err = pd.getInt32(); err != nil {
		return err
	}
	r.clientID, err = pd.getString()

	r.body = allocateBody(key, version)
	if r.body == nil {
		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
	}
	return r.body.decode(pd, version)
}

func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
	lengthBytes := make([]byte, 4)
	if _, err := io.ReadFull(r, lengthBytes); err != nil {
		return nil, bytesRead, err
	}
	bytesRead += len(lengthBytes)

	length := int32(binary.BigEndian.Uint32(lengthBytes))
	if length <= 4 || length > MaxRequestSize {
		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
	}

	encodedReq := make([]byte, length)
	if _, err := io.ReadFull(r, encodedReq); err != nil {
		return nil, bytesRead, err
	}
	bytesRead += len(encodedReq)

	req = &request{}
	if err := decode(encodedReq, req); err != nil {
		return nil, bytesRead, err
	}
	return req, bytesRead, nil
}

func allocateBody(key, version int16) protocolBody {
	switch key {
	case 0:
		return &ProduceRequest{}
	case 1:
		return &FetchRequest{}
	case 2:
		return &OffsetRequest{Version: version}
	case 3:
		return &MetadataRequest{}
	case 8:
		return &OffsetCommitRequest{Version: version}
	case 9:
		return &OffsetFetchRequest{}
	case 10:
		return &ConsumerMetadataRequest{}
	case 11:
		return &JoinGroupRequest{}
	case 12:
		return &HeartbeatRequest{}
	case 13:
		return &LeaveGroupRequest{}
	case 14:
		return &SyncGroupRequest{}
	case 15:
		return &DescribeGroupsRequest{}
	case 16:
		return &ListGroupsRequest{}
	case 17:
		return &SaslHandshakeRequest{}
	case 18:
		return &ApiVersionsRequest{}
	}
	return nil
}
21
vendor/github.com/Shopify/sarama/response_header.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package sarama

import "fmt"

type responseHeader struct {
	length        int32
	correlationID int32
}

func (r *responseHeader) decode(pd packetDecoder) (err error) {
	r.length, err = pd.getInt32()
	if err != nil {
		return err
	}
	if r.length <= 4 || r.length > MaxResponseSize {
		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
	}

	r.correlationID, err = pd.getInt32()
	return err
}
99
vendor/github.com/Shopify/sarama/sarama.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
/*
|
||||
Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
|
||||
API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
|
||||
API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
|
||||
|
||||
To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
|
||||
and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
|
||||
The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
|
||||
useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
|
||||
depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
|
||||
SyncProducer can still sometimes be lost.
|
||||
|
||||
To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
|
||||
consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
|
||||
https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
|
||||
and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
|
||||
|
||||
For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
|
||||
and message sent on the wire; the Client provides higher-level metadata management that is shared between
|
||||
the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
|
||||
exactly with the protocol fields documented by Kafka at
|
||||
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
|
||||
|
||||
Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
|
||||
|
||||
Broker related metrics:
|
||||
|
||||
+----------------------------------------------+------------+---------------------------------------------------------------+
|
||||
| Name | Type | Description |
|
||||
+----------------------------------------------+------------+---------------------------------------------------------------+
|
||||
| incoming-byte-rate | meter | Bytes/second read off all brokers |
|
||||
| incoming-byte-rate-for-broker-<broker-id> | meter | Bytes/second read off a given broker |
|
||||
| outgoing-byte-rate | meter | Bytes/second written off all brokers |
|
||||
| outgoing-byte-rate-for-broker-<broker-id> | meter | Bytes/second written off a given broker |
|
||||
| request-rate | meter | Requests/second sent to all brokers |
|
||||
| request-rate-for-broker-<broker-id> | meter | Requests/second sent to a given broker |
|
||||
| request-size | histogram | Distribution of the request size in bytes for all brokers |
|
||||
| request-size-for-broker-<broker-id> | histogram | Distribution of the request size in bytes for a given broker |
|
||||
| request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
|
||||
| request-latency-in-ms-for-broker-<broker-id> | histogram | Distribution of the request latency in ms for a given broker |
|
||||
| response-rate | meter | Responses/second received from all brokers |
|
||||
| response-rate-for-broker-<broker-id> | meter | Responses/second received from a given broker |
|
||||
| response-size | histogram | Distribution of the response size in bytes for all brokers |
|
||||
| response-size-for-broker-<broker-id> | histogram | Distribution of the response size in bytes for a given broker |
|
||||
+----------------------------------------------+------------+---------------------------------------------------------------+
|
||||
|
||||
Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
|
||||
|
||||
Producer related metrics:
|
||||
|
||||
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
|
||||
| Name | Type | Description |
|
||||
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
|
||||
| batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
|
||||
| batch-size-for-topic-<topic> | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
|
||||
| record-send-rate | meter | Records/second sent to all topics |
|
||||
| record-send-rate-for-topic-<topic> | meter | Records/second sent to a given topic |
|
||||
| records-per-request | histogram | Distribution of the number of records sent per request for all topics |
|
||||
| records-per-request-for-topic-<topic> | histogram | Distribution of the number of records sent per request for a given topic |
|
||||
| compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
|
||||
| compression-ratio-for-topic-<topic> | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
|
||||
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
|
||||
|
||||
*/
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Logger is the instance of a StdLogger interface that Sarama writes connection
|
||||
// management events to. By default it is set to discard all log messages via ioutil.Discard,
|
||||
// but you can set it to redirect wherever you want.
|
||||
var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
|
||||
|
||||
// StdLogger is used to log error messages.
|
||||
type StdLogger interface {
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// PanicHandler is called for recovering from panics spawned internally to the library (and thus
|
||||
// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
|
||||
var PanicHandler func(interface{})
|
||||
|
||||
// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
|
||||
// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned
|
||||
// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
|
||||
// to process.
|
||||
var MaxRequestSize int32 = 100 * 1024 * 1024
|
||||
|
||||
// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
|
||||
// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
|
||||
// protect the client from running out of memory. Please note that brokers do not have any natural limit on
|
||||
// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
|
||||
// (see https://issues.apache.org/jira/browse/KAFKA-2063).
|
||||
var MaxResponseSize int32 = 100 * 1024 * 1024
33
vendor/github.com/Shopify/sarama/sasl_handshake_request.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
package sarama

type SaslHandshakeRequest struct {
	Mechanism string
}

func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.Mechanism); err != nil {
		return err
	}

	return nil
}

func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.Mechanism, err = pd.getString(); err != nil {
		return err
	}

	return nil
}

func (r *SaslHandshakeRequest) key() int16 {
	return 17
}

func (r *SaslHandshakeRequest) version() int16 {
	return 0
}

func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
38
vendor/github.com/Shopify/sarama/sasl_handshake_response.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
package sarama

type SaslHandshakeResponse struct {
	Err               KError
	EnabledMechanisms []string
}

func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	return pe.putStringArray(r.EnabledMechanisms)
}

func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}

	r.Err = KError(kerr)

	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
		return err
	}

	return nil
}

func (r *SaslHandshakeResponse) key() int16 {
	return 17
}

func (r *SaslHandshakeResponse) version() int16 {
	return 0
}

func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
100
vendor/github.com/Shopify/sarama/sync_group_request.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
package sarama

type SyncGroupRequest struct {
	GroupId          string
	GenerationId     int32
	MemberId         string
	GroupAssignments map[string][]byte
}

func (r *SyncGroupRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.GroupId); err != nil {
		return err
	}

	pe.putInt32(r.GenerationId)

	if err := pe.putString(r.MemberId); err != nil {
		return err
	}

	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
		return err
	}
	for memberId, memberAssignment := range r.GroupAssignments {
		if err := pe.putString(memberId); err != nil {
			return err
		}
		if err := pe.putBytes(memberAssignment); err != nil {
			return err
		}
	}

	return nil
}

func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.GroupId, err = pd.getString(); err != nil {
		return
	}
	if r.GenerationId, err = pd.getInt32(); err != nil {
		return
	}
	if r.MemberId, err = pd.getString(); err != nil {
		return
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil
	}

	r.GroupAssignments = make(map[string][]byte)
	for i := 0; i < n; i++ {
		memberId, err := pd.getString()
		if err != nil {
			return err
		}
		memberAssignment, err := pd.getBytes()
		if err != nil {
			return err
		}

		r.GroupAssignments[memberId] = memberAssignment
	}

	return nil
}

func (r *SyncGroupRequest) key() int16 {
	return 14
}

func (r *SyncGroupRequest) version() int16 {
	return 0
}

func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}

func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
	if r.GroupAssignments == nil {
		r.GroupAssignments = make(map[string][]byte)
	}

	r.GroupAssignments[memberId] = memberAssignment
}

func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
	bin, err := encode(memberAssignment, nil)
	if err != nil {
		return err
	}

	r.AddGroupAssignment(memberId, bin)
	return nil
}
41
vendor/github.com/Shopify/sarama/sync_group_response.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
package sarama

type SyncGroupResponse struct {
	Err              KError
	MemberAssignment []byte
}

func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
	assignment := new(ConsumerGroupMemberAssignment)
	err := decode(r.MemberAssignment, assignment)
	return assignment, err
}

func (r *SyncGroupResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	return pe.putBytes(r.MemberAssignment)
}

func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}

	r.Err = KError(kerr)

	r.MemberAssignment, err = pd.getBytes()
	return
}

func (r *SyncGroupResponse) key() int16 {
	return 14
}

func (r *SyncGroupResponse) version() int16 {
	return 0
}

func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
164
vendor/github.com/Shopify/sarama/sync_producer.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
package sarama

import "sync"

// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope.
//
// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`.
// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
//
// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
// be set to true in its configuration.
type SyncProducer interface {

	// SendMessage produces a given message, and returns only when it either has
	// succeeded or failed to produce. It will return the partition and the offset
	// of the produced message, or an error if the message failed to produce.
	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)

	// SendMessages produces a given set of messages, and returns only when all
	// messages in the set have either succeeded or failed. Note that messages
	// can succeed and fail individually; if some succeed and some fail,
	// SendMessages will return an error.
	SendMessages(msgs []*ProducerMessage) error

	// Close shuts down the producer and waits for any buffered messages to be
	// flushed. You must call this function before a producer object passes out of
	// scope, as it may otherwise leak memory. You must call this before calling
	// Close on the underlying client.
	Close() error
}

type syncProducer struct {
	producer *asyncProducer
	wg       sync.WaitGroup
}

// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
	if config == nil {
		config = NewConfig()
		config.Producer.Return.Successes = true
	}

	if err := verifyProducerConfig(config); err != nil {
		return nil, err
	}

	p, err := NewAsyncProducer(addrs, config)
	if err != nil {
		return nil, err
	}
	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}

// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
	if err := verifyProducerConfig(client.Config()); err != nil {
		return nil, err
	}

	p, err := NewAsyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}
	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}

func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
	sp := &syncProducer{producer: p}

	sp.wg.Add(2)
	go withRecover(sp.handleSuccesses)
	go withRecover(sp.handleErrors)

	return sp
}

func verifyProducerConfig(config *Config) error {
	if !config.Producer.Return.Errors {
		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
	}
	if !config.Producer.Return.Successes {
		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
	}
	return nil
}

func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
	oldMetadata := msg.Metadata
	defer func() {
		msg.Metadata = oldMetadata
	}()

	expectation := make(chan *ProducerError, 1)
	msg.Metadata = expectation
	sp.producer.Input() <- msg

	if err := <-expectation; err != nil {
		return -1, -1, err.Err
	}

	return msg.Partition, msg.Offset, nil
}

func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
	savedMetadata := make([]interface{}, len(msgs))
	for i := range msgs {
		savedMetadata[i] = msgs[i].Metadata
	}
	defer func() {
		for i := range msgs {
			msgs[i].Metadata = savedMetadata[i]
		}
	}()

	expectations := make(chan chan *ProducerError, len(msgs))
	go func() {
		for _, msg := range msgs {
			expectation := make(chan *ProducerError, 1)
			msg.Metadata = expectation
			sp.producer.Input() <- msg
			expectations <- expectation
		}
		close(expectations)
	}()

	var errors ProducerErrors
	for expectation := range expectations {
		if err := <-expectation; err != nil {
			errors = append(errors, err)
		}
	}

	if len(errors) > 0 {
		return errors
	}
	return nil
}

func (sp *syncProducer) handleSuccesses() {
	defer sp.wg.Done()
	for msg := range sp.producer.Successes() {
		expectation := msg.Metadata.(chan *ProducerError)
		expectation <- nil
	}
}

func (sp *syncProducer) handleErrors() {
	defer sp.wg.Done()
	for err := range sp.producer.Errors() {
		expectation := err.Msg.Metadata.(chan *ProducerError)
		expectation <- err
	}
}

func (sp *syncProducer) Close() error {
	sp.producer.AsyncClose()
	sp.wg.Wait()
	return nil
}
150
vendor/github.com/Shopify/sarama/utils.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
package sarama

import (
	"bufio"
	"net"
)

type none struct{}

// make []int32 sortable so we can sort partition numbers
type int32Slice []int32

func (slice int32Slice) Len() int {
	return len(slice)
}

func (slice int32Slice) Less(i, j int) bool {
	return slice[i] < slice[j]
}

func (slice int32Slice) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

func dupInt32Slice(input []int32) []int32 {
	ret := make([]int32, 0, len(input))
	for _, val := range input {
		ret = append(ret, val)
	}
	return ret
}

func withRecover(fn func()) {
	defer func() {
		handler := PanicHandler
		if handler != nil {
			if err := recover(); err != nil {
				handler(err)
			}
		}
	}()

	fn()
}

func safeAsyncClose(b *Broker) {
	tmp := b // local var prevents clobbering in goroutine
	go withRecover(func() {
		if connected, _ := tmp.Connected(); connected {
			if err := tmp.Close(); err != nil {
				Logger.Println("Error closing broker", tmp.ID(), ":", err)
			}
		}
	})
}

// Encoder is a simple interface for any type that can be encoded as an array of bytes
// in order to be sent as the key or value of a Kafka message. Length() is provided as an
// optimization, and must return the same as len() on the result of Encode().
type Encoder interface {
	Encode() ([]byte, error)
	Length() int
}

// make strings and byte slices encodable for convenience so they can be used as keys
// and/or values in kafka messages

// StringEncoder implements the Encoder interface for Go strings so that they can be used
// as the Key or Value in a ProducerMessage.
type StringEncoder string

func (s StringEncoder) Encode() ([]byte, error) {
	return []byte(s), nil
}

func (s StringEncoder) Length() int {
	return len(s)
}

// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
// as the Key or Value in a ProducerMessage.
type ByteEncoder []byte

func (b ByteEncoder) Encode() ([]byte, error) {
	return b, nil
}

func (b ByteEncoder) Length() int {
	return len(b)
}

// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
// reads that trigger syscalls.
type bufConn struct {
	net.Conn
	buf *bufio.Reader
}

func newBufConn(conn net.Conn) *bufConn {
	return &bufConn{
		Conn: conn,
		buf:  bufio.NewReader(conn),
	}
}

func (bc *bufConn) Read(b []byte) (n int, err error) {
	return bc.buf.Read(b)
}

// KafkaVersion instances represent versions of the upstream Kafka broker.
type KafkaVersion struct {
	// it's a struct rather than just typing the array directly to make it opaque and stop people
	// generating their own arbitrary versions
	version [4]uint
}

func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
	return KafkaVersion{
		version: [4]uint{major, minor, veryMinor, patch},
	}
}

// IsAtLeast return true if and only if the version it is called on is
// greater than or equal to the version passed in:
//	V1.IsAtLeast(V2) // false
//	V2.IsAtLeast(V1) // true
func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
	for i := range v.version {
		if v.version[i] > other.version[i] {
			return true
		} else if v.version[i] < other.version[i] {
			return false
		}
	}
	return true
}

// Effective constants defining the supported kafka versions.
var (
	V0_8_2_0   = newKafkaVersion(0, 8, 2, 0)
	V0_8_2_1   = newKafkaVersion(0, 8, 2, 1)
	V0_8_2_2   = newKafkaVersion(0, 8, 2, 2)
	V0_9_0_0   = newKafkaVersion(0, 9, 0, 0)
	V0_9_0_1   = newKafkaVersion(0, 9, 0, 1)
	V0_10_0_0  = newKafkaVersion(0, 10, 0, 0)
	V0_10_0_1  = newKafkaVersion(0, 10, 0, 1)
	V0_10_1_0  = newKafkaVersion(0, 10, 1, 0)
	V0_10_2_0  = newKafkaVersion(0, 10, 2, 0)
	minVersion = V0_8_2_0
)