Refactor configuration reload/throttling
Co-authored-by: Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
parent 764bf59d4d
commit 5780dc2b15
15 changed files with 872 additions and 242 deletions
pkg/provider/aggregator/aggregator.go

@@ -1,6 +1,9 @@
 package aggregator
 
 import (
+	"context"
+	"time"
+
 	"github.com/traefik/traefik/v2/pkg/config/dynamic"
 	"github.com/traefik/traefik/v2/pkg/config/static"
 	"github.com/traefik/traefik/v2/pkg/log"
@@ -11,16 +14,63 @@ import (
 	"github.com/traefik/traefik/v2/pkg/safe"
 )
 
+// throttled defines what kind of config refresh throttling the aggregator should
+// set up for a given provider.
+// If a provider implements throttled, the configuration changes it sends will be
+// taken into account no more often than the frequency inferred from ThrottleDuration().
+// If ThrottleDuration returns zero, no throttling will take place.
+// If throttled is not implemented, the throttling will be set up in accordance
+// with the global providersThrottleDuration option.
+type throttled interface {
+	ThrottleDuration() time.Duration
+}
+
+// maybeThrottledProvide returns the Provide method of the given provider,
+// potentially augmented with some throttling depending on whether and how the
+// provider implements the throttled interface.
+func maybeThrottledProvide(prd provider.Provider, defaultDuration time.Duration) func(chan<- dynamic.Message, *safe.Pool) error {
+	providerThrottleDuration := defaultDuration
+	if throttled, ok := prd.(throttled); ok {
+		// per-provider throttling
+		providerThrottleDuration = throttled.ThrottleDuration()
+	}
+
+	if providerThrottleDuration == 0 {
+		// throttling disabled
+		return prd.Provide
+	}
+
+	return func(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
+		rc := newRingChannel()
+		pool.GoCtx(func(ctx context.Context) {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case msg := <-rc.out():
+					configurationChan <- msg
+					time.Sleep(providerThrottleDuration)
+				}
+			}
+		})
+
+		return prd.Provide(rc.in(), pool)
+	}
+}
+
 // ProviderAggregator aggregates providers.
 type ProviderAggregator struct {
-	internalProvider provider.Provider
-	fileProvider     provider.Provider
-	providers        []provider.Provider
+	internalProvider          provider.Provider
+	fileProvider              provider.Provider
+	providers                 []provider.Provider
+	providersThrottleDuration time.Duration
 }
 
 // NewProviderAggregator returns an aggregate of all the providers configured in the static configuration.
 func NewProviderAggregator(conf static.Providers) ProviderAggregator {
-	p := ProviderAggregator{}
+	p := ProviderAggregator{
+		providersThrottleDuration: time.Duration(conf.ProvidersThrottleDuration),
+	}
 
 	if conf.File != nil {
 		p.quietAddProvider(conf.File)
@@ -119,26 +169,26 @@ func (p ProviderAggregator) Init() error {
 // Provide calls the provide method of every providers.
 func (p ProviderAggregator) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
 	if p.fileProvider != nil {
-		launchProvider(configurationChan, pool, p.fileProvider)
+		p.launchProvider(configurationChan, pool, p.fileProvider)
 	}
 
 	for _, prd := range p.providers {
 		prd := prd
 		safe.Go(func() {
-			launchProvider(configurationChan, pool, prd)
+			p.launchProvider(configurationChan, pool, prd)
 		})
 	}
 
 	// internal provider must be the last because we use it to know if all the providers are loaded.
 	// ConfigurationWatcher will wait for this requiredProvider before applying configurations.
 	if p.internalProvider != nil {
-		launchProvider(configurationChan, pool, p.internalProvider)
+		p.launchProvider(configurationChan, pool, p.internalProvider)
 	}
 
 	return nil
 }
 
-func launchProvider(configurationChan chan<- dynamic.Message, pool *safe.Pool, prd provider.Provider) {
+func (p ProviderAggregator) launchProvider(configurationChan chan<- dynamic.Message, pool *safe.Pool, prd provider.Provider) {
 	jsonConf, err := redactor.RemoveCredentials(prd)
 	if err != nil {
 		log.WithoutContext().Debugf("Cannot marshal the provider configuration %T: %v", prd, err)
@@ -147,9 +197,8 @@ func launchProvider(configurationChan chan<- dynamic.Message, pool *safe.Pool, prd provider.Provider) {
 	log.WithoutContext().Infof("Starting provider %T", prd)
 	log.WithoutContext().Debugf("%T provider configuration: %s", prd, jsonConf)
 
-	currentProvider := prd
-	err = currentProvider.Provide(configurationChan, pool)
-	if err != nil {
+	if err := maybeThrottledProvide(prd, p.providersThrottleDuration)(configurationChan, pool); err != nil {
 		log.WithoutContext().Errorf("Cannot start the provider %T: %v", prd, err)
 		return
 	}
 }
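Under this scheme, a provider opts in to its own refresh rate just by exposing a ThrottleDuration method. Below is a minimal in-package sketch, not part of this commit: exampleProvider and its fields are invented for illustration, and only the ThrottleDuration, Provide, and Init signatures come from the code above.

// A hypothetical provider, for illustration only (it must live in package
// aggregator to see the unexported throttled interface).
package aggregator

import (
	"time"

	"github.com/traefik/traefik/v2/pkg/config/dynamic"
	"github.com/traefik/traefik/v2/pkg/safe"
)

// exampleProvider is hypothetical; it emits a fixed set of messages.
type exampleProvider struct {
	throttle time.Duration
	messages []dynamic.Message
}

// ThrottleDuration satisfies the throttled interface, so maybeThrottledProvide
// uses this value instead of the global providersThrottleDuration default.
func (p exampleProvider) ThrottleDuration() time.Duration {
	return p.throttle
}

// Provide sends every message; with throttling active, the aggregator routes
// these through a ring channel and forwards at most one message per throttle
// period, discarding stale intermediate ones.
func (p exampleProvider) Provide(configurationChan chan<- dynamic.Message, _ *safe.Pool) error {
	for _, msg := range p.messages {
		configurationChan <- msg
	}
	return nil
}

// Init implements provider.Provider.
func (p exampleProvider) Init() error { return nil }

Returning 0 from ThrottleDuration keeps the direct prd.Provide path, which is how a provider can explicitly opt out of throttling altogether.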
pkg/provider/aggregator/ring_channel.go (new file, 71 lines)
@@ -0,0 +1,71 @@
+package aggregator
+
+import (
+	"github.com/traefik/traefik/v2/pkg/config/dynamic"
+)
+
+// RingChannel implements a channel in a way that never blocks the writer.
+// Specifically, if a value is written to a RingChannel when its buffer is full then the oldest
+// value in the buffer is discarded to make room (just like a standard ring-buffer).
+// Note that Go's scheduler can cause discarded values when they could be avoided, simply by scheduling
+// the writer before the reader, so caveat emptor.
+type RingChannel struct {
+	input, output chan dynamic.Message
+	buffer        *dynamic.Message
+}
+
+func newRingChannel() *RingChannel {
+	ch := &RingChannel{
+		input:  make(chan dynamic.Message),
+		output: make(chan dynamic.Message),
+	}
+	go ch.ringBuffer()
+	return ch
+}
+
+func (ch *RingChannel) in() chan<- dynamic.Message {
+	return ch.input
+}
+
+func (ch *RingChannel) out() <-chan dynamic.Message {
+	return ch.output
+}
+
+// for all buffered cases.
+func (ch *RingChannel) ringBuffer() {
+	var input, output chan dynamic.Message
+	var next dynamic.Message
+	input = ch.input
+
+	for input != nil || output != nil {
+		select {
+		// Prefer to write if possible, which is surprisingly effective in reducing
+		// dropped elements due to overflow. The naive read/write select chooses randomly
+		// when both channels are ready, which produces unnecessary drops 50% of the time.
+		case output <- next:
+			ch.buffer = nil
+		default:
+			select {
+			case elem, open := <-input:
+				if !open {
+					input = nil
+					break
+				}
+
+				ch.buffer = &elem
+			case output <- next:
+				ch.buffer = nil
+			}
+		}
+
+		if ch.buffer == nil {
+			output = nil
+			continue
+		}
+
+		output = ch.output
+		next = *ch.buffer
+	}
+
+	close(ch.output)
+}
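The ring channel is what lets the throttling goroutine sleep between forwards without ever blocking the producing provider. A rough in-package sketch of the intended semantics follows; demoRingChannel is a hypothetical helper, not part of the commit.

// Illustration-only code, assuming it lives in package aggregator so it can
// reach the unexported newRingChannel, in, and out.
package aggregator

import (
	"fmt"

	"github.com/traefik/traefik/v2/pkg/config/dynamic"
)

// demoRingChannel shows the drop-oldest behavior: with no reader draining
// out(), a burst of writes to in() never deadlocks, and only the most recent
// message is retained.
func demoRingChannel() {
	rc := newRingChannel()

	// Burst three messages with no reader attached; the internal ringBuffer
	// goroutine absorbs each send and keeps only the latest in its
	// one-element buffer.
	for _, name := range []string{"first", "second", "third"} {
		rc.in() <- dynamic.Message{ProviderName: name}
	}

	// The first read yields the newest message; the older two were dropped.
	msg := <-rc.out()
	fmt.Println(msg.ProviderName) // "third"
}

This drop-oldest behavior is why maybeThrottledProvide can simply sleep between forwards: while it sleeps, newer configurations keep replacing the single buffered message, so the next forward always picks up the freshest state.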