Fix kubernetes providers shutdown and clean safe.Pool

Julien Salleyron 2020-02-03 17:56:04 +01:00 committed by GitHub
parent c80d53e7e5
commit 1b63c95c4e
11 changed files with 73 additions and 190 deletions
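The diff below moves every watcher goroutine from safe.Pool.Go, which handed each routine a stop chan bool, to safe.Pool.GoCtx, which hands it a context.Context that is cancelled when the pool shuts down. As orientation, here is a minimal sketch of the pool shape the diff assumes; the constructor and field names are illustrative, not copied from Traefik's safe package.

package safe

import (
    "context"
    "sync"
)

// routineCtx is a goroutine that is expected to return once its context is done.
type routineCtx func(ctx context.Context)

// Pool groups goroutines behind a single cancellable context.
type Pool struct {
    waitGroup sync.WaitGroup
    ctx       context.Context
    cancel    context.CancelFunc
}

// NewPool derives the pool context from a parent context.
func NewPool(parentCtx context.Context) *Pool {
    ctx, cancel := context.WithCancel(parentCtx)
    return &Pool{ctx: ctx, cancel: cancel}
}

// GoCtx starts a routine and passes it the pool context.
func (p *Pool) GoCtx(goroutine routineCtx) {
    p.waitGroup.Add(1)
    go func() {
        defer p.waitGroup.Done()
        goroutine(p.ctx)
    }()
}

// Stop cancels the pool context and waits for every routine to return.
func (p *Pool) Stop() {
    p.cancel()
    p.waitGroup.Wait()
}

With this shape, stopping the server reduces to one cancel plus a WaitGroup wait, instead of tracking one stop channel per routine.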


@@ -1,6 +1,7 @@
 package server
 
 import (
+    "context"
     "encoding/json"
     "reflect"
     "time"
@@ -49,8 +50,8 @@ func NewConfigurationWatcher(routinesPool *safe.Pool, pvd provider.Provider, pro
 // Start the configuration watcher.
 func (c *ConfigurationWatcher) Start() {
-    c.routinesPool.Go(c.listenProviders)
-    c.routinesPool.Go(c.listenConfigurations)
+    c.routinesPool.GoCtx(c.listenProviders)
+    c.routinesPool.GoCtx(c.listenConfigurations)
     c.startProvider()
 }
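Passing c.listenProviders and c.listenConfigurations straight to GoCtx works because a method value with the func(context.Context) shape satisfies the routine type assumed in the sketch above. A tiny stand-alone illustration of that mechanism, with all names made up for this example:

package main

import (
    "context"
    "fmt"
    "time"
)

// routineCtx matches the routine type the sketched GoCtx accepts.
type routineCtx func(ctx context.Context)

// goCtx mimics Pool.GoCtx from the sketch above, minus the WaitGroup bookkeeping.
func goCtx(ctx context.Context, r routineCtx) { go r(ctx) }

type watcher struct{ name string }

// run has the func(context.Context) shape, so the method value w.run can be
// handed to goCtx directly, just like c.listenProviders in the hunk above.
func (w *watcher) run(ctx context.Context) {
    <-ctx.Done()
    fmt.Println(w.name, "stopped")
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    w := &watcher{name: "listenProviders stand-in"}

    goCtx(ctx, w.run) // method value bound to w

    cancel()
    time.Sleep(50 * time.Millisecond) // let the goroutine print before exiting
}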
@@ -90,10 +91,10 @@ func (c *ConfigurationWatcher) startProvider() {
 // listenProviders receives configuration changes from the providers.
 // The configuration message then gets passed along a series of check
 // to finally end up in a throttler that sends it to listenConfigurations (through c.configurationValidatedChan).
-func (c *ConfigurationWatcher) listenProviders(stop chan bool) {
+func (c *ConfigurationWatcher) listenProviders(ctx context.Context) {
     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case configMsg, ok := <-c.configurationChan:
             if !ok {
@@ -111,10 +112,10 @@ func (c *ConfigurationWatcher) listenProviders(stop chan bool) {
     }
 }
 
-func (c *ConfigurationWatcher) listenConfigurations(stop chan bool) {
+func (c *ConfigurationWatcher) listenConfigurations(ctx context.Context) {
     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case configMsg, ok := <-c.configurationValidatedChan:
             if !ok || configMsg.Configuration == nil {
@@ -178,8 +179,8 @@ func (c *ConfigurationWatcher) preLoadConfiguration(configMsg dynamic.Message) {
     if !ok {
         providerConfigUpdateCh = make(chan dynamic.Message)
         c.providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdateCh
-        c.routinesPool.Go(func(stop chan bool) {
-            c.throttleProviderConfigReload(c.providersThrottleDuration, c.configurationValidatedChan, providerConfigUpdateCh, stop)
+        c.routinesPool.GoCtx(func(ctxPool context.Context) {
+            c.throttleProviderConfigReload(ctxPool, c.providersThrottleDuration, c.configurationValidatedChan, providerConfigUpdateCh)
         })
     }
@@ -190,14 +191,14 @@ func (c *ConfigurationWatcher) preLoadConfiguration(configMsg dynamic.Message) {
 // It will immediately publish a new configuration and then only publish the next configuration after the throttle duration.
 // Note that in the case it receives N new configs in the timeframe of the throttle duration after publishing,
 // it will publish the last of the newly received configurations.
-func (c *ConfigurationWatcher) throttleProviderConfigReload(throttle time.Duration, publish chan<- dynamic.Message, in <-chan dynamic.Message, stop chan bool) {
+func (c *ConfigurationWatcher) throttleProviderConfigReload(ctx context.Context, throttle time.Duration, publish chan<- dynamic.Message, in <-chan dynamic.Message) {
     ring := channels.NewRingChannel(1)
     defer ring.Close()
 
-    c.routinesPool.Go(func(stop chan bool) {
+    c.routinesPool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
-            case <-stop:
+            case <-ctxPool.Done():
                 return
             case nextConfig := <-ring.Out():
                 if config, ok := nextConfig.(dynamic.Message); ok {
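The comment block above states the throttling contract: publish a configuration immediately, then at most one per throttle period, always the most recent one. Below is a self-contained sketch of that idea using a plain one-slot buffered channel in place of channels.RingChannel; it is illustrative only and does not mirror the Traefik implementation line for line.

package main

import (
    "context"
    "fmt"
    "time"
)

// throttleLatest forwards at most one value per throttle period, always the
// most recently received one, until ctx is cancelled.
func throttleLatest(ctx context.Context, throttle time.Duration, in <-chan string, publish chan<- string) {
    latest := make(chan string, 1) // one-slot buffer holding only the newest value

    // Receiver: keep overwriting the slot with whatever arrives.
    go func() {
        for {
            select {
            case <-ctx.Done():
                return
            case v := <-in:
                select {
                case <-latest: // drop the stale value, if any
                default:
                }
                latest <- v
            }
        }
    }()

    // Publisher: emit the slot's value, then wait out the throttle period.
    for {
        select {
        case <-ctx.Done():
            return
        case v := <-latest:
            publish <- v
            time.Sleep(throttle)
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    in := make(chan string)
    out := make(chan string, 10)
    go throttleLatest(ctx, 100*time.Millisecond, in, out)

    in <- "v1"
    in <- "v2"
    in <- "v3"

    fmt.Println(<-out) // the first publish is immediate; later ones are throttled and carry only the newest value
}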
@@ -210,7 +211,7 @@ func (c *ConfigurationWatcher) throttleProviderConfigReload(throttle time.Durati
     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case nextConfig := <-in:
             ring.In() <- nextConfig
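With every routine selecting on the same pool context, shutdown reduces to cancelling that context once and waiting for the routines to return, which is presumably the provider shutdown behaviour the commit title targets. A reduced, self-contained sketch of that teardown (no Traefik types, just the pattern):

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    // ctx and cancel stand in for the pool context from the sketch near the top.
    ctx, cancel := context.WithCancel(context.Background())
    done := make(chan struct{})

    // A stand-in for listenProviders or a provider watch loop.
    go func() {
        defer close(done)
        for {
            select {
            case <-ctx.Done():
                fmt.Println("watch loop stopped")
                return
            case <-time.After(50 * time.Millisecond):
                // ... poll or receive configuration updates ...
            }
        }
    }()

    // Shutdown: cancel the shared context once and wait; every routine tied
    // to it unwinds, which is the behaviour the GoCtx migration buys.
    cancel()
    <-done
}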