
Fix kubernetes providers shutdown and clean safe.Pool

commit 1b63c95c4e (parent c80d53e7e5)
Author: Julien Salleyron
Date: 2020-02-03 17:56:04 +01:00, committed by GitHub
11 changed files with 73 additions and 190 deletions

View file

@@ -179,12 +179,12 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
p.renewCertificates(ctx)
ticker := time.NewTicker(24 * time.Hour)
pool.Go(func(stop chan bool) {
pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case <-ticker.C:
p.renewCertificates(ctx)
case <-stop:
case <-ctxPool.Done():
ticker.Stop()
return
}
@@ -341,7 +341,7 @@ func (p *Provider) resolveDomains(ctx context.Context, domains []string, tlsStor
}
func (p *Provider) watchNewDomains(ctx context.Context) {
p.pool.Go(func(stop chan bool) {
p.pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case config := <-p.configFromListenerChan:
@@ -415,7 +415,7 @@ func (p *Provider) watchNewDomains(ctx context.Context) {
p.resolveDomains(ctxRouter, domains, tlsStore)
}
}
case <-stop:
case <-ctxPool.Done():
return
}
}
@@ -556,7 +556,7 @@ func deleteUnnecessaryDomains(ctx context.Context, domains []types.Domain) []typ
func (p *Provider) watchCertificate(ctx context.Context) {
p.certsChan = make(chan *CertAndStore)
p.pool.Go(func(stop chan bool) {
p.pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case cert := <-p.certsChan:
@@ -576,7 +576,7 @@ func (p *Provider) watchCertificate(ctx context.Context) {
if err != nil {
log.FromContext(ctx).Error(err)
}
case <-stop:
case <-ctxPool.Done():
return
}
}
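The change above is the core of the commit: routines launched on safe.Pool stop listening on a per-routine stop channel and instead receive a context that the pool cancels on shutdown. Below is a minimal, self-contained sketch of the two shapes; the pool itself is stood in for by a plain context, so the names and signatures are illustrative rather than the exact pkg/safe API.

package main

import (
	"context"
	"fmt"
	"time"
)

// Old shape: each routine selects on an explicit stop channel that the
// pool has to close on shutdown.
func oldRoutine(stop chan bool) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("old: tick")
		case <-stop:
			return
		}
	}
}

// New shape: the routine selects on ctx.Done(); cancelling one context
// (what the pool does when it stops) reaches every routine at once.
func newRoutine(ctx context.Context) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("new: tick")
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	stop := make(chan bool)
	go oldRoutine(stop)

	ctx, cancel := context.WithCancel(context.Background())
	go newRoutine(ctx)

	time.Sleep(1200 * time.Millisecond)
	close(stop) // old-style shutdown: one channel per routine
	cancel()    // new-style shutdown: one context for the whole pool
	time.Sleep(100 * time.Millisecond)
}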

View file

@@ -103,11 +103,11 @@ func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationCh
}
// Process events
pool.Go(func(stop chan bool) {
pool.GoCtx(func(ctx context.Context) {
defer watcher.Close()
for {
select {
case <-stop:
case <-ctx.Done():
return
case evt := <-watcher.Events:
if p.Directory == "" {
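In the file provider the same swap also ties the watcher cleanup to the context: defer watcher.Close() now runs once the pool's context is cancelled and the loop returns. A rough sketch of that shape, assuming github.com/fsnotify/fsnotify; the directory and logging below are illustrative only, not the provider's actual code.

package main

import (
	"context"
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

func watchDir(ctx context.Context, dir string) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	// Runs when ctx is cancelled and the loop below returns, which is
	// exactly what the pool's shutdown now triggers.
	defer watcher.Close()

	if err := watcher.Add(dir); err != nil {
		return err
	}

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case evt := <-watcher.Events:
			log.Printf("event: %s", evt)
		case err := <-watcher.Errors:
			log.Printf("watch error: %v", err)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := watchDir(ctx, "."); err != nil {
		log.Println(err)
	}
}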

View file

@@ -98,11 +98,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
return err
}
pool.Go(func(stop chan bool) {
pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
stopWatch := make(chan struct{}, 1)
defer close(stopWatch)
eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
@@ -110,20 +108,20 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
select {
case <-timer.C:
return err
case <-stop:
case <-ctxPool.Done():
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
case <-stop:
case <-ctxPool.Done():
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this throttling interval -- if we're hitting our throttle, we may have dropped events.
@@ -156,7 +154,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %v; retrying in %s", err, time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %v", err)
}
@@ -625,7 +623,7 @@ func getCABlocks(secret *corev1.Secret, namespace, secretName string) (string, e
return cert, nil
}
func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
@@ -635,10 +633,10 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
// Run a goroutine that reads events from eventChan and does a non-blocking write to pendingEvent.
// This guarantees that writing to eventChan will never block,
// and that pendingEvent will have something in it if there's been an event since we read from that channel.
go func() {
pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case <-stop:
case <-ctxPool.Done():
return
case nextEvent := <-eventsChan:
select {
@@ -650,7 +648,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
}
}
}
}()
})
return eventsChanBuffered
}
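Two things change in this kubernetes provider: WatchAll now takes ctxPool.Done() as its stop channel (the locally managed stopWatch disappears), and the retry loop is wrapped in backoff.WithContext so a shutdown also interrupts the waits between reconnection attempts. The following is a rough sketch of that retry shape, using github.com/cenkalti/backoff/v4 with a placeholder operation standing in for the real WatchAll/job.NewBackOff wiring.

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// watchOnce is a placeholder for one attempt at establishing and running
// the watch; it pretends the connection drops after a short while.
func watchOnce(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return nil // shutdown: stop without reporting an error
	case <-time.After(300 * time.Millisecond):
		return errors.New("watch connection lost") // force a retry in this demo
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	operation := func() error { return watchOnce(ctx) }
	notify := func(err error, d time.Duration) {
		log.Printf("provider connection error: %v; retrying in %s", err, d)
	}

	// backoff.WithContext makes RetryNotify return as soon as ctx is
	// cancelled, instead of sleeping through the next backoff interval.
	err := backoff.RetryNotify(operation,
		backoff.WithContext(backoff.NewExponentialBackOff(), ctx), notify)
	if err != nil {
		log.Printf("cannot connect to provider: %v", err)
	}
}

With this shape, stopping the pool both ends the watch (via the Done channel) and aborts any in-flight backoff wait, which is what lets the provider exit promptly instead of hanging on shutdown.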

View file

@@ -104,32 +104,29 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
return err
}
pool.Go(func(stop chan bool) {
pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
stopWatch := make(chan struct{}, 1)
defer close(stopWatch)
eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
return err
case <-stop:
case <-ctxPool.Done():
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
case <-stop:
case <-ctxPool.Done():
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this
@@ -164,7 +161,8 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %s; retrying in %s", err, time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %s", err)
}
@@ -517,7 +515,7 @@ func (p *Provider) updateIngressStatus(i *v1beta1.Ingress, k8sClient Client) err
return k8sClient.UpdateIngressStatus(i.Namespace, i.Name, service.Status.LoadBalancer.Ingress[0].IP, service.Status.LoadBalancer.Ingress[0].Hostname)
}
func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
@@ -528,10 +526,10 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
// non-blocking write to pendingEvent. This guarantees that writing to
// eventChan will never block, and that pendingEvent will have
// something in it if there's been an event since we read from that channel.
go func() {
pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case <-stop:
case <-ctxPool.Done():
return
case nextEvent := <-eventsChan:
select {
@@ -545,7 +543,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
}
}
}
}()
})
return eventsChanBuffered
}
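throttleEvents now receives the pool rather than a stop channel, so the buffering goroutine it starts goes through pool.GoCtx and is shut down together with every other routine. Below is a self-contained sketch of that single-slot buffer, with a plain goroutine standing in for the pool; channel and function names are illustrative.

package main

import (
	"context"
	"fmt"
	"time"
)

// bufferEvents keeps at most one pending event and drops newer ones, so
// writers to in are never blocked; it exits when ctx is cancelled, as the
// reworked throttleEvents does.
func bufferEvents(ctx context.Context, in <-chan interface{}) <-chan interface{} {
	out := make(chan interface{}, 1)
	go func() { // the real code starts this through pool.GoCtx
		for {
			select {
			case <-ctx.Done():
				return
			case ev := <-in:
				select {
				case out <- ev:
				default:
					// out already holds a pending event; drop this one.
				}
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	in := make(chan interface{})
	out := bufferEvents(ctx, in)

	go func() {
		for i := 0; i < 5; i++ {
			in <- i
		}
		cancel()
	}()

	time.Sleep(100 * time.Millisecond)
	fmt.Println("first pending event:", <-out) // the later events were dropped
}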

View file

@@ -159,11 +159,11 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
logger.Errorf("Failed to register for events, %s", err)
return err
}
pool.Go(func(stop chan bool) {
pool.GoCtx(func(ctxPool context.Context) {
defer close(update)
for {
select {
case <-stop:
case <-ctxPool.Done():
return
case event := <-update:
logger.Debugf("Received provider event %s", event)