
Merge branch v2.1 into master

Fernandez Ludovic 2020-02-10 16:03:39 +01:00
commit aa21351d0d
76 changed files with 392 additions and 395 deletions


@@ -179,12 +179,12 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
p.renewCertificates(ctx)
ticker := time.NewTicker(24 * time.Hour)
-pool.Go(func(stop chan bool) {
+pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case <-ticker.C:
p.renewCertificates(ctx)
-case <-stop:
+case <-ctxPool.Done():
ticker.Stop()
return
}
@@ -341,7 +341,7 @@ func (p *Provider) resolveDomains(ctx context.Context, domains []string, tlsStor
}
func (p *Provider) watchNewDomains(ctx context.Context) {
-p.pool.Go(func(stop chan bool) {
+p.pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case config := <-p.configFromListenerChan:
@@ -415,7 +415,7 @@ func (p *Provider) watchNewDomains(ctx context.Context) {
p.resolveDomains(ctxRouter, domains, tlsStore)
}
}
-case <-stop:
+case <-ctxPool.Done():
return
}
}
@@ -556,7 +556,7 @@ func deleteUnnecessaryDomains(ctx context.Context, domains []types.Domain) []typ
func (p *Provider) watchCertificate(ctx context.Context) {
p.certsChan = make(chan *CertAndStore)
-p.pool.Go(func(stop chan bool) {
+p.pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case cert := <-p.certsChan:
@@ -576,7 +576,7 @@ func (p *Provider) watchCertificate(ctx context.Context) {
if err != nil {
log.FromContext(ctx).Error(err)
}
-case <-stop:
+case <-ctxPool.Done():
return
}
}

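The recurring change in this merge: goroutines that used to receive a stop chan bool from safe.Pool.Go now receive a context.Context from safe.Pool.GoCtx and select on ctxPool.Done() instead of the stop channel. Below is a minimal standalone sketch of that pattern; the Pool type here is a simplified stand-in for Traefik's safe.Pool, not the real implementation.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// Pool is a simplified stand-in for Traefik's safe.Pool: it tracks
// started goroutines and cancels their shared context on Stop.
type Pool struct {
	wg     sync.WaitGroup
	ctx    context.Context
	cancel context.CancelFunc
}

func NewPool() *Pool {
	ctx, cancel := context.WithCancel(context.Background())
	return &Pool{ctx: ctx, cancel: cancel}
}

// GoCtx mirrors the signature used throughout this diff: the goroutine
// receives a context instead of a stop channel.
func (p *Pool) GoCtx(goroutine func(ctx context.Context)) {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		goroutine(p.ctx)
	}()
}

// Stop cancels the shared context and waits for every goroutine to return.
func (p *Pool) Stop() {
	p.cancel()
	p.wg.Wait()
}

func main() {
	pool := NewPool()
	ticker := time.NewTicker(100 * time.Millisecond)

	pool.GoCtx(func(ctxPool context.Context) {
		for {
			select {
			case <-ticker.C:
				fmt.Println("tick") // periodic work, e.g. certificate renewal
			case <-ctxPool.Done(): // replaces the old `case <-stop:`
				ticker.Stop()
				return
			}
		}
	})

	time.Sleep(350 * time.Millisecond)
	pool.Stop()
}

A context also composes better than a bare channel: the same ctxPool can be forwarded to APIs that accept a context, or converted to a stop channel via ctxPool.Done(), as the Kubernetes and KV providers below do.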

@@ -103,11 +103,11 @@ func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationCh
}
// Process events
-pool.Go(func(stop chan bool) {
+pool.GoCtx(func(ctx context.Context) {
defer watcher.Close()
for {
select {
-case <-stop:
+case <-ctx.Done():
return
case evt := <-watcher.Events:
if p.Directory == "" {

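The file provider's event loop gets the same treatment, with the deferred watcher.Close() now triggered by context cancellation. A rough sketch of an fsnotify event loop driven by a context (the directory argument and the WithTimeout driver are illustrative, not Traefik's actual wiring):

package main

import (
	"context"
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

// watchDir processes filesystem events until ctx is cancelled; the
// deferred Close mirrors the `defer watcher.Close()` in the hunk above.
func watchDir(ctx context.Context, dir string) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	if err := watcher.Add(dir); err != nil {
		watcher.Close()
		return err
	}

	go func() {
		defer watcher.Close()
		for {
			select {
			case <-ctx.Done(): // replaces the old stop channel
				return
			case evt := <-watcher.Events:
				log.Printf("event: %s", evt)
			case err := <-watcher.Errors:
				log.Printf("watch error: %v", err)
			}
		}
	}()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if err := watchDir(ctx, "."); err != nil {
		log.Fatal(err)
	}
	<-ctx.Done()
}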

@@ -66,7 +66,7 @@ metadata:
spec:
ports:
-- name: web-secure
+- name: websecure
port: 443
targetPort: 8443
selector:
@@ -85,7 +85,7 @@ subsets:
- ip: 10.10.0.5
- ip: 10.10.0.6
ports:
-- name: web-secure
+- name: websecure
port: 8443
---
@@ -97,7 +97,7 @@ metadata:
spec:
ports:
-- name: web-secure2
+- name: websecure2
port: 8443
scheme: https
selector:
@@ -116,5 +116,5 @@ subsets:
- ip: 10.10.0.7
- ip: 10.10.0.8
ports:
-- name: web-secure2
+- name: websecure2
port: 8443


@@ -66,7 +66,7 @@ metadata:
spec:
ports:
-- name: web-secure
+- name: websecure
port: 443
selector:
app: containous
@@ -84,7 +84,7 @@ subsets:
- ip: 10.10.0.5
- ip: 10.10.0.6
ports:
-- name: web-secure
+- name: websecure
port: 443
---


@@ -98,11 +98,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
return err
}
-pool.Go(func(stop chan bool) {
+pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
-stopWatch := make(chan struct{}, 1)
-defer close(stopWatch)
-eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
+eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
@@ -110,20 +108,20 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
select {
case <-timer.C:
return err
-case <-stop:
+case <-ctxPool.Done():
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
-throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
+throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
-case <-stop:
+case <-ctxPool.Done():
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this throttling interval -- if we're hitting our throttle, we may have dropped events.
@@ -156,7 +154,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %v; retrying in %s", err, time)
}
-err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
+err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %v", err)
}
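Besides swapping the stop channel for ctxPool.Done(), this file wraps the retry policy in backoff.WithContext, so the RetryNotify loop stops scheduling new attempts once the pool shuts down. A hedged sketch with github.com/cenkalti/backoff (the v4 import path and the always-failing operation are assumptions for illustration; Traefik additionally wraps the exponential policy in job.NewBackOff):

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Stand-in for a provider that cannot reach its backend (illustrative).
	operation := func() error {
		return errors.New("connection refused")
	}

	notify := func(err error, wait time.Duration) {
		log.Printf("Provider connection error: %v; retrying in %s", err, wait)
	}

	// WithContext ties the retry loop to ctx: once ctx is done,
	// RetryNotify returns instead of scheduling another attempt.
	policy := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
	if err := backoff.RetryNotify(operation, policy, notify); err != nil {
		log.Printf("Cannot connect to Provider: %v", err)
	}
}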
@@ -625,7 +623,7 @@ func getCABlocks(secret *corev1.Secret, namespace, secretName string) (string, e
return cert, nil
}
-func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
+func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
@@ -635,10 +633,10 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
// Run a goroutine that reads events from eventChan and does a non-blocking write to pendingEvent.
// This guarantees that writing to eventChan will never block,
// and that pendingEvent will have something in it if there's been an event since we read from that channel.
-go func() {
+pool.GoCtx(func(ctxPool context.Context) {
for {
select {
-case <-stop:
+case <-ctxPool.Done():
return
case nextEvent := <-eventsChan:
select {
@@ -650,7 +648,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
}
}
}
-}()
+})
return eventsChanBuffered
}
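throttleEvents relies on a channel of capacity 1 plus a non-blocking send: if an event is already pending, the newer one is dropped, so reads from eventsChan never stall the producer. The change in this hunk is only that the forwarding goroutine now runs under pool.GoCtx rather than a bare go func, so it is cancelled together with the pool. A standalone sketch of the buffering trick (names are illustrative):

package main

import (
	"context"
	"fmt"
)

// throttle forwards events from in to a channel with capacity 1.
// The non-blocking send drops an event when one is already pending,
// so the producer side never stalls.
func throttle(ctx context.Context, in <-chan int) <-chan int {
	out := make(chan int, 1)
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case ev := <-in:
				select {
				case out <- ev:
					// ev is now the pending event
				default:
					// an event is already pending; drop ev
				}
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	in := make(chan int)
	out := throttle(ctx, in)

	for i := 0; i < 5; i++ {
		in <- i // a burst, faster than anyone reads from out
	}
	fmt.Println(<-out) // prints 0: the burst collapsed to one pending event
}

Running it prints 0: the burst of five events collapses to the single pending one.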


@@ -0,0 +1,11 @@
+kind: Endpoints
+apiVersion: v1
+metadata:
+  name: service1
+  namespace: testing
+subsets:
+- addresses:
+  - ip: 10.10.0.1
+  ports:
+  - port: 8080


@@ -0,0 +1,15 @@
+kind: Ingress
+apiVersion: networking.k8s.io/v1beta1
+metadata:
+  name: ""
+  namespace: testing
+spec:
+  rules:
+  - host: "*.foobar.com"
+    http:
+      paths:
+      - path: /bar
+        backend:
+          serviceName: service1
+          servicePort: 80


@@ -0,0 +1,11 @@
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: service1
+  namespace: testing
+spec:
+  ports:
+  - port: 80
+  clusterIp: 10.0.0.1


@@ -105,32 +105,29 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
return err
}
-pool.Go(func(stop chan bool) {
+pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
-stopWatch := make(chan struct{}, 1)
-defer close(stopWatch)
-eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
+eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
return err
-case <-stop:
+case <-ctxPool.Done():
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
-throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
+throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
-case <-stop:
+case <-ctxPool.Done():
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this
@@ -165,7 +162,8 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %s; retrying in %s", err, time)
}
-err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
+err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %s", err)
}
@@ -267,6 +265,7 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
serviceName := provider.Normalize(ingress.Namespace + "-" + pa.Backend.ServiceName + "-" + pa.Backend.ServicePort.String())
conf.HTTP.Services[serviceName] = service
routerKey := strings.TrimPrefix(provider.Normalize(rule.Host+pa.Path), "-")
conf.HTTP.Routers[routerKey] = loadRouter(ingress, rule, pa, rtConfig, serviceName)
@@ -323,6 +322,14 @@ func (p *Provider) updateIngressStatus(ing *v1beta1.Ingress, k8sClient Client) e
return k8sClient.UpdateIngressStatus(ing, service.Status.LoadBalancer.Ingress[0].IP, service.Status.LoadBalancer.Ingress[0].Hostname)
}
+func buildHostRule(host string) string {
+if strings.HasPrefix(host, "*.") {
+return "HostRegexp(`" + strings.Replace(host, "*.", "{subdomain:[a-zA-Z0-9-]+}.", 1) + "`)"
+}
+return "Host(`" + host + "`)"
+}
func shouldProcessIngress(ingressClass string, ingressClassAnnotation string) bool {
return ingressClass == ingressClassAnnotation ||
(len(ingressClass) == 0 && ingressClassAnnotation == traefikDefaultIngressClass)
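buildHostRule maps a wildcard host to a HostRegexp matcher with a subdomain capture group, while plain hosts keep the exact Host matcher. Its behavior can be checked with a small driver (the function body is copied verbatim from the hunk above; main is just a harness):

package main

import (
	"fmt"
	"strings"
)

// buildHostRule is the function added in the hunk above, copied verbatim.
func buildHostRule(host string) string {
	if strings.HasPrefix(host, "*.") {
		return "HostRegexp(`" + strings.Replace(host, "*.", "{subdomain:[a-zA-Z0-9-]+}.", 1) + "`)"
	}
	return "Host(`" + host + "`)"
}

func main() {
	fmt.Println(buildHostRule("*.foobar.com")) // HostRegexp(`{subdomain:[a-zA-Z0-9-]+}.foobar.com`)
	fmt.Println(buildHostRule("foobar.com"))   // Host(`foobar.com`)
}

The first output line matches the rule expected by the new test case further down.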
@@ -522,7 +529,7 @@ func getProtocol(portSpec corev1.ServicePort, portName string, svcConfig *Servic
func loadRouter(ingress *v1beta1.Ingress, rule v1beta1.IngressRule, pa v1beta1.HTTPIngressPath, rtConfig *RouterConfig, serviceName string) *dynamic.Router {
var rules []string
if len(rule.Host) > 0 {
rules = []string{"Host(`" + rule.Host + "`)"}
rules = []string{buildHostRule(rule.Host)}
}
if len(pa.Path) > 0 {
@@ -562,7 +569,7 @@ func checkStringQuoteValidity(value string) error {
return err
}
-func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
+func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
@@ -573,10 +580,10 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
// non-blocking write to pendingEvent. This guarantees that writing to
// eventChan will never block, and that pendingEvent will have
// something in it if there's been an event since we read from that channel.
-go func() {
+pool.GoCtx(func(ctxPool context.Context) {
for {
select {
-case <-stop:
+case <-ctxPool.Done():
return
case nextEvent := <-eventsChan:
select {
@@ -590,7 +597,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
}
}
}
-}()
+})
return eventsChanBuffered
}


@@ -980,6 +980,35 @@ func TestLoadConfigurationFromIngresses(t *testing.T) {
},
},
},
+{
+desc: "Ingress with wildcard host",
+expected: &dynamic.Configuration{
+TCP: &dynamic.TCPConfiguration{},
+HTTP: &dynamic.HTTPConfiguration{
+Middlewares: map[string]*dynamic.Middleware{},
+Routers: map[string]*dynamic.Router{
+"foobar-com-bar": {
+Rule: "HostRegexp(`{subdomain:[a-zA-Z0-9-]+}.foobar.com`) && PathPrefix(`/bar`)",
+Service: "testing-service1-80",
+},
+},
+Services: map[string]*dynamic.Service{
+"testing-service1-80": {
+LoadBalancer: &dynamic.ServersLoadBalancer{
+PassHostHeader: Bool(true),
+Servers: []dynamic.Server{
+{
+URL: "http://10.10.0.1:8080",
+Scheme: "",
+Port: "",
+},
+},
+},
+},
+},
+},
+},
+},
}
for _, test := range testCases {


@@ -61,7 +61,6 @@ func (p *Provider) Init(storeType store.Backend, name string) error {
// Provide allows the docker provider to provide configurations to traefik using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
ctx := log.With(context.Background(), log.Str(log.ProviderName, p.name))
logger := log.FromContext(ctx)
operation := func() error {
@@ -89,8 +88,10 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}
}
-pool.Go(func(stop chan bool) {
-err := p.watchKv(ctx, configurationChan, p.RootKey, stop)
+pool.GoCtx(func(ctxPool context.Context) {
+ctxLog := log.With(ctxPool, log.Str(log.ProviderName, p.name))
+err := p.watchKv(ctxLog, configurationChan)
if err != nil {
logger.Errorf("Cannot watch KV store: %v", err)
}
@@ -99,16 +100,16 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
return nil
}
-func (p *Provider) watchKv(ctx context.Context, configurationChan chan<- dynamic.Message, prefix string, stop chan bool) error {
+func (p *Provider) watchKv(ctx context.Context, configurationChan chan<- dynamic.Message) error {
operation := func() error {
-events, err := p.kvClient.WatchTree(p.RootKey, make(chan struct{}), nil)
+events, err := p.kvClient.WatchTree(p.RootKey, ctx.Done(), nil)
if err != nil {
return fmt.Errorf("failed to watch KV: %w", err)
}
for {
select {
-case <-stop:
+case <-ctx.Done():
return nil
case _, ok := <-events:
if !ok {
@@ -133,7 +134,9 @@ func (p *Provider) watchKv(ctx context.Context, configurationChan chan<- dynamic
notify := func(err error, time time.Duration) {
log.FromContext(ctx).Errorf("KV connection error: %+v, retrying in %s", err, time)
}
-err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
+err := backoff.RetryNotify(safe.OperationWithRecover(operation),
+backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctx), notify)
if err != nil {
return fmt.Errorf("cannot connect to KV server: %w", err)
}
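Since ctx.Done() returns a <-chan struct{}, it can be handed directly to APIs that expect a stop channel, which is how WatchTree is cancelled here; the same context also bounds the retries via backoff.WithContext. A minimal sketch of the stop-channel idea (watchTree is an invented stand-in for the KV client, not the real WatchTree signature):

package main

import (
	"context"
	"fmt"
	"time"
)

// watchTree is an invented stand-in for a KV client's WatchTree:
// it emits updates until its stop channel is closed.
func watchTree(prefix string, stopCh <-chan struct{}) (<-chan string, error) {
	events := make(chan string)
	go func() {
		defer close(events)
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-stopCh:
				return
			case <-ticker.C:
				events <- prefix + ": updated"
			}
		}
	}()
	return events, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	// ctx.Done() is a <-chan struct{}, so it slots in directly where
	// the API expects a stop channel, as with WatchTree in the hunk above.
	events, err := watchTree("traefik", ctx.Done())
	if err != nil {
		panic(err)
	}
	for ev := range events {
		fmt.Println(ev)
	}
}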


@@ -857,7 +857,7 @@ func TestKvWatchTree(t *testing.T) {
configChan := make(chan dynamic.Message)
go func() {
-err := provider.watchKv(context.Background(), configChan, "prefix", make(chan bool, 1))
+err := provider.watchKv(context.Background(), configChan)
require.NoError(t, err)
}()


@@ -159,11 +159,11 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
logger.Errorf("Failed to register for events, %s", err)
return err
}
-pool.Go(func(stop chan bool) {
+pool.GoCtx(func(ctxPool context.Context) {
defer close(update)
for {
select {
-case <-stop:
+case <-ctxPool.Done():
return
case event := <-update:
logger.Debugf("Received provider event %s", event)