
Merge branch v2.3 into master

kevinpollet 2020-11-20 11:30:07 +01:00
commit 2112de6f15
36 changed files with 864 additions and 188 deletions


@@ -108,27 +108,28 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
p.client, err = createClient(p.Endpoint)
if err != nil {
return fmt.Errorf("error create consul client, %w", err)
return fmt.Errorf("unable to create consul client: %w", err)
}
// get configuration at the provider's startup.
err = p.loadConfiguration(routineCtx, configurationChan)
if err != nil {
return fmt.Errorf("failed to get consul catalog data: %w", err)
}
// Periodic refreshes.
ticker := time.NewTicker(time.Duration(p.RefreshInterval))
defer ticker.Stop()
for {
select {
case <-ticker.C:
data, err := p.getConsulServicesData(routineCtx)
err = p.loadConfiguration(routineCtx, configurationChan)
if err != nil {
logger.Errorf("error get consul catalog data, %v", err)
return err
return fmt.Errorf("failed to refresh consul catalog data: %w", err)
}
configuration := p.buildConfiguration(routineCtx, data)
configurationChan <- dynamic.Message{
ProviderName: "consulcatalog",
Configuration: configuration,
}
case <-routineCtx.Done():
ticker.Stop()
return nil
}
}
@@ -147,6 +148,20 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
return nil
}
func (p *Provider) loadConfiguration(ctx context.Context, configurationChan chan<- dynamic.Message) error {
data, err := p.getConsulServicesData(ctx)
if err != nil {
return err
}
configurationChan <- dynamic.Message{
ProviderName: "consulcatalog",
Configuration: p.buildConfiguration(ctx, data),
}
return nil
}
func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error) {
consulServiceNames, err := p.fetchServices(ctx)
if err != nil {
@@ -155,17 +170,22 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error
var data []itemData
for _, name := range consulServiceNames {
consulServices, healthServices, err := p.fetchService(ctx, name)
consulServices, statuses, err := p.fetchService(ctx, name)
if err != nil {
return nil, err
}
for i, consulService := range consulServices {
for _, consulService := range consulServices {
address := consulService.ServiceAddress
if address == "" {
address = consulService.Address
}
status, exists := statuses[consulService.ID+consulService.ServiceID]
if !exists {
status = api.HealthAny
}
item := itemData{
ID: consulService.ServiceID,
Node: consulService.Node,
@@ -174,7 +194,7 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error
Port: strconv.Itoa(consulService.ServicePort),
Labels: tagsToNeutralLabels(consulService.ServiceTags, p.Prefix),
Tags: consulService.ServiceTags,
Status: healthServices[i].Checks.AggregatedStatus(),
Status: status,
}
extraConf, err := p.getConfiguration(item)
@@ -190,13 +210,14 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error
return data, nil
}
func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.CatalogService, []*api.ServiceEntry, error) {
func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.CatalogService, map[string]string, error) {
var tagFilter string
if !p.ExposedByDefault {
tagFilter = p.Prefix + ".enable=true"
}
opts := &api.QueryOptions{AllowStale: p.Stale, RequireConsistent: p.RequireConsistent, UseCache: p.Cache}
opts = opts.WithContext(ctx)
consulServices, _, err := p.client.Catalog().Service(name, tagFilter, opts)
if err != nil {
@@ -204,7 +225,22 @@ func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.Catalo
}
healthServices, _, err := p.client.Health().Service(name, tagFilter, false, opts)
return consulServices, healthServices, err
if err != nil {
return nil, nil, err
}
// Index status by service and node so it can be retrieved from a CatalogService even if the health and services
// are not in sync.
statuses := make(map[string]string)
for _, health := range healthServices {
if health.Service == nil || health.Node == nil {
continue
}
statuses[health.Node.ID+health.Service.ID] = health.Checks.AggregatedStatus()
}
return consulServices, statuses, err
}
func (p *Provider) fetchServices(ctx context.Context) ([]string, error) {

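For reference, a minimal sketch of the status lookup the Consul catalog hunks above introduce, assuming the github.com/hashicorp/consul/api package. indexStatuses and lookupStatus are illustrative helper names, not functions from this commit; they mirror how fetchService keys health entries by node ID + service ID and how getConsulServicesData falls back to api.HealthAny when the catalog and health endpoints are out of sync.

package consulcatalog

import "github.com/hashicorp/consul/api"

// indexStatuses keys each health entry by node ID + service ID,
// mirroring the map built in fetchService above.
func indexStatuses(entries []*api.ServiceEntry) map[string]string {
	statuses := make(map[string]string)
	for _, entry := range entries {
		if entry.Service == nil || entry.Node == nil {
			continue
		}
		statuses[entry.Node.ID+entry.Service.ID] = entry.Checks.AggregatedStatus()
	}
	return statuses
}

// lookupStatus resolves a catalog service's status with the same key,
// defaulting to api.HealthAny when no health entry matches.
func lookupStatus(statuses map[string]string, svc *api.CatalogService) string {
	if status, ok := statuses[svc.ID+svc.ServiceID]; ok {
		return status
	}
	return api.HealthAny
}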

@@ -307,6 +307,13 @@ func (p Provider) getIPAddress(instance ecsInstance) string {
func getPort(instance ecsInstance, serverPort string) string {
if len(serverPort) > 0 {
for _, port := range instance.machine.ports {
containerPort := strconv.FormatInt(port.containerPort, 10)
if serverPort == containerPort {
return strconv.FormatInt(port.hostPort, 10)
}
}
return serverPort
}

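As a rough illustration of the getPort change above (the type and port values are made up, not taken from the commit): a server.port label that matches a published container port resolves to the corresponding host port, while any other non-empty value passes through unchanged.

package main

import (
	"fmt"
	"strconv"
)

// portMapping stands in for the provider's internal port representation.
type portMapping struct {
	containerPort int64
	hostPort      int64
}

// resolvePort mirrors the matching loop in getPort above.
func resolvePort(ports []portMapping, serverPort string) string {
	for _, port := range ports {
		if serverPort == strconv.FormatInt(port.containerPort, 10) {
			return strconv.FormatInt(port.hostPort, 10)
		}
	}
	return serverPort
}

func main() {
	ports := []portMapping{{containerPort: 4444, hostPort: 32123}}
	fmt.Println(resolvePort(ports, "4444")) // "32123": exposed port mapped to its host port
	fmt.Println(resolvePort(ports, "8040")) // "8040": not exposed, label value passed through
}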

@@ -1721,13 +1721,13 @@ func Test_buildConfiguration(t *testing.T) {
name("Test"),
labels(map[string]string{
"traefik.http.services.Service1.LoadBalancer.server.scheme": "h2c",
"traefik.http.services.Service1.LoadBalancer.server.port": "8080",
"traefik.http.services.Service1.LoadBalancer.server.port": "80",
}),
iMachine(
mState(ec2.InstanceStateNameRunning),
mPrivateIP("127.0.0.1"),
mPorts(
mPort(0, 80, "tcp"),
mPort(80, 8080, "tcp"),
),
),
),
@@ -1764,6 +1764,125 @@ func Test_buildConfiguration(t *testing.T) {
},
},
},
{
desc: "one container with label port not exposed by container",
containers: []ecsInstance{
instance(
name("Test"),
labels(map[string]string{
"traefik.http.services.Service1.LoadBalancer.server.scheme": "h2c",
"traefik.http.services.Service1.LoadBalancer.server.port": "8040",
}),
iMachine(
mState(ec2.InstanceStateNameRunning),
mPrivateIP("127.0.0.1"),
mPorts(
mPort(80, 8080, "tcp"),
),
),
),
},
expected: &dynamic.Configuration{
TCP: &dynamic.TCPConfiguration{
Routers: map[string]*dynamic.TCPRouter{},
Services: map[string]*dynamic.TCPService{},
},
UDP: &dynamic.UDPConfiguration{
Routers: map[string]*dynamic.UDPRouter{},
Services: map[string]*dynamic.UDPService{},
},
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"Test": {
Service: "Service1",
Rule: "Host(`Test.traefik.wtf`)",
},
},
Middlewares: map[string]*dynamic.Middleware{},
Services: map[string]*dynamic.Service{
"Service1": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Servers: []dynamic.Server{
{
URL: "h2c://127.0.0.1:8040",
},
},
PassHostHeader: Bool(true),
},
},
},
},
},
},
{
desc: "one container with label and multiple ports",
containers: []ecsInstance{
instance(
name("Test"),
labels(map[string]string{
"traefik.http.routers.Test.rule": "Host(`Test.traefik.wtf`)",
"traefik.http.routers.Test.service": "Service1",
"traefik.http.services.Service1.LoadBalancer.server.port": "4445",
"traefik.http.routers.Test2.rule": "Host(`Test.traefik.local`)",
"traefik.http.routers.Test2.service": "Service2",
"traefik.http.services.Service2.LoadBalancer.server.port": "4444",
}),
iMachine(
mState(ec2.InstanceStateNameRunning),
mPrivateIP("127.0.0.1"),
mPorts(
mPort(4444, 32123, "tcp"),
mPort(4445, 32124, "tcp"),
),
),
),
},
expected: &dynamic.Configuration{
TCP: &dynamic.TCPConfiguration{
Routers: map[string]*dynamic.TCPRouter{},
Services: map[string]*dynamic.TCPService{},
},
UDP: &dynamic.UDPConfiguration{
Routers: map[string]*dynamic.UDPRouter{},
Services: map[string]*dynamic.UDPService{},
},
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"Test": {
Service: "Service1",
Rule: "Host(`Test.traefik.wtf`)",
},
"Test2": {
Service: "Service2",
Rule: "Host(`Test.traefik.local`)",
},
},
Middlewares: map[string]*dynamic.Middleware{},
Services: map[string]*dynamic.Service{
"Service1": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Servers: []dynamic.Server{
{
URL: "http://127.0.0.1:32124",
},
},
PassHostHeader: Bool(true),
},
},
"Service2": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Servers: []dynamic.Server{
{
URL: "http://127.0.0.1:32123",
},
},
PassHostHeader: Bool(true),
},
},
},
},
},
},
{
desc: "one container with label port on two services",
containers: []ecsInstance{
@@ -2274,13 +2393,13 @@ func Test_buildConfiguration(t *testing.T) {
labels(map[string]string{
"traefik.tcp.routers.foo.rule": "HostSNI(`foo.bar`)",
"traefik.tcp.routers.foo.tls.options": "foo",
"traefik.tcp.services.foo.loadbalancer.server.port": "8080",
"traefik.tcp.services.foo.loadbalancer.server.port": "80",
}),
iMachine(
mState(ec2.InstanceStateNameRunning),
mPrivateIP("127.0.0.1"),
mPorts(
mPort(0, 80, "tcp"),
mPort(80, 8080, "tcp"),
),
),
),
@@ -2327,13 +2446,13 @@ func Test_buildConfiguration(t *testing.T) {
name("Test"),
labels(map[string]string{
"traefik.udp.routers.foo.entrypoints": "mydns",
"traefik.udp.services.foo.loadbalancer.server.port": "8080",
"traefik.udp.services.foo.loadbalancer.server.port": "80",
}),
iMachine(
mState(ec2.InstanceStateNameRunning),
mPrivateIP("127.0.0.1"),
mPorts(
mPort(0, 80, "udp"),
mPort(80, 8080, "udp"),
),
),
),
@@ -2506,14 +2625,14 @@ func Test_buildConfiguration(t *testing.T) {
instance(
name("Test"),
labels(map[string]string{
"traefik.tcp.services.foo.loadbalancer.server.port": "8080",
"traefik.tcp.services.foo.loadbalancer.server.port": "80",
"traefik.tcp.services.foo.loadbalancer.terminationdelay": "200",
}),
iMachine(
mState(ec2.InstanceStateNameRunning),
mPrivateIP("127.0.0.1"),
mPorts(
mPort(0, 80, "tcp"),
mPort(80, 8080, "tcp"),
),
),
),


@@ -151,35 +151,25 @@ func (p Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.P
operation := func() error {
awsClient, err := p.createClient(logger)
if err != nil {
return err
return fmt.Errorf("unable to create AWS client: %w", err)
}
configuration, err := p.loadECSConfig(ctxLog, awsClient)
err = p.loadConfiguration(ctxLog, awsClient, configurationChan)
if err != nil {
return err
return fmt.Errorf("failed to get ECS configuration: %w", err)
}
configurationChan <- dynamic.Message{
ProviderName: "ecs",
Configuration: configuration,
}
reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
defer reload.Stop()
ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
defer ticker.Stop()
for {
select {
case <-reload.C:
configuration, err := p.loadECSConfig(ctxLog, awsClient)
case <-ticker.C:
err = p.loadConfiguration(ctxLog, awsClient, configurationChan)
if err != nil {
logger.Errorf("Failed to load ECS configuration, error %s", err)
return err
return fmt.Errorf("failed to refresh ECS configuration: %w", err)
}
configurationChan <- dynamic.Message{
ProviderName: "ecs",
Configuration: configuration,
}
case <-routineCtx.Done():
return nil
}
@@ -198,6 +188,20 @@ func (p Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.P
return nil
}
func (p *Provider) loadConfiguration(ctx context.Context, client *awsClient, configurationChan chan<- dynamic.Message) error {
instances, err := p.listInstances(ctx, client)
if err != nil {
return err
}
configurationChan <- dynamic.Message{
ProviderName: "ecs",
Configuration: p.buildConfiguration(ctx, instances),
}
return nil
}
// Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels)
// and the EC2 instance data.
func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) {
@@ -365,15 +369,6 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
return instances, nil
}
func (p *Provider) loadECSConfig(ctx context.Context, client *awsClient) (*dynamic.Configuration, error) {
instances, err := p.listInstances(ctx, client)
if err != nil {
return nil, err
}
return p.buildConfiguration(ctx, instances), nil
}
func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, clusterName *string, ecsDatas map[string]*ecs.Task) (map[string]*ec2.Instance, error) {
logger := log.FromContext(ctx)
instanceIds := make(map[string]string)

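With this change the ECS provider follows the same shape as the Consul catalog provider above: load the configuration once at startup, push it on the configuration channel, then reload on every tick until the routine context is cancelled. A minimal sketch of that pattern (run and load are stand-in names, not the commit's code):

package provider

import (
	"context"
	"fmt"
	"time"
)

// run loads the configuration once, then refreshes it periodically until the
// context is done. load stands in for the provider's loadConfiguration method,
// which pushes a dynamic.Message on the configuration channel.
func run(ctx context.Context, interval time.Duration, load func(context.Context) error) error {
	if err := load(ctx); err != nil {
		return fmt.Errorf("failed to get initial configuration: %w", err)
	}

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if err := load(ctx); err != nil {
				return fmt.Errorf("failed to refresh configuration: %w", err)
			}
		case <-ctx.Done():
			return nil
		}
	}
}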

@@ -21,7 +21,6 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
@@ -66,13 +65,14 @@ type Client interface {
// TODO: add tests for the clientWrapper (and its methods) itself.
type clientWrapper struct {
csCrd *versioned.Clientset
csKube *kubernetes.Clientset
csCrd versioned.Interface
csKube kubernetes.Interface
factoriesCrd map[string]externalversions.SharedInformerFactory
factoriesKube map[string]informers.SharedInformerFactory
factoriesCrd map[string]externalversions.SharedInformerFactory
factoriesKube map[string]informers.SharedInformerFactory
factoriesSecret map[string]informers.SharedInformerFactory
labelSelector labels.Selector
labelSelector string
isNamespaceAll bool
watchedNamespaces []string
@@ -100,12 +100,13 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) {
return newClientImpl(csKube, csCrd), nil
}
func newClientImpl(csKube *kubernetes.Clientset, csCrd *versioned.Clientset) *clientWrapper {
func newClientImpl(csKube kubernetes.Interface, csCrd versioned.Interface) *clientWrapper {
return &clientWrapper{
csCrd: csCrd,
csKube: csKube,
factoriesCrd: make(map[string]externalversions.SharedInformerFactory),
factoriesKube: make(map[string]informers.SharedInformerFactory),
csCrd: csCrd,
csKube: csKube,
factoriesCrd: make(map[string]externalversions.SharedInformerFactory),
factoriesKube: make(map[string]informers.SharedInformerFactory),
factoriesSecret: make(map[string]informers.SharedInformerFactory),
}
}
@@ -160,16 +161,25 @@ func newExternalClusterClient(endpoint, token, caFilePath string) (*clientWrappe
// WatchAll starts namespace-specific controllers for all relevant kinds.
func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) {
eventCh := make(chan interface{}, 1)
eventHandler := c.newResourceEventHandler(eventCh)
eventHandler := &resourceEventHandler{ev: eventCh}
if len(namespaces) == 0 {
namespaces = []string{metav1.NamespaceAll}
c.isNamespaceAll = true
}
c.watchedNamespaces = namespaces
notOwnedByHelm := func(opts *metav1.ListOptions) {
opts.LabelSelector = "owner!=helm"
}
matchesLabelSelector := func(opts *metav1.ListOptions) {
opts.LabelSelector = c.labelSelector
}
for _, ns := range namespaces {
factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns))
factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns), externalversions.WithTweakListOptions(matchesLabelSelector))
factoryCrd.Traefik().V1alpha1().IngressRoutes().Informer().AddEventHandler(eventHandler)
factoryCrd.Traefik().V1alpha1().Middlewares().Informer().AddEventHandler(eventHandler)
factoryCrd.Traefik().V1alpha1().IngressRouteTCPs().Informer().AddEventHandler(eventHandler)
@@ -180,18 +190,21 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
factoryCrd.Traefik().V1alpha1().TraefikServices().Informer().AddEventHandler(eventHandler)
factoryKube := informers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, informers.WithNamespace(ns))
factoryKube.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler)
factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
factoryKube.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
factorySecret := informers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm))
factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
c.factoriesCrd[ns] = factoryCrd
c.factoriesKube[ns] = factoryKube
c.factoriesSecret[ns] = factorySecret
}
for _, ns := range namespaces {
c.factoriesCrd[ns].Start(stopCh)
c.factoriesKube[ns].Start(stopCh)
c.factoriesSecret[ns].Start(stopCh)
}
for _, ns := range namespaces {
@@ -206,6 +219,12 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
}
}
for t, ok := range c.factoriesSecret[ns].WaitForCacheSync(stopCh) {
if !ok {
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
}
}
}
return eventCh, nil
@@ -215,7 +234,7 @@ func (c *clientWrapper) GetIngressRoutes() []*v1alpha1.IngressRoute {
var result []*v1alpha1.IngressRoute
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(c.labelSelector)
ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list ingress routes in namespace %s: %v", ns, err)
}
@@ -229,7 +248,7 @@ func (c *clientWrapper) GetIngressRouteTCPs() []*v1alpha1.IngressRouteTCP {
var result []*v1alpha1.IngressRouteTCP
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(c.labelSelector)
ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list tcp ingress routes in namespace %s: %v", ns, err)
}
@@ -243,7 +262,7 @@ func (c *clientWrapper) GetIngressRouteUDPs() []*v1alpha1.IngressRouteUDP {
var result []*v1alpha1.IngressRouteUDP
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(c.labelSelector)
ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list udp ingress routes in namespace %s: %v", ns, err)
}
@@ -257,7 +276,7 @@ func (c *clientWrapper) GetMiddlewares() []*v1alpha1.Middleware {
var result []*v1alpha1.Middleware
for ns, factory := range c.factoriesCrd {
middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(c.labelSelector)
middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list middlewares in namespace %s: %v", ns, err)
}
@@ -283,7 +302,7 @@ func (c *clientWrapper) GetTraefikServices() []*v1alpha1.TraefikService {
var result []*v1alpha1.TraefikService
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(c.labelSelector)
ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list Traefik services in namespace %s: %v", ns, err)
}
@@ -298,7 +317,7 @@ func (c *clientWrapper) GetServersTransports() []*v1alpha1.ServersTransport {
var result []*v1alpha1.ServersTransport
for ns, factory := range c.factoriesCrd {
serversTransports, err := factory.Traefik().V1alpha1().ServersTransports().Lister().List(c.labelSelector)
serversTransports, err := factory.Traefik().V1alpha1().ServersTransports().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list servers transport in namespace %s: %v", ns, err)
}
@@ -313,7 +332,7 @@ func (c *clientWrapper) GetTLSOptions() []*v1alpha1.TLSOption {
var result []*v1alpha1.TLSOption
for ns, factory := range c.factoriesCrd {
options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(c.labelSelector)
options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list tls options in namespace %s: %v", ns, err)
}
@@ -328,7 +347,7 @@ func (c *clientWrapper) GetTLSStores() []*v1alpha1.TLSStore {
var result []*v1alpha1.TLSStore
for ns, factory := range c.factoriesCrd {
stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(c.labelSelector)
stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list tls stores in namespace %s: %v", ns, err)
}
@@ -366,7 +385,7 @@ func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, bool,
return nil, false, fmt.Errorf("failed to get secret %s/%s: namespace is not within watched namespaces", namespace, name)
}
secret, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
secret, err := c.factoriesSecret[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
exist, err := translateNotFoundError(err)
return secret, exist, err
}
@@ -384,31 +403,6 @@ func (c *clientWrapper) lookupNamespace(ns string) string {
return ns
}
func (c *clientWrapper) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler {
return &cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
// Ignore Ingresses that do not match our custom label selector.
switch v := obj.(type) {
case *v1alpha1.IngressRoute:
return c.labelSelector.Matches(labels.Set(v.GetLabels()))
case *v1alpha1.IngressRouteTCP:
return c.labelSelector.Matches(labels.Set(v.GetLabels()))
case *v1alpha1.TraefikService:
return c.labelSelector.Matches(labels.Set(v.GetLabels()))
case *v1alpha1.TLSOption:
return c.labelSelector.Matches(labels.Set(v.GetLabels()))
case *v1alpha1.TLSStore:
return c.labelSelector.Matches(labels.Set(v.GetLabels()))
case *v1alpha1.Middleware:
return c.labelSelector.Matches(labels.Set(v.GetLabels()))
default:
return true
}
},
Handler: &resourceEventHandler{ev: events},
}
}
// eventHandlerFunc will pass the obj on to the events channel or drop it.
// This is so passing the events along won't block in the case of high volume.
// The events are only used for signaling anyway so dropping a few is ok.

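The client-side cache.FilteringResourceEventHandler is removed: filtering now happens server-side, through ListOptions tweaks on dedicated informer factories (a secret factory that excludes Helm-owned secrets via "owner!=helm", and the provider's label selector on the CRD factory), which is why the listers above now use labels.Everything(). A minimal sketch of that mechanism using client-go's informer options (newSecretFactory is a hypothetical helper, not part of the commit):

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newSecretFactory builds an informer factory whose list/watch calls ask the
// API server to drop Helm-owned secrets, so they never enter the local cache.
func newSecretFactory(cs kubernetes.Interface, ns string, resync time.Duration) informers.SharedInformerFactory {
	notOwnedByHelm := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "owner!=helm"
	}
	return informers.NewSharedInformerFactoryWithOptions(
		cs,
		resync,
		informers.WithNamespace(ns),
		informers.WithTweakListOptions(notOwnedByHelm),
	)
}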

@@ -0,0 +1,65 @@
package crd
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
crdfake "github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/generated/clientset/versioned/fake"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubefake "k8s.io/client-go/kubernetes/fake"
)
func TestClientIgnoresHelmOwnedSecrets(t *testing.T) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "secret",
},
}
helmSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "helm-secret",
Labels: map[string]string{
"owner": "helm",
},
},
}
kubeClient := kubefake.NewSimpleClientset(helmSecret, secret)
crdClient := crdfake.NewSimpleClientset()
client := newClientImpl(kubeClient, crdClient)
stopCh := make(chan struct{})
eventCh, err := client.WatchAll(nil, stopCh)
require.NoError(t, err)
select {
case event := <-eventCh:
secret, ok := event.(*corev1.Secret)
require.True(t, ok)
assert.NotEqual(t, "helm-secret", secret.Name)
case <-time.After(50 * time.Millisecond):
assert.Fail(t, "expected to receive event for secret")
}
select {
case <-eventCh:
assert.Fail(t, "received more than one event")
case <-time.After(50 * time.Millisecond):
}
_, found, err := client.GetSecret("default", "secret")
require.NoError(t, err)
assert.True(t, found)
_, found, err = client.GetSecret("default", "helm-secret")
require.NoError(t, err)
assert.False(t, found)
}


@@ -49,12 +49,12 @@ type Provider struct {
lastConfiguration safe.Safe
}
func (p *Provider) newK8sClient(ctx context.Context, labelSelector string) (*clientWrapper, error) {
labelSel, err := labels.Parse(labelSelector)
func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
_, err := labels.Parse(p.LabelSelector)
if err != nil {
return nil, fmt.Errorf("invalid label selector: %q", labelSelector)
return nil, fmt.Errorf("invalid label selector: %q", p.LabelSelector)
}
log.FromContext(ctx).Infof("label selector is: %q", labelSel)
log.FromContext(ctx).Infof("label selector is: %q", p.LabelSelector)
withEndpoint := ""
if p.Endpoint != "" {
@@ -74,11 +74,12 @@ func (p *Provider) newK8sClient(ctx context.Context, labelSelector string) (*cli
client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}
if err == nil {
client.labelSelector = labelSel
if err != nil {
return nil, err
}
return client, err
client.labelSelector = p.LabelSelector
return client, nil
}
// Init the provider.
@@ -92,8 +93,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, providerName))
logger := log.FromContext(ctxLog)
logger.Debugf("Using label selector: %q", p.LabelSelector)
k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
k8sClient, err := p.newK8sClient(ctxLog)
if err != nil {
return err
}

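newK8sClient no longer builds a labels.Selector: it only validates the configured string with labels.Parse and hands the raw string to the client, since the API server now applies it. A minimal sketch of that validation step, assuming k8s.io/apimachinery's labels package (validateSelector is a hypothetical helper):

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// validateSelector rejects malformed selectors up front; the raw string is
// what later ends up in ListOptions.LabelSelector on the API server side.
func validateSelector(selector string) (string, error) {
	if _, err := labels.Parse(selector); err != nil {
		return "", fmt.Errorf("invalid label selector: %q", selector)
	}
	return selector, nil
}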

@@ -22,7 +22,6 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
@@ -66,10 +65,12 @@
}
type clientWrapper struct {
clientset *kubernetes.Clientset
factories map[string]informers.SharedInformerFactory
clientset kubernetes.Interface
factoriesKube map[string]informers.SharedInformerFactory
factoriesSecret map[string]informers.SharedInformerFactory
factoriesIngress map[string]informers.SharedInformerFactory
clusterFactory informers.SharedInformerFactory
ingressLabelSelector labels.Selector
ingressLabelSelector string
isNamespaceAll bool
watchedNamespaces []string
}
@@ -138,17 +139,19 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) {
return newClientImpl(clientset), nil
}
func newClientImpl(clientset *kubernetes.Clientset) *clientWrapper {
func newClientImpl(clientset kubernetes.Interface) *clientWrapper {
return &clientWrapper{
clientset: clientset,
factories: make(map[string]informers.SharedInformerFactory),
clientset: clientset,
factoriesSecret: make(map[string]informers.SharedInformerFactory),
factoriesIngress: make(map[string]informers.SharedInformerFactory),
factoriesKube: make(map[string]informers.SharedInformerFactory),
}
}
// WatchAll starts namespace-specific controllers for all relevant kinds.
func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) {
eventCh := make(chan interface{}, 1)
eventHandler := c.newResourceEventHandler(eventCh)
eventHandler := &resourceEventHandler{eventCh}
if len(namespaces) == 0 {
namespaces = []string{metav1.NamespaceAll}
@@ -157,21 +160,49 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
c.watchedNamespaces = namespaces
for _, ns := range namespaces {
factory := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns))
factory.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
factory.Core().V1().Services().Informer().AddEventHandler(eventHandler)
factory.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
factory.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
c.factories[ns] = factory
notOwnedByHelm := func(opts *metav1.ListOptions) {
opts.LabelSelector = "owner!=helm"
}
matchesLabelSelector := func(opts *metav1.ListOptions) {
opts.LabelSelector = c.ingressLabelSelector
}
for _, ns := range namespaces {
c.factories[ns].Start(stopCh)
factoryIngress := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(matchesLabelSelector))
factoryIngress.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
c.factoriesIngress[ns] = factoryIngress
factoryKube := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns))
factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler)
factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
c.factoriesKube[ns] = factoryKube
factorySecret := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm))
factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
c.factoriesSecret[ns] = factorySecret
}
for _, ns := range namespaces {
for typ, ok := range c.factories[ns].WaitForCacheSync(stopCh) {
c.factoriesIngress[ns].Start(stopCh)
c.factoriesKube[ns].Start(stopCh)
c.factoriesSecret[ns].Start(stopCh)
}
for _, ns := range namespaces {
for typ, ok := range c.factoriesIngress[ns].WaitForCacheSync(stopCh) {
if !ok {
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns)
}
}
for typ, ok := range c.factoriesKube[ns].WaitForCacheSync(stopCh) {
if !ok {
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns)
}
}
for typ, ok := range c.factoriesSecret[ns].WaitForCacheSync(stopCh) {
if !ok {
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns)
}
@@ -203,9 +234,9 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress {
var results []*networkingv1beta1.Ingress
for ns, factory := range c.factories {
for ns, factory := range c.factoriesIngress {
// extensions
ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector)
ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list ingresses in namespace %s: %v", ns, err)
}
@@ -220,7 +251,7 @@ func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress {
}
// networking
list, err := factory.Networking().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector)
list, err := factory.Networking().V1beta1().Ingresses().Lister().List(labels.Everything())
if err != nil {
log.Errorf("Failed to list ingresses in namespace %s: %v", ns, err)
}
@@ -254,7 +285,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ingS
return c.updateIngressStatusOld(src, ingStatus)
}
ing, err := c.factories[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
ing, err := c.factoriesIngress[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
if err != nil {
return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err)
}
@@ -282,7 +313,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ingS
}
func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, ingStatus []corev1.LoadBalancerIngress) error {
ing, err := c.factories[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
ing, err := c.factoriesIngress[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
if err != nil {
return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err)
}
@@ -335,7 +366,7 @@ func (c *clientWrapper) GetService(namespace, name string) (*corev1.Service, boo
return nil, false, fmt.Errorf("failed to get service %s/%s: namespace is not within watched namespaces", namespace, name)
}
service, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
service, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
exist, err := translateNotFoundError(err)
return service, exist, err
}
@@ -346,7 +377,7 @@ func (c *clientWrapper) GetEndpoints(namespace, name string) (*corev1.Endpoints,
return nil, false, fmt.Errorf("failed to get endpoints %s/%s: namespace is not within watched namespaces", namespace, name)
}
endpoint, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name)
endpoint, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name)
exist, err := translateNotFoundError(err)
return endpoint, exist, err
}
@@ -357,7 +388,7 @@ func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, bool,
return nil, false, fmt.Errorf("failed to get secret %s/%s: namespace is not within watched namespaces", namespace, name)
}
secret, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
secret, err := c.factoriesSecret[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
exist, err := translateNotFoundError(err)
return secret, exist, err
}
@@ -394,25 +425,6 @@ func (c *clientWrapper) lookupNamespace(ns string) string {
return ns
}
func (c *clientWrapper) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler {
return &cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
// Ignore Ingresses that do not match our custom label selector.
switch v := obj.(type) {
case *extensionsv1beta1.Ingress:
lbls := labels.Set(v.GetLabels())
return c.ingressLabelSelector.Matches(lbls)
case *networkingv1beta1.Ingress:
lbls := labels.Set(v.GetLabels())
return c.ingressLabelSelector.Matches(lbls)
default:
return true
}
},
Handler: &resourceEventHandler{ev: events},
}
}
// GetServerVersion returns the cluster server version, or an error.
func (c *clientWrapper) GetServerVersion() (*version.Version, error) {
serverVersion, err := c.clientset.Discovery().ServerVersion()


@@ -3,11 +3,15 @@ package ingress
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
kubeerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
kubefake "k8s.io/client-go/kubernetes/fake"
)
func TestTranslateNotFoundError(t *testing.T) {
@@ -125,3 +129,54 @@ func TestIsLoadBalancerIngressEquals(t *testing.T) {
})
}
}
func TestClientIgnoresHelmOwnedSecrets(t *testing.T) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "secret",
},
}
helmSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "helm-secret",
Labels: map[string]string{
"owner": "helm",
},
},
}
kubeClient := kubefake.NewSimpleClientset(helmSecret, secret)
client := newClientImpl(kubeClient)
stopCh := make(chan struct{})
eventCh, err := client.WatchAll(nil, stopCh)
require.NoError(t, err)
select {
case event := <-eventCh:
secret, ok := event.(*corev1.Secret)
require.True(t, ok)
assert.NotEqual(t, "helm-secret", secret.Name)
case <-time.After(50 * time.Millisecond):
assert.Fail(t, "expected to receive event for secret")
}
select {
case <-eventCh:
assert.Fail(t, "received more than one event")
case <-time.After(50 * time.Millisecond):
}
_, found, err := client.GetSecret("default", "secret")
require.NoError(t, err)
assert.True(t, found)
_, found, err = client.GetSecret("default", "helm-secret")
require.NoError(t, err)
assert.False(t, found)
}


@@ -53,15 +53,15 @@ type EndpointIngress struct {
PublishedService string `description:"Published Kubernetes Service to copy status from." json:"publishedService,omitempty" toml:"publishedService,omitempty" yaml:"publishedService,omitempty"`
}
func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string) (*clientWrapper, error) {
ingLabelSel, err := labels.Parse(ingressLabelSelector)
func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
_, err := labels.Parse(p.LabelSelector)
if err != nil {
return nil, fmt.Errorf("invalid ingress label selector: %q", ingressLabelSelector)
return nil, fmt.Errorf("invalid ingress label selector: %q", p.LabelSelector)
}
logger := log.FromContext(ctx)
logger.Infof("ingress label selector is: %q", ingLabelSel)
logger.Infof("ingress label selector is: %q", p.LabelSelector)
withEndpoint := ""
if p.Endpoint != "" {
@@ -81,11 +81,12 @@ func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string
cl, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}
if err == nil {
cl.ingressLabelSelector = ingLabelSel
if err != nil {
return nil, err
}
return cl, err
cl.ingressLabelSelector = p.LabelSelector
return cl, nil
}
// Init the provider.
@@ -99,8 +100,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes"))
logger := log.FromContext(ctxLog)
logger.Debugf("Using Ingress label selector: %q", p.LabelSelector)
k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
k8sClient, err := p.newK8sClient(ctxLog)
if err != nil {
return err
}