
Add Knative provider

Author: idurgakalyan, 2025-10-08 01:32:05 -07:00 (committed by GitHub)
parent 3f23afb2c6
commit 13bcdebc89
38 changed files with 18589 additions and 37 deletions

@@ -0,0 +1,232 @@
package knative
import (
"context"
"errors"
"fmt"
"os"
"time"
"github.com/rs/zerolog/log"
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/k8s"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
kinformers "k8s.io/client-go/informers"
kclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
knativenetworkingv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
knativenetworkingclientset "knative.dev/networking/pkg/client/clientset/versioned"
knativenetworkinginformers "knative.dev/networking/pkg/client/informers/externalversions"
)
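// resyncPeriod is the resync interval for the shared informer factories.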
const resyncPeriod = 10 * time.Minute
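// clientWrapper bundles the Knative networking and core Kubernetes clientsets
// together with their per-namespace informer factories.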
type clientWrapper struct {
csKnativeNetworking knativenetworkingclientset.Interface
csKube kclientset.Interface
factoriesKnativeNetworking map[string]knativenetworkinginformers.SharedInformerFactory
factoriesKube map[string]kinformers.SharedInformerFactory
labelSelector string
isNamespaceAll bool
watchedNamespaces []string
}
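// createClientFromConfig builds a clientWrapper from the given rest.Config.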
func createClientFromConfig(c *rest.Config) (*clientWrapper, error) {
csKnativeNetworking, err := knativenetworkingclientset.NewForConfig(c)
if err != nil {
return nil, err
}
csKube, err := kclientset.NewForConfig(c)
if err != nil {
return nil, err
}
return newClientImpl(csKnativeNetworking, csKube), nil
}
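// newClientImpl wires the given clientsets into a clientWrapper with empty informer factory maps.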
func newClientImpl(csKnativeNetworking knativenetworkingclientset.Interface, csKube kclientset.Interface) *clientWrapper {
return &clientWrapper{
csKnativeNetworking: csKnativeNetworking,
csKube: csKube,
factoriesKnativeNetworking: make(map[string]knativenetworkinginformers.SharedInformerFactory),
factoriesKube: make(map[string]kinformers.SharedInformerFactory),
}
}
// newInClusterClient returns a new Provider client that is expected to run
// inside the cluster.
func newInClusterClient(endpoint string) (*clientWrapper, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("creating in-cluster configuration: %w", err)
}
if endpoint != "" {
config.Host = endpoint
}
return createClientFromConfig(config)
}
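// newExternalClusterClientFromFile returns a new Provider client built from the given kubeconfig file.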
func newExternalClusterClientFromFile(file string) (*clientWrapper, error) {
configFromFlags, err := clientcmd.BuildConfigFromFlags("", file)
if err != nil {
return nil, err
}
return createClientFromConfig(configFromFlags)
}
// newExternalClusterClient returns a new Provider client that may run outside
// of the cluster.
// The endpoint parameter must not be empty.
func newExternalClusterClient(endpoint, token, caFilePath string) (*clientWrapper, error) {
if endpoint == "" {
return nil, errors.New("endpoint missing for external cluster client")
}
config := &rest.Config{
Host: endpoint,
BearerToken: token,
}
if caFilePath != "" {
caData, err := os.ReadFile(caFilePath)
if err != nil {
return nil, fmt.Errorf("reading CA file %s: %w", caFilePath, err)
}
config.TLSClientConfig = rest.TLSClientConfig{CAData: caData}
}
return createClientFromConfig(config)
}
// WatchAll starts namespace-specific controllers for all relevant kinds.
func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) {
eventCh := make(chan interface{}, 1)
eventHandler := &k8s.ResourceEventHandler{Ev: eventCh}
if len(namespaces) == 0 {
namespaces = []string{metav1.NamespaceAll}
c.isNamespaceAll = true
}
c.watchedNamespaces = namespaces
for _, ns := range namespaces {
factory := knativenetworkinginformers.NewSharedInformerFactoryWithOptions(c.csKnativeNetworking, resyncPeriod, knativenetworkinginformers.WithNamespace(ns), knativenetworkinginformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
opts.LabelSelector = c.labelSelector
}))
_, err := factory.Networking().V1alpha1().Ingresses().Informer().AddEventHandler(eventHandler)
if err != nil {
return nil, err
}
factoryKube := kinformers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, kinformers.WithNamespace(ns))
_, err = factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler)
if err != nil {
return nil, err
}
_, err = factoryKube.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
if err != nil {
return nil, err
}
c.factoriesKube[ns] = factoryKube
c.factoriesKnativeNetworking[ns] = factory
}
for _, ns := range namespaces {
c.factoriesKnativeNetworking[ns].Start(stopCh)
c.factoriesKube[ns].Start(stopCh)
}
for _, ns := range namespaces {
for t, ok := range c.factoriesKnativeNetworking[ns].WaitForCacheSync(stopCh) {
if !ok {
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
}
}
for t, ok := range c.factoriesKube[ns].WaitForCacheSync(stopCh) {
if !ok {
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
}
}
}
return eventCh, nil
}
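// ListIngresses returns the Knative Ingresses from the informer caches of all watched namespaces.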
func (c *clientWrapper) ListIngresses() []*knativenetworkingv1alpha1.Ingress {
var result []*knativenetworkingv1alpha1.Ingress
for ns, factory := range c.factoriesKnativeNetworking {
ings, err := factory.Networking().V1alpha1().Ingresses().Lister().List(labels.Everything()) // TODO: label selector
if err != nil {
log.Error().Msgf("Failed to list ingresses in namespace %s: %s", ns, err)
}
result = append(result, ings...)
}
return result
}
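// UpdateIngressStatus updates the status subresource of the given Knative Ingress.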
func (c *clientWrapper) UpdateIngressStatus(ingress *knativenetworkingv1alpha1.Ingress) error {
_, err := c.csKnativeNetworking.NetworkingV1alpha1().Ingresses(ingress.Namespace).UpdateStatus(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("updating knative ingress status %s/%s: %w", ingress.Namespace, ingress.Name, err)
}
log.Info().Msgf("Updated status on knative ingress %s/%s", ingress.Namespace, ingress.Name)
return nil
}
// GetService returns the named service from the given namespace.
func (c *clientWrapper) GetService(namespace, name string) (*corev1.Service, error) {
if !c.isWatchedNamespace(namespace) {
return nil, fmt.Errorf("getting service %s/%s: namespace is not within watched namespaces", namespace, name)
}
return c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
}
// GetSecret returns the named secret from the given namespace.
func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, error) {
if !c.isWatchedNamespace(namespace) {
return nil, fmt.Errorf("getting secret %s/%s: namespace is not within watched namespaces", namespace, name)
}
return c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
}
// isWatchedNamespace checks that the given namespace is being watched before we request an object from it,
// so that we don't panic by querying an informer for an out-of-watch namespace.
func (c *clientWrapper) isWatchedNamespace(ns string) bool {
if c.isNamespaceAll {
return true
}
for _, watchedNamespace := range c.watchedNamespaces {
if watchedNamespace == ns {
return true
}
}
return false
}
// lookupNamespace returns the lookup namespace key for the given namespace.
// When listening on all namespaces, it returns the client-go identifier ("")
// for all-namespaces. Otherwise, it returns the given namespace.
// The distinction is necessary because we index all informers on the special
// identifier iff all-namespaces are requested but receive specific namespace
// identifiers from the Kubernetes API, so we have to bridge this gap.
func (c *clientWrapper) lookupNamespace(ns string) string {
if c.isNamespaceAll {
return metav1.NamespaceAll
}
return ns
}
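
As a point of reference, here is a minimal sketch of how the wrapper above could be driven from an event loop. The exampleRun helper and the "default" namespace are illustrative assumptions, not part of this commit:

func exampleRun() error {
stopCh := make(chan struct{})
defer close(stopCh)
c, err := newInClusterClient("")
if err != nil {
return err
}
eventCh, err := c.WatchAll([]string{"default"}, stopCh)
if err != nil {
return err
}
for range eventCh {
// Each event signals that an informer cache changed; re-list and react.
for _, ing := range c.ListIngresses() {
log.Info().Msgf("observed Knative ingress %s/%s", ing.Namespace, ing.Name)
}
}
return nil
}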

@@ -0,0 +1,33 @@
---
apiVersion: networking.internal.knative.dev/v1alpha1
kind: Ingress
metadata:
annotations:
networking.knative.dev/ingress.class: traefik.ingress.networking.knative.dev
name: helloworld-go
namespace: default
spec:
httpOption: Enabled
rules:
- hosts:
- helloworld-go.default
- helloworld-go.default.svc
- helloworld-go.default.svc.cluster.local
http:
paths:
- splits:
- appendHeaders:
Knative-Serving-Namespace: default
Knative-Serving-Revision: helloworld-go-00001
percent: 50
serviceName: helloworld-go-00001
serviceNamespace: default
servicePort: 80
- appendHeaders:
Knative-Serving-Namespace: default
Knative-Serving-Revision: helloworld-go-00002
percent: 50
serviceName: helloworld-go-00002
serviceNamespace: default
servicePort: 80
visibility: ClusterLocal

@@ -0,0 +1,33 @@
---
apiVersion: networking.internal.knative.dev/v1alpha1
kind: Ingress
metadata:
annotations:
networking.knative.dev/ingress.class: traefik.ingress.networking.knative.dev
name: helloworld-go
namespace: default
spec:
httpOption: Enabled
rules:
- hosts:
- helloworld-go.default
- helloworld-go.default.svc
- helloworld-go.default.svc.cluster.local
http:
paths:
- splits:
- appendHeaders:
Knative-Serving-Namespace: default
Knative-Serving-Revision: helloworld-go-00001
percent: 50
serviceName: helloworld-go-00001
serviceNamespace: default
servicePort: 80
- appendHeaders:
Knative-Serving-Namespace: default
Knative-Serving-Revision: helloworld-go-00002
percent: 50
serviceName: helloworld-go-00002
serviceNamespace: default
servicePort: 80
visibility: ExternalIP

@@ -0,0 +1,39 @@
---
apiVersion: v1
kind: Service
metadata:
name: helloworld-go-00001
namespace: default
spec:
clusterIP: 10.43.38.208
clusterIPs:
- 10.43.38.208
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8012
- name: https
port: 443
protocol: TCP
targetPort: 8112
---
apiVersion: v1
kind: Service
metadata:
name: helloworld-go-00002
namespace: default
spec:
clusterIP: 10.43.44.18
clusterIPs:
- 10.43.44.18
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8012
- name: https
port: 443
protocol: TCP
targetPort: 8112

@@ -0,0 +1,38 @@
---
apiVersion: networking.internal.knative.dev/v1alpha1
kind: Ingress
metadata:
annotations:
networking.knative.dev/ingress.class: traefik.ingress.networking.knative.dev
name: helloworld-go
namespace: default
spec:
httpOption: Enabled
tls:
- hosts:
- helloworld-go.default.svc.cluster.local
secretName: secretName
secretNamespace: secretNamespace
rules:
- hosts:
- helloworld-go.default
- helloworld-go.default.svc
- helloworld-go.default.svc.cluster.local
http:
paths:
- splits:
- appendHeaders:
Knative-Serving-Namespace: default
Knative-Serving-Revision: helloworld-go-00001
percent: 50
serviceName: helloworld-go-00001
serviceNamespace: default
servicePort: 80
- appendHeaders:
Knative-Serving-Namespace: default
Knative-Serving-Revision: helloworld-go-00002
percent: 50
serviceName: helloworld-go-00002
serviceNamespace: default
servicePort: 80
visibility: ExternalIP

@@ -0,0 +1,8 @@
---
apiVersion: networking.internal.knative.dev/v1alpha1
kind: Ingress
metadata:
annotations:
networking.knative.dev/ingress.class: foo.ingress.networking.knative.dev
name: helloworld-go
namespace: default

@@ -0,0 +1,531 @@
package knative
import (
"context"
"errors"
"fmt"
"maps"
"net"
"os"
"slices"
"strconv"
"strings"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/mitchellh/hashstructure"
"github.com/rs/zerolog/log"
ptypes "github.com/traefik/paerser/types"
"github.com/traefik/traefik/v3/pkg/config/dynamic"
"github.com/traefik/traefik/v3/pkg/job"
"github.com/traefik/traefik/v3/pkg/observability/logs"
"github.com/traefik/traefik/v3/pkg/safe"
"github.com/traefik/traefik/v3/pkg/tls"
"github.com/traefik/traefik/v3/pkg/types"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
knativenetworking "knative.dev/networking/pkg/apis/networking"
knativenetworkingv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
"knative.dev/pkg/network"
)
const (
providerName = "knative"
traefikIngressClassName = "traefik.ingress.networking.knative.dev"
)
// ServiceRef holds a Kubernetes service reference.
type ServiceRef struct {
Name string `description:"Name of the Kubernetes service." json:"name,omitempty" toml:"name,omitempty" yaml:"name,omitempty"`
Namespace string `description:"Namespace of the Kubernetes service." json:"namespace,omitempty" toml:"namespace,omitempty" yaml:"namespace,omitempty"`
}
// Provider holds configurations of the provider.
type Provider struct {
Endpoint string `description:"Kubernetes server endpoint (required for external cluster client)." json:"endpoint,omitempty" toml:"endpoint,omitempty" yaml:"endpoint,omitempty"`
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)." json:"token,omitempty" toml:"token,omitempty" yaml:"token,omitempty"`
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)." json:"certAuthFilePath,omitempty" toml:"certAuthFilePath,omitempty" yaml:"certAuthFilePath,omitempty"`
Namespaces []string `description:"Kubernetes namespaces." json:"namespaces,omitempty" toml:"namespaces,omitempty" yaml:"namespaces,omitempty" export:"true"`
LabelSelector string `description:"Kubernetes label selector to use." json:"labelSelector,omitempty" toml:"labelSelector,omitempty" yaml:"labelSelector,omitempty" export:"true"`
PublicEntrypoints []string `description:"Entrypoint names used to expose the Ingress publicly. If empty an Ingress is exposed on all entrypoints." json:"publicEntrypoints,omitempty" toml:"publicEntrypoints,omitempty" yaml:"publicEntrypoints,omitempty" export:"true"`
PublicService ServiceRef `description:"Kubernetes service used to expose the networking controller publicly." json:"publicService,omitempty" toml:"publicService,omitempty" yaml:"publicService,omitempty" export:"true"`
PrivateEntrypoints []string `description:"Entrypoint names used to expose the Ingress privately. If empty local Ingresses are skipped." json:"privateEntrypoints,omitempty" toml:"privateEntrypoints,omitempty" yaml:"privateEntrypoints,omitempty" export:"true"`
PrivateService ServiceRef `description:"Kubernetes service used to expose the networking controller privately." json:"privateService,omitempty" toml:"privateService,omitempty" yaml:"privateService,omitempty" export:"true"`
ThrottleDuration ptypes.Duration `description:"Ingress refresh throttle duration." json:"throttleDuration,omitempty" toml:"throttleDuration,omitempty" yaml:"throttleDuration,omitempty"`
client *clientWrapper
lastConfiguration safe.Safe
}
// Init the provider.
func (p *Provider) Init() error {
logger := log.With().Str(logs.ProviderName, providerName).Logger()
// Initialize the Kubernetes client.
var err error
p.client, err = p.newK8sClient(logger.WithContext(context.Background()))
if err != nil {
return fmt.Errorf("creating kubernetes client: %w", err)
}
return nil
}
// Provide allows the Knative provider to provide configurations to traefik using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
logger := log.With().Str(logs.ProviderName, providerName).Logger()
ctxLog := logger.WithContext(context.Background())
pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
eventsChan, err := p.client.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Error().Msgf("Error watching kubernetes events: %v", err)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
return err
case <-ctxPool.Done():
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
case <-ctxPool.Done():
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this throttling interval -- if we're hitting our throttle, we may have dropped events.
// This is fine, because we don't treat different event types differently.
// But if we do in the future, we'll need to track more information about the dropped events.
conf, ingressStatuses := p.loadConfiguration(ctxLog)
confHash, err := hashstructure.Hash(conf, nil)
switch {
case err != nil:
logger.Error().Msg("Unable to hash the configuration")
case p.lastConfiguration.Get() == confHash:
logger.Debug().Msgf("Skipping Kubernetes event kind %T", event)
default:
p.lastConfiguration.Set(confHash)
configurationChan <- dynamic.Message{
ProviderName: providerName,
Configuration: conf,
}
}
// If we're throttling,
// we sleep here for the throttle duration to enforce that we don't refresh faster than our throttle.
// time.Sleep returns immediately if p.ThrottleDuration is 0 (no throttle).
time.Sleep(throttleDuration)
// Updating the ingress status after the throttle duration ensures that the dynamic configuration has been applied before the status is updated.
// This is needed for the conformance tests to pass, for example.
for _, ingress := range ingressStatuses {
if err := p.updateKnativeIngressStatus(ctxLog, ingress); err != nil {
logger.Error().Err(err).Msgf("Error updating status for Ingress %s/%s", ingress.Namespace, ingress.Name)
}
}
}
}
}
notify := func(err error, time time.Duration) {
logger.Error().Msgf("Provider connection error: %v; retrying in %s", err, time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Error().Msgf("Cannot connect to Provider: %v", err)
}
})
return nil
}
func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
logger := log.Ctx(ctx).With().Logger()
_, err := labels.Parse(p.LabelSelector)
if err != nil {
return nil, fmt.Errorf("parsing label selector: %q", p.LabelSelector)
}
logger.Info().Msgf("Label selector is: %q", p.LabelSelector)
withEndpoint := ""
if p.Endpoint != "" {
withEndpoint = fmt.Sprintf(" with endpoint %s", p.Endpoint)
}
var client *clientWrapper
switch {
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
logger.Info().Msgf("Creating in-cluster Provider client%s", withEndpoint)
client, err = newInClusterClient(p.Endpoint)
case os.Getenv("KUBECONFIG") != "":
logger.Info().Msgf("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
client, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
default:
logger.Info().Msgf("Creating cluster-external Provider client%s", withEndpoint)
client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}
if err != nil {
return nil, err
}
client.labelSelector = p.LabelSelector
return client, nil
}
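// loadConfiguration builds the dynamic configuration from the Knative Ingresses handled by this provider,
// and returns the Ingresses whose status needs to be updated.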
func (p *Provider) loadConfiguration(ctx context.Context) (*dynamic.Configuration, []*knativenetworkingv1alpha1.Ingress) {
conf := &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
Routers: make(map[string]*dynamic.Router),
Middlewares: make(map[string]*dynamic.Middleware),
Services: make(map[string]*dynamic.Service),
},
}
var ingressStatuses []*knativenetworkingv1alpha1.Ingress
uniqCerts := make(map[string]*tls.CertAndStores)
for _, ingress := range p.client.ListIngresses() {
logger := log.Ctx(ctx).With().
Str("ingress", ingress.Name).
Str("namespace", ingress.Namespace).
Logger()
if ingress.Annotations[knativenetworking.IngressClassAnnotationKey] != traefikIngressClassName {
logger.Debug().Msgf("Skipping Ingress %s/%s", ingress.Namespace, ingress.Name)
continue
}
if err := p.loadCertificates(ctx, ingress, uniqCerts); err != nil {
logger.Error().Err(err).Msg("Error loading TLS certificates")
continue
}
conf.HTTP = mergeHTTPConfigs(conf.HTTP, p.buildRouters(ctx, ingress))
// TODO: should we handle configuration errors?
ingressStatuses = append(ingressStatuses, ingress)
}
if len(uniqCerts) > 0 {
conf.TLS = &dynamic.TLSConfiguration{
Certificates: slices.Collect(maps.Values(uniqCerts)),
}
}
return conf, ingressStatuses
}
// loadCertificates loads the TLS certificates for the given Knative Ingress.
// This method mutates the uniqCerts map to add the loaded certificates.
func (p *Provider) loadCertificates(ctx context.Context, ingress *knativenetworkingv1alpha1.Ingress, uniqCerts map[string]*tls.CertAndStores) error {
for _, t := range ingress.Spec.TLS {
// TODO: maybe this could be allowed with an allowCrossNamespace option in the future.
if t.SecretNamespace != ingress.Namespace {
log.Ctx(ctx).Debug().Msg("TLS secret namespace has to be the same as the Ingress one")
continue
}
key := ingress.Namespace + "-" + t.SecretName
// TODO: as specified in the GoDoc we should validate that the certificates contain the configured Hosts.
if _, exists := uniqCerts[key]; !exists {
cert, err := p.loadCertificate(ingress.Namespace, t.SecretName)
if err != nil {
return fmt.Errorf("getting certificate: %w", err)
}
uniqCerts[key] = &tls.CertAndStores{Certificate: cert}
}
}
return nil
}
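// loadCertificate loads a TLS keypair from the named Kubernetes secret.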
func (p *Provider) loadCertificate(namespace, secretName string) (tls.Certificate, error) {
secret, err := p.client.GetSecret(namespace, secretName)
if err != nil {
return tls.Certificate{}, fmt.Errorf("getting secret %s/%s: %w", namespace, secretName, err)
}
certBytes, hasCert := secret.Data[corev1.TLSCertKey]
keyBytes, hasKey := secret.Data[corev1.TLSPrivateKeyKey]
if (!hasCert || len(certBytes) == 0) || (!hasKey || len(keyBytes) == 0) {
return tls.Certificate{}, errors.New("secret does not contain a keypair")
}
return tls.Certificate{
CertFile: types.FileOrContent(certBytes),
KeyFile: types.FileOrContent(keyBytes),
}, nil
}
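// buildRouters builds the routers, middlewares, and services for the rules of the given Knative Ingress.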
func (p *Provider) buildRouters(ctx context.Context, ingress *knativenetworkingv1alpha1.Ingress) *dynamic.HTTPConfiguration {
logger := log.Ctx(ctx).With().Logger()
conf := &dynamic.HTTPConfiguration{
Routers: make(map[string]*dynamic.Router),
Middlewares: make(map[string]*dynamic.Middleware),
Services: make(map[string]*dynamic.Service),
}
for ri, rule := range ingress.Spec.Rules {
if rule.HTTP == nil {
logger.Debug().Msgf("No HTTP rule defined for rule %d in Ingress %s", ri, ingress.Name)
continue
}
entrypoints := p.PublicEntrypoints
if rule.Visibility == knativenetworkingv1alpha1.IngressVisibilityClusterLocal {
if p.PrivateEntrypoints == nil {
// Skip route creation as no internal entrypoints are defined for cluster local visibility.
continue
}
entrypoints = p.PrivateEntrypoints
}
// TODO: support rewrite host
for pi, path := range rule.HTTP.Paths {
routerKey := fmt.Sprintf("%s-%s-rule-%d-path-%d", ingress.Namespace, ingress.Name, ri, pi)
router := &dynamic.Router{
EntryPoints: entrypoints,
Rule: buildRule(rule.Hosts, path.Headers, path.Path),
Middlewares: make([]string, 0),
Service: routerKey + "-wrr",
}
if len(path.AppendHeaders) > 0 {
midKey := fmt.Sprintf("%s-append-headers", routerKey)
router.Middlewares = append(router.Middlewares, midKey)
conf.Middlewares[midKey] = &dynamic.Middleware{
Headers: &dynamic.Headers{
CustomRequestHeaders: path.AppendHeaders,
},
}
}
wrr, services, err := p.buildWeightedRoundRobin(routerKey, path.Splits)
if err != nil {
logger.Error().Err(err).Msg("Error building weighted round robin")
continue
}
// TODO: support Ingress#HTTPOption to check if HTTP router should redirect to the HTTPS one.
conf.Routers[routerKey] = router
// TODO: at some point we should allow to define a default TLS secret at the provider level to enable TLS with a custom cert when external-domain-tls is disabled.
// see https://knative.dev/docs/serving/encryption/external-domain-tls/#manually-obtain-and-renew-certificates
if len(ingress.Spec.TLS) > 0 {
conf.Routers[routerKey+"-tls"] = &dynamic.Router{
EntryPoints: router.EntryPoints,
Rule: router.Rule, // TODO: maybe the rule should be a new one containing the TLS hosts injected by Knative.
Middlewares: router.Middlewares,
Service: router.Service,
TLS: &dynamic.RouterTLSConfig{},
}
}
conf.Services[routerKey+"-wrr"] = &dynamic.Service{Weighted: wrr}
for k, v := range services {
conf.Services[k] = v
}
}
}
return conf
}
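// buildWeightedRoundRobin builds a weighted round-robin service from the given backend splits,
// along with the underlying load-balancer services.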
func (p *Provider) buildWeightedRoundRobin(routerKey string, splits []knativenetworkingv1alpha1.IngressBackendSplit) (*dynamic.WeightedRoundRobin, map[string]*dynamic.Service, error) {
wrr := &dynamic.WeightedRoundRobin{
Services: make([]dynamic.WRRService, 0),
}
services := make(map[string]*dynamic.Service)
for si, split := range splits {
serviceKey := fmt.Sprintf("%s-split-%d", routerKey, si)
var err error
services[serviceKey], err = p.buildService(split.ServiceNamespace, split.ServiceName, split.ServicePort)
if err != nil {
return nil, nil, fmt.Errorf("building service: %w", err)
}
// As described in the spec, when there is only one split its weight defaults to 100.
percent := split.Percent
if len(splits) == 1 {
percent = 100
}
wrr.Services = append(wrr.Services, dynamic.WRRService{
Name: serviceKey,
Weight: ptr.To(percent),
Headers: split.AppendHeaders,
})
}
return wrr, services, nil
}
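// buildService builds a load-balancer service targeting the given Kubernetes service port.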
func (p *Provider) buildService(namespace, serviceName string, port intstr.IntOrString) (*dynamic.Service, error) {
servers, err := p.buildServers(namespace, serviceName, port)
if err != nil {
return nil, fmt.Errorf("building servers: %w", err)
}
var lb dynamic.ServersLoadBalancer
lb.SetDefaults()
lb.Servers = servers
return &dynamic.Service{LoadBalancer: &lb}, nil
}
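// buildServers resolves the given service port to a single server URL targeting the service ClusterIP.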
func (p *Provider) buildServers(namespace, serviceName string, port intstr.IntOrString) ([]dynamic.Server, error) {
service, err := p.client.GetService(namespace, serviceName)
if err != nil {
return nil, fmt.Errorf("getting service %s/%s: %w", namespace, serviceName, err)
}
var svcPort *corev1.ServicePort
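// Go 1.22+ loop variables are per-iteration, so taking the address of the
// loop variable below is safe; it is named sp to avoid shadowing the receiver.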
for _, sp := range service.Spec.Ports {
if sp.Name == port.String() || strconv.Itoa(int(sp.Port)) == port.String() {
svcPort = &sp
break
}
}
if svcPort == nil {
return nil, errors.New("service port not found")
}
if service.Spec.ClusterIP == "" {
return nil, errors.New("service does not have a ClusterIP")
}
scheme := "http"
if svcPort.AppProtocol != nil && *svcPort.AppProtocol == knativenetworking.AppProtocolH2C {
scheme = "h2c"
}
hostPort := net.JoinHostPort(service.Spec.ClusterIP, strconv.Itoa(int(svcPort.Port)))
return []dynamic.Server{{URL: fmt.Sprintf("%s://%s", scheme, hostPort)}}, nil
}
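// updateKnativeIngressStatus marks the given Ingress as network-configured and load-balancer ready,
// and pushes the status update when it is out of date.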
func (p *Provider) updateKnativeIngressStatus(ctx context.Context, ingress *knativenetworkingv1alpha1.Ingress) error {
log.Ctx(ctx).Debug().Msgf("Updating status for Ingress %s/%s", ingress.Namespace, ingress.Name)
var publicLbs []knativenetworkingv1alpha1.LoadBalancerIngressStatus
if p.PublicService.Name != "" && p.PublicService.Namespace != "" {
publicLbs = append(publicLbs, knativenetworkingv1alpha1.LoadBalancerIngressStatus{
DomainInternal: network.GetServiceHostname(p.PublicService.Name, p.PublicService.Namespace),
})
}
var privateLbs []knativenetworkingv1alpha1.LoadBalancerIngressStatus
if p.PrivateService.Name != "" && p.PrivateService.Namespace != "" {
privateLbs = append(privateLbs, knativenetworkingv1alpha1.LoadBalancerIngressStatus{
DomainInternal: network.GetServiceHostname(p.PrivateService.Name, p.PrivateService.Namespace),
})
}
if ingress.GetStatus() == nil || !ingress.GetStatus().GetCondition(knativenetworkingv1alpha1.IngressConditionNetworkConfigured).IsTrue() || ingress.GetGeneration() != ingress.GetStatus().ObservedGeneration {
ingress.Status.MarkNetworkConfigured()
ingress.Status.MarkLoadBalancerReady(publicLbs, privateLbs)
ingress.Status.ObservedGeneration = ingress.GetGeneration()
return p.client.UpdateIngressStatus(ingress)
}
return nil
}
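// buildRule builds a Traefik rule matching the given hosts, exact header matches, and path prefix.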
func buildRule(hosts []string, headers map[string]knativenetworkingv1alpha1.HeaderMatch, path string) string {
var operands []string
if len(hosts) > 0 {
var hostRules []string
for _, host := range hosts {
hostRules = append(hostRules, fmt.Sprintf("Host(`%v`)", host))
}
operands = append(operands, fmt.Sprintf("(%s)", strings.Join(hostRules, " || ")))
}
if len(headers) > 0 {
headerKeys := slices.Collect(maps.Keys(headers))
slices.Sort(headerKeys)
var headerRules []string
for _, key := range headerKeys {
headerRules = append(headerRules, fmt.Sprintf("Header(`%s`,`%s`)", key, headers[key].Exact))
}
operands = append(operands, fmt.Sprintf("(%s)", strings.Join(headerRules, " && ")))
}
if len(path) > 0 {
operands = append(operands, fmt.Sprintf("PathPrefix(`%s`)", path))
}
return strings.Join(operands, " && ")
}
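// mergeHTTPConfigs merges the routers, middlewares, and services of the given
// HTTP configurations; later entries win on key collisions.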
func mergeHTTPConfigs(confs ...*dynamic.HTTPConfiguration) *dynamic.HTTPConfiguration {
conf := &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{},
Middlewares: map[string]*dynamic.Middleware{},
Services: map[string]*dynamic.Service{},
}
for _, c := range confs {
for k, v := range c.Routers {
conf.Routers[k] = v
}
for k, v := range c.Middlewares {
conf.Middlewares[k] = v
}
for k, v := range c.Services {
conf.Services[k] = v
}
}
return conf
}
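// throttleEvents returns a channel that relays at most one pending event from eventsChan,
// or nil when throttling is disabled.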
func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
logger := log.Ctx(ctx).With().Logger()
if throttleDuration == 0 {
return nil
}
// Create a buffered channel to hold the pending event (if we're delaying processing the event due to throttling)
eventsChanBuffered := make(chan interface{}, 1)
// Run a goroutine that reads events from eventsChan and does a non-blocking write to eventsChanBuffered.
// This guarantees that writing to eventsChan will never block,
// and that eventsChanBuffered will have something in it if there's been an event since we last read from that channel.
pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case <-ctxPool.Done():
return
case nextEvent := <-eventsChan:
select {
case eventsChanBuffered <- nextEvent:
default:
// We already have an event in eventsChanBuffered, so we'll do a refresh as soon as our throttle allows us to.
// It's fine to drop the event and keep whatever's in the buffer -- we don't do different things for different events
logger.Debug().Msgf("Dropping event kind %T due to throttling", nextEvent)
}
}
}
})
return eventsChanBuffered
}
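
For orientation, a minimal sketch of how this provider could be constructed before being handed to Traefik's provider machinery. The entrypoint names, the service reference, and the exampleProvider helper are illustrative assumptions:

func exampleProvider() (*Provider, error) {
p := &Provider{
// Assumed entrypoint names; they must match the static configuration.
PublicEntrypoints: []string{"web", "websecure"},
// Assumed Service exposing the controller; reported in Ingress statuses.
PublicService: ServiceRef{Name: "traefik", Namespace: "traefik-system"},
ThrottleDuration: ptypes.Duration(2 * time.Second),
}
// Init builds the Kubernetes client (in-cluster, or external via KUBECONFIG).
if err := p.Init(); err != nil {
return nil, err
}
return p, nil
}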

@@ -0,0 +1,478 @@
package knative
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/traefik/paerser/types"
"github.com/traefik/traefik/v3/pkg/config/dynamic"
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/k8s"
"k8s.io/apimachinery/pkg/runtime"
kubefake "k8s.io/client-go/kubernetes/fake"
kscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/utils/ptr"
knativenetworkingv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
knfake "knative.dev/networking/pkg/client/clientset/versioned/fake"
)
func init() {
// required by k8s.MustParseYaml
if err := knativenetworkingv1alpha1.AddToScheme(kscheme.Scheme); err != nil {
panic(err)
}
}
func Test_loadConfiguration(t *testing.T) {
testCases := []struct {
desc string
paths []string
want *dynamic.Configuration
wantLen int
}{
{
desc: "Wrong ingress class",
paths: []string{"wrong_ingress_class.yaml"},
wantLen: 0,
want: &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{},
Services: map[string]*dynamic.Service{},
Middlewares: map[string]*dynamic.Middleware{},
},
},
},
{
desc: "Cluster Local",
paths: []string{"cluster_local.yaml", "services.yaml"},
wantLen: 1,
want: &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"default-helloworld-go-rule-0-path-0": {
EntryPoints: []string{"priv-http", "priv-https"},
Service: "default-helloworld-go-rule-0-path-0-wrr",
Rule: "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
Middlewares: []string{},
},
},
Services: map[string]*dynamic.Service{
"default-helloworld-go-rule-0-path-0-split-0": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Strategy: "wrr",
PassHostHeader: ptr.To(true),
ResponseForwarding: &dynamic.ResponseForwarding{
FlushInterval: types.Duration(100 * time.Millisecond),
},
Servers: []dynamic.Server{
{
URL: "http://10.43.38.208:80",
},
},
},
},
"default-helloworld-go-rule-0-path-0-split-1": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Strategy: "wrr",
PassHostHeader: ptr.To(true),
ResponseForwarding: &dynamic.ResponseForwarding{
FlushInterval: types.Duration(100 * time.Millisecond),
},
Servers: []dynamic.Server{
{
URL: "http://10.43.44.18:80",
},
},
},
},
"default-helloworld-go-rule-0-path-0-wrr": {
Weighted: &dynamic.WeightedRoundRobin{
Services: []dynamic.WRRService{
{
Name: "default-helloworld-go-rule-0-path-0-split-0",
Weight: ptr.To(50),
Headers: map[string]string{
"Knative-Serving-Namespace": "default",
"Knative-Serving-Revision": "helloworld-go-00001",
},
},
{
Name: "default-helloworld-go-rule-0-path-0-split-1",
Weight: ptr.To(50),
Headers: map[string]string{
"Knative-Serving-Namespace": "default",
"Knative-Serving-Revision": "helloworld-go-00002",
},
},
},
},
},
},
Middlewares: map[string]*dynamic.Middleware{},
},
},
},
{
desc: "External IP",
paths: []string{"external_ip.yaml", "services.yaml"},
wantLen: 1,
want: &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"default-helloworld-go-rule-0-path-0": {
EntryPoints: []string{"http", "https"},
Service: "default-helloworld-go-rule-0-path-0-wrr",
Rule: "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
Middlewares: []string{},
},
},
Services: map[string]*dynamic.Service{
"default-helloworld-go-rule-0-path-0-split-0": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Strategy: "wrr",
PassHostHeader: ptr.To(true),
ResponseForwarding: &dynamic.ResponseForwarding{
FlushInterval: types.Duration(100 * time.Millisecond),
},
Servers: []dynamic.Server{
{
URL: "http://10.43.38.208:80",
},
},
},
},
"default-helloworld-go-rule-0-path-0-split-1": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Strategy: "wrr",
PassHostHeader: ptr.To(true),
ResponseForwarding: &dynamic.ResponseForwarding{
FlushInterval: types.Duration(100 * time.Millisecond),
},
Servers: []dynamic.Server{
{
URL: "http://10.43.44.18:80",
},
},
},
},
"default-helloworld-go-rule-0-path-0-wrr": {
Weighted: &dynamic.WeightedRoundRobin{
Services: []dynamic.WRRService{
{
Name: "default-helloworld-go-rule-0-path-0-split-0",
Weight: ptr.To(50),
Headers: map[string]string{
"Knative-Serving-Namespace": "default",
"Knative-Serving-Revision": "helloworld-go-00001",
},
},
{
Name: "default-helloworld-go-rule-0-path-0-split-1",
Weight: ptr.To(50),
Headers: map[string]string{
"Knative-Serving-Namespace": "default",
"Knative-Serving-Revision": "helloworld-go-00002",
},
},
},
},
},
},
Middlewares: map[string]*dynamic.Middleware{},
},
},
},
{
desc: "TLS",
paths: []string{"tls.yaml", "services.yaml"},
wantLen: 1,
want: &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"default-helloworld-go-rule-0-path-0": {
EntryPoints: []string{"http", "https"},
Service: "default-helloworld-go-rule-0-path-0-wrr",
Rule: "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
Middlewares: []string{},
},
"default-helloworld-go-rule-0-path-0-tls": {
EntryPoints: []string{"http", "https"},
Service: "default-helloworld-go-rule-0-path-0-wrr",
Rule: "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
Middlewares: []string{},
TLS: &dynamic.RouterTLSConfig{},
},
},
Services: map[string]*dynamic.Service{
"default-helloworld-go-rule-0-path-0-split-0": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Strategy: "wrr",
PassHostHeader: ptr.To(true),
ResponseForwarding: &dynamic.ResponseForwarding{
FlushInterval: types.Duration(100 * time.Millisecond),
},
Servers: []dynamic.Server{
{
URL: "http://10.43.38.208:80",
},
},
},
},
"default-helloworld-go-rule-0-path-0-split-1": {
LoadBalancer: &dynamic.ServersLoadBalancer{
Strategy: "wrr",
PassHostHeader: ptr.To(true),
ResponseForwarding: &dynamic.ResponseForwarding{
FlushInterval: types.Duration(100 * time.Millisecond),
},
Servers: []dynamic.Server{
{
URL: "http://10.43.44.18:80",
},
},
},
},
"default-helloworld-go-rule-0-path-0-wrr": {
Weighted: &dynamic.WeightedRoundRobin{
Services: []dynamic.WRRService{
{
Name: "default-helloworld-go-rule-0-path-0-split-0",
Weight: ptr.To(50),
Headers: map[string]string{
"Knative-Serving-Namespace": "default",
"Knative-Serving-Revision": "helloworld-go-00001",
},
},
{
Name: "default-helloworld-go-rule-0-path-0-split-1",
Weight: ptr.To(50),
Headers: map[string]string{
"Knative-Serving-Namespace": "default",
"Knative-Serving-Revision": "helloworld-go-00002",
},
},
},
},
},
},
Middlewares: map[string]*dynamic.Middleware{},
},
},
},
}
for _, testCase := range testCases {
t.Run(testCase.desc, func(t *testing.T) {
t.Parallel()
k8sObjects, knObjects := readResources(t, testCase.paths)
k8sClient := kubefake.NewClientset(k8sObjects...)
knClient := knfake.NewSimpleClientset(knObjects...)
client := newClientImpl(knClient, k8sClient)
eventCh, err := client.WatchAll(nil, make(chan struct{}))
require.NoError(t, err)
if len(k8sObjects) > 0 || len(knObjects) > 0 {
// just wait for the first event
<-eventCh
}
p := Provider{
PublicEntrypoints: []string{"http", "https"},
PrivateEntrypoints: []string{"priv-http", "priv-https"},
client: client,
}
got, gotIngresses := p.loadConfiguration(t.Context())
assert.Len(t, gotIngresses, testCase.wantLen)
assert.Equal(t, testCase.want, got)
})
}
}
func Test_buildRule(t *testing.T) {
testCases := []struct {
desc string
hosts []string
headers map[string]knativenetworkingv1alpha1.HeaderMatch
path string
want string
}{
{
desc: "single host, no headers, no path",
hosts: []string{"example.com"},
want: "(Host(`example.com`))",
},
{
desc: "multiple hosts, no headers, no path",
hosts: []string{"example.com", "foo.com"},
want: "(Host(`example.com`) || Host(`foo.com`))",
},
{
desc: "single host, single header, no path",
hosts: []string{"example.com"},
headers: map[string]knativenetworkingv1alpha1.HeaderMatch{
"X-Header": {Exact: "value"},
},
want: "(Host(`example.com`)) && (Header(`X-Header`,`value`))",
},
{
desc: "single host, multiple headers, no path",
hosts: []string{"example.com"},
headers: map[string]knativenetworkingv1alpha1.HeaderMatch{
"X-Header": {Exact: "value"},
"X-Header2": {Exact: "value2"},
},
want: "(Host(`example.com`)) && (Header(`X-Header`,`value`) && Header(`X-Header2`,`value2`))",
},
{
desc: "single host, multiple headers, with path",
hosts: []string{"example.com"},
headers: map[string]knativenetworkingv1alpha1.HeaderMatch{
"X-Header": {Exact: "value"},
"X-Header2": {Exact: "value2"},
},
path: "/foo",
want: "(Host(`example.com`)) && (Header(`X-Header`,`value`) && Header(`X-Header2`,`value2`)) && PathPrefix(`/foo`)",
},
{
desc: "single host, no headers, with path",
hosts: []string{"example.com"},
path: "/foo",
want: "(Host(`example.com`)) && PathPrefix(`/foo`)",
},
}
for _, test := range testCases {
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
got := buildRule(test.hosts, test.headers, test.path)
assert.Equal(t, test.want, got)
})
}
}
func Test_mergeHTTPConfigs(t *testing.T) {
testCases := []struct {
desc string
configs []*dynamic.HTTPConfiguration
want *dynamic.HTTPConfiguration
}{
{
desc: "one empty configuration",
configs: []*dynamic.HTTPConfiguration{
{
Routers: map[string]*dynamic.Router{
"router1": {Rule: "Host(`example.com`)"},
},
Middlewares: map[string]*dynamic.Middleware{
"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
},
Services: map[string]*dynamic.Service{
"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
},
},
{
Routers: map[string]*dynamic.Router{},
Middlewares: map[string]*dynamic.Middleware{},
Services: map[string]*dynamic.Service{},
},
},
want: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"router1": {Rule: "Host(`example.com`)"},
},
Middlewares: map[string]*dynamic.Middleware{
"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
},
Services: map[string]*dynamic.Service{
"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
},
},
},
{
desc: "merging two non-empty configurations",
configs: []*dynamic.HTTPConfiguration{
{
Routers: map[string]*dynamic.Router{
"router1": {Rule: "Host(`example.com`)"},
},
Middlewares: map[string]*dynamic.Middleware{
"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
},
Services: map[string]*dynamic.Service{
"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
},
},
{
Routers: map[string]*dynamic.Router{
"router2": {Rule: "PathPrefix(`/test`)"},
},
Middlewares: map[string]*dynamic.Middleware{
"middleware2": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
},
Services: map[string]*dynamic.Service{
"service2": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
},
},
},
want: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{
"router1": {Rule: "Host(`example.com`)"},
"router2": {Rule: "PathPrefix(`/test`)"},
},
Middlewares: map[string]*dynamic.Middleware{
"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
"middleware2": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
},
Services: map[string]*dynamic.Service{
"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
"service2": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
},
},
},
}
for _, test := range testCases {
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
got := mergeHTTPConfigs(test.configs...)
assert.Equal(t, test.want, got)
})
}
}
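// readResources loads the fixture files and splits the parsed objects into
// core Kubernetes objects and Knative networking objects.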
func readResources(t *testing.T, paths []string) ([]runtime.Object, []runtime.Object) {
t.Helper()
var (
k8sObjects []runtime.Object
knObjects []runtime.Object
)
for _, path := range paths {
yamlContent, err := os.ReadFile(filepath.FromSlash("./fixtures/" + path))
require.NoError(t, err)
objects := k8s.MustParseYaml(yamlContent)
for _, obj := range objects {
switch obj.GetObjectKind().GroupVersionKind().Group {
case "networking.internal.knative.dev":
knObjects = append(knObjects, obj)
default:
k8sObjects = append(k8sObjects, obj)
}
}
}
return k8sObjects, knObjects
}