New logger for the Traefik logs
parent 27c02b5a56
commit 56f7515ecd

297 changed files with 2337 additions and 1934 deletions
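The diff below replaces Traefik's internal logging wrapper (github.com/traefik/traefik/v2/pkg/log, built on logrus) with zerolog's package-level logger (github.com/rs/zerolog/log) across the Kubernetes CRD, Ingress, and Gateway API providers; the field-name constants move to github.com/traefik/traefik/v2/pkg/logs. As a rough orientation only — a minimal standalone sketch, not code taken from the commit — the recurring rewrite looks like this:

```go
package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("boom")
	ns := "default"

	// Before (Traefik's logrus-based wrapper, shown only as a comment):
	//   log.Errorf("Failed to list ingress routes in namespace %s: %v", ns, err)

	// After (zerolog): the error travels as a structured "error" field and the
	// message is finished with Msgf; an event is only emitted by Msg/Msgf/Send.
	log.Error().Err(err).Msgf("Failed to list ingress routes in namespace %s", ns)
}
```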
@@ -8,7 +8,7 @@ import (
"runtime"
"time"

- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/generated/clientset/versioned"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/generated/informers/externalversions"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"

@@ -220,7 +220,7 @@ func (c *clientWrapper) GetIngressRoutes() []*v1alpha1.IngressRoute {
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list ingress routes in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list ingress routes in namespace %s", ns)
}
result = append(result, ings...)
}

@@ -234,7 +234,7 @@ func (c *clientWrapper) GetIngressRouteTCPs() []*v1alpha1.IngressRouteTCP {
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list tcp ingress routes in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list tcp ingress routes in namespace %s", ns)
}
result = append(result, ings...)
}

@@ -248,7 +248,7 @@ func (c *clientWrapper) GetIngressRouteUDPs() []*v1alpha1.IngressRouteUDP {
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list udp ingress routes in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list udp ingress routes in namespace %s", ns)
}
result = append(result, ings...)
}

@@ -262,7 +262,7 @@ func (c *clientWrapper) GetMiddlewares() []*v1alpha1.Middleware {
for ns, factory := range c.factoriesCrd {
middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list middlewares in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list middlewares in namespace %s", ns)
}
result = append(result, middlewares...)
}

@@ -276,7 +276,7 @@ func (c *clientWrapper) GetMiddlewareTCPs() []*v1alpha1.MiddlewareTCP {
for ns, factory := range c.factoriesCrd {
middlewares, err := factory.Traefik().V1alpha1().MiddlewareTCPs().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list TCP middlewares in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list TCP middlewares in namespace %s", ns)
}
result = append(result, middlewares...)
}

@@ -302,7 +302,7 @@ func (c *clientWrapper) GetTraefikServices() []*v1alpha1.TraefikService {
for ns, factory := range c.factoriesCrd {
ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list Traefik services in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list Traefik services in namespace %s", ns)
}
result = append(result, ings...)
}

@@ -310,14 +310,14 @@ func (c *clientWrapper) GetTraefikServices() []*v1alpha1.TraefikService {
return result
}

- // GetServersTransport returns all ServersTransport.
+ // GetServersTransports returns all ServersTransport.
func (c *clientWrapper) GetServersTransports() []*v1alpha1.ServersTransport {
var result []*v1alpha1.ServersTransport

for ns, factory := range c.factoriesCrd {
serversTransports, err := factory.Traefik().V1alpha1().ServersTransports().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list servers transport in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list servers transport in namespace %s", ns)
}
result = append(result, serversTransports...)
}

@@ -332,7 +332,7 @@ func (c *clientWrapper) GetTLSOptions() []*v1alpha1.TLSOption {
for ns, factory := range c.factoriesCrd {
options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list tls options in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list tls options in namespace %s", ns)
}
result = append(result, options...)
}

@@ -347,7 +347,7 @@ func (c *clientWrapper) GetTLSStores() []*v1alpha1.TLSStore {
for ns, factory := range c.factoriesCrd {
stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(labels.Everything())
if err != nil {
- log.Errorf("Failed to list tls stores in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list tls stores in namespace %s", ns)
}
result = append(result, stores...)
}
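Every hunk above follows the same shape: the listing error that used to be interpolated with %v now rides on the event as an Err(err) field. A small illustrative sketch (the writer and message are made up) of what that buys in the output:

```go
package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// A plain JSON logger writing to stdout, purely for demonstration.
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()

	err := errors.New("connection refused")

	// Emits something like:
	// {"level":"error","error":"connection refused","time":"...","message":"Failed to list middlewares in namespace default"}
	logger.Error().Err(err).Msgf("Failed to list middlewares in namespace %s", "default")
}
```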
@@ -17,10 +17,11 @@ import (

"github.com/cenkalti/backoff/v4"
"github.com/mitchellh/hashstructure"
+ "github.com/rs/zerolog/log"
ptypes "github.com/traefik/paerser/types"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/job"
- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/traefik/traefik/v2/pkg/logs"
"github.com/traefik/traefik/v2/pkg/provider"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"
"github.com/traefik/traefik/v2/pkg/safe"

@@ -62,7 +63,7 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
if err != nil {
return nil, fmt.Errorf("invalid label selector: %q", p.LabelSelector)
}
- log.FromContext(ctx).Infof("label selector is: %q", p.LabelSelector)
+ log.Ctx(ctx).Info().Msgf("label selector is: %q", p.LabelSelector)

withEndpoint := ""
if p.Endpoint != "" {

@@ -72,13 +73,13 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
var client *clientWrapper
switch {
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
- log.FromContext(ctx).Infof("Creating in-cluster Provider client%s", withEndpoint)
+ log.Ctx(ctx).Info().Msgf("Creating in-cluster Provider client%s", withEndpoint)
client, err = newInClusterClient(p.Endpoint)
case os.Getenv("KUBECONFIG") != "":
- log.FromContext(ctx).Infof("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
+ log.Ctx(ctx).Info().Msgf("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
client, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
default:
- log.FromContext(ctx).Infof("Creating cluster-external Provider client%s", withEndpoint)
+ log.Ctx(ctx).Info().Msgf("Creating cluster-external Provider client%s", withEndpoint)
client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}

@@ -98,8 +99,8 @@ func (p *Provider) Init() error {
// Provide allows the k8s provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
- ctxLog := log.With(context.Background(), log.Str(log.ProviderName, providerName))
- logger := log.FromContext(ctxLog)
+ logger := log.With().Str(logs.ProviderName, providerName).Logger()
+ ctxLog := logger.WithContext(context.Background())

k8sClient, err := p.newK8sClient(ctxLog)
if err != nil {

@@ -107,18 +108,18 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}

if p.AllowCrossNamespace {
- logger.Warn("Cross-namespace reference between IngressRoutes and resources is enabled, please ensure that this is expected (see AllowCrossNamespace option)")
+ logger.Warn().Msg("Cross-namespace reference between IngressRoutes and resources is enabled, please ensure that this is expected (see AllowCrossNamespace option)")
}

if p.AllowExternalNameServices {
- logger.Warn("ExternalName service loading is enabled, please ensure that this is expected (see AllowExternalNameServices option)")
+ logger.Warn().Msg("ExternalName service loading is enabled, please ensure that this is expected (see AllowExternalNameServices option)")
}

pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
- logger.Errorf("Error watching kubernetes events: %v", err)
+ logger.Error().Err(err).Msg("Error watching kubernetes events")
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:

@@ -147,9 +148,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
confHash, err := hashstructure.Hash(conf, nil)
switch {
case err != nil:
- logger.Error("Unable to hash the configuration")
+ logger.Error().Err(err).Msg("Unable to hash the configuration")
case p.lastConfiguration.Get() == confHash:
- logger.Debugf("Skipping Kubernetes event kind %T", event)
+ logger.Debug().Msgf("Skipping Kubernetes event kind %T", event)
default:
p.lastConfiguration.Set(confHash)
configurationChan <- dynamic.Message{

@@ -167,11 +168,11 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}

notify := func(err error, time time.Duration) {
- logger.Errorf("Provider connection error: %v; retrying in %s", err, time)
+ logger.Error().Err(err).Msgf("Provider connection error, retrying in %s", time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
- logger.Errorf("Cannot connect to Provider: %v", err)
+ logger.Error().Err(err).Msg("Cannot connect to Provider")
}
})

@@ -200,29 +201,30 @@ func (p *Provider) loadConfigurationFromCRD(ctx context.Context, client Client)

for _, middleware := range client.GetMiddlewares() {
id := provider.Normalize(makeID(middleware.Namespace, middleware.Name))
- ctxMid := log.With(ctx, log.Str(log.MiddlewareName, id))
+ logger := log.Ctx(ctx).With().Str(logs.MiddlewareName, id).Logger()
+ ctxMid := logger.WithContext(ctx)

basicAuth, err := createBasicAuthMiddleware(client, middleware.Namespace, middleware.Spec.BasicAuth)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading basic auth middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading basic auth middleware")
continue
}

digestAuth, err := createDigestAuthMiddleware(client, middleware.Namespace, middleware.Spec.DigestAuth)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading digest auth middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading digest auth middleware")
continue
}

forwardAuth, err := createForwardAuthMiddleware(client, middleware.Namespace, middleware.Spec.ForwardAuth)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading forward auth middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading forward auth middleware")
continue
}

errorPage, errorPageService, err := p.createErrorPageMiddleware(client, middleware.Namespace, middleware.Spec.Errors)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading error page middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading error page middleware")
continue
}

@@ -234,25 +236,25 @@ func (p *Provider) loadConfigurationFromCRD(ctx context.Context, client Client)

plugin, err := createPluginMiddleware(client, middleware.Namespace, middleware.Spec.Plugin)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading plugins middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading plugins middleware")
continue
}

rateLimit, err := createRateLimitMiddleware(middleware.Spec.RateLimit)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading rateLimit middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading rateLimit middleware")
continue
}

retry, err := createRetryMiddleware(middleware.Spec.Retry)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading retry middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading retry middleware")
continue
}

circuitBreaker, err := createCircuitBreakerMiddleware(middleware.Spec.CircuitBreaker)
if err != nil {
- log.FromContext(ctxMid).Errorf("Error while reading circuit breaker middleware: %v", err)
+ logger.Error().Err(err).Msg("Error while reading circuit breaker middleware")
continue
}

@@ -298,20 +300,20 @@ func (p *Provider) loadConfigurationFromCRD(ctx context.Context, client Client)
for _, service := range client.GetTraefikServices() {
err := cb.buildTraefikService(ctx, service, conf.HTTP.Services)
if err != nil {
- log.FromContext(ctx).WithField(log.ServiceName, service.Name).
- Errorf("Error while building TraefikService: %v", err)
+ log.Ctx(ctx).Error().Str(logs.ServiceName, service.Name).Err(err).
+ Msg("Error while building TraefikService")
continue
}
}

for _, serversTransport := range client.GetServersTransports() {
- logger := log.FromContext(ctx).WithField(log.ServersTransportName, serversTransport.Name)
+ logger := log.Ctx(ctx).With().Str(logs.ServersTransportName, serversTransport.Name).Logger()

var rootCAs []tls.FileOrContent
for _, secret := range serversTransport.Spec.RootCAsSecrets {
caSecret, err := loadCASecret(serversTransport.Namespace, secret, client)
if err != nil {
- logger.Errorf("Error while loading rootCAs %s: %v", secret, err)
+ logger.Error().Err(err).Msgf("Error while loading rootCAs %s", secret)
continue
}

@@ -322,7 +324,7 @@ func (p *Provider) loadConfigurationFromCRD(ctx context.Context, client Client)
for _, secret := range serversTransport.Spec.CertificatesSecrets {
tlsSecret, tlsKey, err := loadAuthTLSSecret(serversTransport.Namespace, secret, client)
if err != nil {
- logger.Errorf("Error while loading certificates %s: %v", secret, err)
+ logger.Error().Err(err).Msgf("Error while loading certificates %s", secret)
continue
}

@@ -339,35 +341,35 @@ func (p *Provider) loadConfigurationFromCRD(ctx context.Context, client Client)
if serversTransport.Spec.ForwardingTimeouts.DialTimeout != nil {
err := forwardingTimeout.DialTimeout.Set(serversTransport.Spec.ForwardingTimeouts.DialTimeout.String())
if err != nil {
- logger.Errorf("Error while reading DialTimeout: %v", err)
+ logger.Error().Err(err).Msg("Error while reading DialTimeout")
}
}

if serversTransport.Spec.ForwardingTimeouts.ResponseHeaderTimeout != nil {
err := forwardingTimeout.ResponseHeaderTimeout.Set(serversTransport.Spec.ForwardingTimeouts.ResponseHeaderTimeout.String())
if err != nil {
- logger.Errorf("Error while reading ResponseHeaderTimeout: %v", err)
+ logger.Error().Err(err).Msg("Error while reading ResponseHeaderTimeout")
}
}

if serversTransport.Spec.ForwardingTimeouts.IdleConnTimeout != nil {
err := forwardingTimeout.IdleConnTimeout.Set(serversTransport.Spec.ForwardingTimeouts.IdleConnTimeout.String())
if err != nil {
- logger.Errorf("Error while reading IdleConnTimeout: %v", err)
+ logger.Error().Err(err).Msg("Error while reading IdleConnTimeout")
}
}

if serversTransport.Spec.ForwardingTimeouts.ReadIdleTimeout != nil {
err := forwardingTimeout.ReadIdleTimeout.Set(serversTransport.Spec.ForwardingTimeouts.ReadIdleTimeout.String())
if err != nil {
- logger.Errorf("Error while reading ReadIdleTimeout: %v", err)
+ logger.Error().Err(err).Msg("Error while reading ReadIdleTimeout")
}
}

if serversTransport.Spec.ForwardingTimeouts.PingTimeout != nil {
err := forwardingTimeout.PingTimeout.Set(serversTransport.Spec.ForwardingTimeouts.PingTimeout.String())
if err != nil {
- logger.Errorf("Error while reading PingTimeout: %v", err)
+ logger.Error().Err(err).Msg("Error while reading PingTimeout")
}
}
}

@@ -414,8 +416,8 @@ func getServicePort(svc *corev1.Service, port intstr.IntOrString) (*corev1.Servi
}

if hasValidPort {
- log.WithoutContext().
- Warnf("The port %s from IngressRoute doesn't match with ports defined in the ExternalName service %s/%s.", port, svc.Namespace, svc.Name)
+ log.Warn().Msgf("The port %s from IngressRoute doesn't match with ports defined in the ExternalName service %s/%s.",
+ &port, svc.Namespace, svc.Name)
}

return &corev1.ServicePort{Port: port.IntVal}, nil

@@ -811,8 +813,7 @@ func createChainMiddleware(ctx context.Context, namespace string, chain *v1alpha
for _, mi := range chain.Middlewares {
if strings.Contains(mi.Name, providerNamespaceSeparator) {
if len(mi.Namespace) > 0 {
- log.FromContext(ctx).
- Warnf("namespace %q is ignored in cross-provider context", mi.Namespace)
+ log.Ctx(ctx).Warn().Msgf("namespace %q is ignored in cross-provider context", mi.Namespace)
}
mds = append(mds, mi.Name)
continue

@@ -838,24 +839,24 @@ func buildTLSOptions(ctx context.Context, client Client) map[string]tls.Options
var nsDefault []string

for _, tlsOption := range tlsOptionsCRD {
- logger := log.FromContext(log.With(ctx, log.Str("tlsOption", tlsOption.Name), log.Str("namespace", tlsOption.Namespace)))
+ logger := log.Ctx(ctx).With().Str("tlsOption", tlsOption.Name).Str("namespace", tlsOption.Namespace).Logger()
var clientCAs []tls.FileOrContent

for _, secretName := range tlsOption.Spec.ClientAuth.SecretNames {
secret, exists, err := client.GetSecret(tlsOption.Namespace, secretName)
if err != nil {
- logger.Errorf("Failed to fetch secret %s/%s: %v", tlsOption.Namespace, secretName, err)
+ logger.Error().Err(err).Msgf("Failed to fetch secret %s/%s", tlsOption.Namespace, secretName)
continue
}

if !exists {
- logger.Warnf("Secret %s/%s does not exist", tlsOption.Namespace, secretName)
+ logger.Warn().Msgf("Secret %s/%s does not exist", tlsOption.Namespace, secretName)
continue
}

cert, err := getCABlocks(secret, tlsOption.Namespace, secretName)
if err != nil {
- logger.Errorf("Failed to extract CA from secret %s/%s: %v", tlsOption.Namespace, secretName, err)
+ logger.Error().Err(err).Msgf("Failed to extract CA from secret %s/%s", tlsOption.Namespace, secretName)
continue
}

@@ -890,7 +891,7 @@ func buildTLSOptions(ctx context.Context, client Client) map[string]tls.Options

if len(nsDefault) > 1 {
delete(tlsOptions, tls.DefaultTLSConfigName)
- log.FromContext(ctx).Errorf("Default TLS Options defined in multiple namespaces: %v", nsDefault)
+ log.Ctx(ctx).Error().Msgf("Default TLS Options defined in multiple namespaces: %v", nsDefault)
}

return tlsOptions

@@ -907,7 +908,7 @@ func buildTLSStores(ctx context.Context, client Client) (map[string]tls.Store, m
tlsConfigs := make(map[string]*tls.CertAndStores)

for _, t := range tlsStoreCRD {
- logger := log.FromContext(log.With(ctx, log.Str("TLSStore", t.Name), log.Str("namespace", t.Namespace)))
+ logger := log.Ctx(ctx).With().Str("TLSStore", t.Name).Str("namespace", t.Namespace).Logger()

id := makeID(t.Namespace, t.Name)

@@ -924,17 +925,17 @@ func buildTLSStores(ctx context.Context, client Client) (map[string]tls.Store, m

secret, exists, err := client.GetSecret(t.Namespace, secretName)
if err != nil {
- logger.Errorf("Failed to fetch secret %s/%s: %v", t.Namespace, secretName, err)
+ logger.Error().Err(err).Msgf("Failed to fetch secret %s/%s", t.Namespace, secretName)
continue
}
if !exists {
- logger.Errorf("Secret %s/%s does not exist", t.Namespace, secretName)
+ logger.Error().Msgf("Secret %s/%s does not exist", t.Namespace, secretName)
continue
}

cert, key, err := getCertificateBlocks(secret, t.Namespace, secretName)
if err != nil {
- logger.Errorf("Could not get certificate blocks: %v", err)
+ logger.Error().Err(err).Msg("Could not get certificate blocks")
continue
}

@@ -952,7 +953,7 @@ func buildTLSStores(ctx context.Context, client Client) (map[string]tls.Store, m
}

if err := buildCertificates(client, id, t.Namespace, t.Spec.Certificates, tlsConfigs); err != nil {
- logger.Errorf("Failed to load certificates: %v", err)
+ logger.Error().Err(err).Msg("Failed to load certificates")
continue
}

@@ -961,7 +962,7 @@ func buildTLSStores(ctx context.Context, client Client) (map[string]tls.Store, m

if len(nsDefault) > 1 {
delete(tlsStores, tls.DefaultTLSStoreName)
- log.FromContext(ctx).Errorf("Default TLS Stores defined in multiple namespaces: %v", nsDefault)
+ log.Ctx(ctx).Error().Msgf("Default TLS Stores defined in multiple namespaces: %v", nsDefault)
}

return tlsStores, tlsConfigs

@@ -1117,7 +1118,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *s
default:
// We already have an event in eventsChanBuffered, so we'll do a refresh as soon as our throttle allows us to.
// It's fine to drop the event and keep whatever's in the buffer -- we don't do different things for different events
- log.FromContext(ctx).Debugf("Dropping event kind %T due to throttling", nextEvent)
+ log.Ctx(ctx).Debug().Msgf("Dropping event kind %T due to throttling", nextEvent)
}
}
}
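Several hunks in this file swap the old log.With(ctx, log.Str(...)) / log.FromContext(ctx) pair for zerolog's equivalent: derive a logger with With().Str(...).Logger(), stash it in the context with WithContext, and pull it back later with log.Ctx. A minimal sketch of that round trip (the field key and value are illustrative):

```go
package main

import (
	"context"

	"github.com/rs/zerolog/log"
)

func provide() context.Context {
	// Derive a logger that carries a provider field, then embed it in a context.
	logger := log.With().Str("providerName", "kubernetescrd").Logger()
	return logger.WithContext(context.Background())
}

func useLater(ctx context.Context) {
	// log.Ctx recovers the logger stored above; the field is still attached.
	log.Ctx(ctx).Info().Msgf("label selector is: %q", "app=traefik")
}

func main() {
	useLater(provide())
}
```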
@@ -8,8 +8,9 @@ import (
"strconv"
"strings"

+ "github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/traefik/traefik/v2/pkg/logs"
"github.com/traefik/traefik/v2/pkg/provider"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"
"github.com/traefik/traefik/v2/pkg/tls"

@@ -32,8 +33,7 @@ func (p *Provider) loadIngressRouteConfiguration(ctx context.Context, client Cli
}

for _, ingressRoute := range client.GetIngressRoutes() {
- ctxRt := log.With(ctx, log.Str("ingress", ingressRoute.Name), log.Str("namespace", ingressRoute.Namespace))
- logger := log.FromContext(ctxRt)
+ logger := log.Ctx(ctx).With().Str("ingress", ingressRoute.Name).Str("namespace", ingressRoute.Namespace).Logger()

// TODO keep the name ingressClass?
if !shouldProcessIngress(p.IngressClass, ingressRoute.Annotations[annotationKubernetesIngressClass]) {

@@ -42,7 +42,7 @@ func (p *Provider) loadIngressRouteConfiguration(ctx context.Context, client Cli

err := getTLSHTTP(ctx, ingressRoute, client, tlsConfigs)
if err != nil {
- logger.Errorf("Error configuring TLS: %v", err)
+ logger.Error().Err(err).Msg("Error configuring TLS")
}

ingressName := ingressRoute.Name

@@ -59,24 +59,24 @@ func (p *Provider) loadIngressRouteConfiguration(ctx context.Context, client Cli

for _, route := range ingressRoute.Spec.Routes {
if route.Kind != "Rule" {
- logger.Errorf("Unsupported match kind: %s. Only \"Rule\" is supported for now.", route.Kind)
+ logger.Error().Msgf("Unsupported match kind: %s. Only \"Rule\" is supported for now.", route.Kind)
continue
}

if len(route.Match) == 0 {
- logger.Errorf("Empty match rule")
+ logger.Error().Msg("Empty match rule")
continue
}

serviceKey, err := makeServiceKey(route.Match, ingressName)
if err != nil {
- logger.Error(err)
+ logger.Error().Err(err).Send()
continue
}

mds, err := p.makeMiddlewareKeys(ctx, ingressRoute.Namespace, route.Middlewares)
if err != nil {
- logger.Errorf("Failed to create middleware keys: %v", err)
+ logger.Error().Err(err).Msg("Failed to create middleware keys")
continue
}

@@ -92,13 +92,13 @@ func (p *Provider) loadIngressRouteConfiguration(ctx context.Context, client Cli

errBuild := cb.buildServicesLB(ctx, ingressRoute.Namespace, spec, serviceName, conf.Services)
if errBuild != nil {
- logger.Error(errBuild)
+ logger.Error().Err(errBuild).Send()
continue
}
} else if len(route.Services) == 1 {
fullName, serversLB, err := cb.nameAndService(ctx, ingressRoute.Namespace, route.Services[0].LoadBalancerSpec)
if err != nil {
- logger.Error(err)
+ logger.Error().Err(err).Send()
continue
}

@@ -134,12 +134,12 @@ func (p *Provider) loadIngressRouteConfiguration(ctx context.Context, client Cli
tlsOptionsName = makeID(ns, tlsOptionsName)
} else if len(ns) > 0 {
logger.
- WithField("TLSOption", ingressRoute.Spec.TLS.Options.Name).
- Warnf("Namespace %q is ignored in cross-provider context", ns)
+ Warn().Str("TLSOption", ingressRoute.Spec.TLS.Options.Name).
+ Msgf("Namespace %q is ignored in cross-provider context", ns)
}

if !isNamespaceAllowed(p.AllowCrossNamespace, ingressRoute.Namespace, ns) {
- logger.Errorf("TLSOption %s/%s is not in the IngressRoute namespace %s",
+ logger.Error().Msgf("TLSOption %s/%s is not in the IngressRoute namespace %s",
ns, ingressRoute.Spec.TLS.Options.Name, ingressRoute.Namespace)
continue
}

@@ -170,9 +170,9 @@ func (p *Provider) makeMiddlewareKeys(ctx context.Context, ingRouteNamespace str

if strings.Contains(name, providerNamespaceSeparator) {
if len(mi.Namespace) > 0 {
- log.FromContext(ctx).
- WithField(log.MiddlewareName, mi.Name).
- Warnf("namespace %q is ignored in cross-provider context", mi.Namespace)
+ log.Ctx(ctx).
+ Warn().Str(logs.MiddlewareName, mi.Name).
+ Msgf("namespace %q is ignored in cross-provider context", mi.Namespace)
}

mds = append(mds, name)

@@ -439,7 +439,7 @@ func (c configBuilder) loadServers(parentNamespace string, svc v1alpha1.LoadBala
// it generates and returns the configuration part for such a service,
// so that the caller can add it to the global config map.
func (c configBuilder) nameAndService(ctx context.Context, parentNamespace string, service v1alpha1.LoadBalancerSpec) (string, *dynamic.Service, error) {
- svcCtx := log.With(ctx, log.Str(log.ServiceName, service.Name))
+ svcCtx := log.Ctx(ctx).With().Str(logs.ServiceName, service.Name).Logger().WithContext(ctx)

namespace := namespaceOrFallback(service, parentNamespace)

@@ -488,7 +488,7 @@ func fullServiceName(ctx context.Context, namespace string, service v1alpha1.Loa
}

if service.Namespace != "" {
- log.FromContext(ctx).Warnf("namespace %q is ignored in cross-provider context", service.Namespace)
+ log.Ctx(ctx).Warn().Msgf("namespace %q is ignored in cross-provider context", service.Namespace)
}

return provider.Normalize(name) + providerNamespaceSeparator + pName

@@ -507,7 +507,7 @@ func getTLSHTTP(ctx context.Context, ingressRoute *v1alpha1.IngressRoute, k8sCli
return nil
}
if ingressRoute.Spec.TLS.SecretName == "" {
- log.FromContext(ctx).Debugf("No secret name provided")
+ log.Ctx(ctx).Debug().Msg("No secret name provided")
return nil
}
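Calls such as logger.Error(err) — an error logged with no message text — become logger.Error().Err(err).Send() above. In zerolog an event is only written once it is terminated by Msg, Msgf, or Send; Send is the terminator for the no-message case. A short sketch:

```go
package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("invalid rule: empty host")

	// Send() flushes the event with an empty message; without Msg/Msgf/Send
	// the event would never be emitted at all.
	log.Error().Err(err).Send()
}
```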
@@ -8,8 +8,9 @@ import (
"strconv"
"strings"

+ "github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/traefik/traefik/v2/pkg/logs"
"github.com/traefik/traefik/v2/pkg/provider"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"
"github.com/traefik/traefik/v2/pkg/tls"

@@ -24,7 +25,7 @@ func (p *Provider) loadIngressRouteTCPConfiguration(ctx context.Context, client
}

for _, ingressRouteTCP := range client.GetIngressRouteTCPs() {
- logger := log.FromContext(log.With(ctx, log.Str("ingress", ingressRouteTCP.Name), log.Str("namespace", ingressRouteTCP.Namespace)))
+ logger := log.Ctx(ctx).With().Str("ingress", ingressRouteTCP.Name).Str("namespace", ingressRouteTCP.Namespace).Logger()

if !shouldProcessIngress(p.IngressClass, ingressRouteTCP.Annotations[annotationKubernetesIngressClass]) {
continue

@@ -33,7 +34,7 @@ func (p *Provider) loadIngressRouteTCPConfiguration(ctx context.Context, client
if ingressRouteTCP.Spec.TLS != nil && !ingressRouteTCP.Spec.TLS.Passthrough {
err := getTLSTCP(ctx, ingressRouteTCP, client, tlsConfigs)
if err != nil {
- logger.Errorf("Error configuring TLS: %v", err)
+ logger.Error().Err(err).Msg("Error configuring TLS")
}
}

@@ -44,19 +45,19 @@ func (p *Provider) loadIngressRouteTCPConfiguration(ctx context.Context, client

for _, route := range ingressRouteTCP.Spec.Routes {
if len(route.Match) == 0 {
- logger.Errorf("Empty match rule")
+ logger.Error().Msg("Empty match rule")
continue
}

key, err := makeServiceKey(route.Match, ingressName)
if err != nil {
- logger.Error(err)
+ logger.Error().Err(err).Send()
continue
}

mds, err := p.makeMiddlewareTCPKeys(ctx, ingressRouteTCP.Namespace, route.Middlewares)
if err != nil {
- logger.Errorf("Failed to create middleware keys: %v", err)
+ logger.Error().Err(err).Msg("Failed to create middleware keys")
continue
}

@@ -65,10 +66,11 @@ func (p *Provider) loadIngressRouteTCPConfiguration(ctx context.Context, client
for _, service := range route.Services {
balancerServerTCP, err := p.createLoadBalancerServerTCP(client, ingressRouteTCP.Namespace, service)
if err != nil {
- logger.
- WithField("serviceName", service.Name).
- WithField("servicePort", service.Port).
- Errorf("Cannot create service: %v", err)
+ logger.Error().
+ Str("serviceName", service.Name).
+ Stringer("servicePort", &service.Port).
+ Err(err).
+ Msg("Cannot create service")
continue
}

@@ -119,13 +121,13 @@ func (p *Provider) loadIngressRouteTCPConfiguration(ctx context.Context, client
}
tlsOptionsName = makeID(ns, tlsOptionsName)
} else if len(ns) > 0 {
- logger.
- WithField("TLSOption", ingressRouteTCP.Spec.TLS.Options.Name).
- Warnf("Namespace %q is ignored in cross-provider context", ns)
+ logger.Warn().
+ Str("TLSOption", ingressRouteTCP.Spec.TLS.Options.Name).
+ Msgf("Namespace %q is ignored in cross-provider context", ns)
}

if !isNamespaceAllowed(p.AllowCrossNamespace, ingressRouteTCP.Namespace, ns) {
- logger.Errorf("TLSOption %s/%s is not in the IngressRouteTCP namespace %s",
+ logger.Error().Msgf("TLSOption %s/%s is not in the IngressRouteTCP namespace %s",
ns, ingressRouteTCP.Spec.TLS.Options.Name, ingressRouteTCP.Namespace)
continue
}

@@ -147,9 +149,9 @@ func (p *Provider) makeMiddlewareTCPKeys(ctx context.Context, ingRouteTCPNamespa
for _, mi := range middlewares {
if strings.Contains(mi.Name, providerNamespaceSeparator) {
if len(mi.Namespace) > 0 {
- log.FromContext(ctx).
- WithField(log.MiddlewareName, mi.Name).
- Warnf("namespace %q is ignored in cross-provider context", mi.Namespace)
+ log.Ctx(ctx).Warn().
+ Str(logs.MiddlewareName, mi.Name).
+ Msgf("Namespace %q is ignored in cross-provider context", mi.Namespace)
}
mds = append(mds, mi.Name)
continue

@@ -275,7 +277,7 @@ func getTLSTCP(ctx context.Context, ingressRoute *v1alpha1.IngressRouteTCP, k8sC
return nil
}
if ingressRoute.Spec.TLS.SecretName == "" {
- log.FromContext(ctx).Debugf("No secret name provided")
+ log.Ctx(ctx).Debug().Msg("No secret name provided")
return nil
}
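The TCP and UDP hunks log the service port with Stringer("servicePort", &service.Port) instead of WithField. Stringer accepts any fmt.Stringer and only formats it when the event is actually written; the address is taken because, in Traefik, the port is a Kubernetes intstr.IntOrString whose String method is declared on the pointer receiver. A self-contained sketch with a stand-in type:

```go
package main

import (
	"strconv"

	"github.com/rs/zerolog/log"
)

// port stands in for a type like intstr.IntOrString that implements fmt.Stringer.
type port int

func (p port) String() string { return strconv.Itoa(int(p)) }

func main() {
	p := port(8080)

	// Typed fields are chained onto the event before the closing message.
	log.Error().
		Str("serviceName", "whoami").
		Stringer("servicePort", p).
		Msg("Cannot create service")
}
```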
@@ -7,8 +7,8 @@ import (
"net"
"strconv"

+ "github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
- "github.com/traefik/traefik/v2/pkg/log"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"
corev1 "k8s.io/api/core/v1"
)

@@ -20,7 +20,7 @@ func (p *Provider) loadIngressRouteUDPConfiguration(ctx context.Context, client
}

for _, ingressRouteUDP := range client.GetIngressRouteUDPs() {
- logger := log.FromContext(log.With(ctx, log.Str("ingress", ingressRouteUDP.Name), log.Str("namespace", ingressRouteUDP.Namespace)))
+ logger := log.Ctx(ctx).With().Str("ingress", ingressRouteUDP.Name).Str("namespace", ingressRouteUDP.Namespace).Logger()

if !shouldProcessIngress(p.IngressClass, ingressRouteUDP.Annotations[annotationKubernetesIngressClass]) {
continue

@@ -38,10 +38,11 @@ func (p *Provider) loadIngressRouteUDPConfiguration(ctx context.Context, client
for _, service := range route.Services {
balancerServerUDP, err := p.createLoadBalancerServerUDP(client, ingressRouteUDP.Namespace, service)
if err != nil {
- logger.
- WithField("serviceName", service.Name).
- WithField("servicePort", service.Port).
- Errorf("Cannot create service: %v", err)
+ logger.Error().
+ Str("serviceName", service.Name).
+ Stringer("servicePort", &service.Port).
+ Err(err).
+ Msg("Cannot create service")
continue
}
@@ -7,7 +7,7 @@ import (
"os"
"time"

- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
kubeerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -252,7 +252,7 @@ func (c *clientWrapper) GetNamespaces(selector labels.Selector) ([]string, error
var namespaces []string
for _, namespace := range ns {
if !c.isWatchedNamespace(namespace.Name) {
- log.WithoutContext().Warnf("Namespace %q is not within watched namespaces", selector, namespace)
+ log.Warn().Msgf("Namespace %q is not within %q watched namespaces", selector, namespace)
continue
}
namespaces = append(namespaces, namespace.Name)

@@ -264,7 +264,7 @@ func (c *clientWrapper) GetHTTPRoutes(namespaces []string) ([]*v1alpha2.HTTPRout
var httpRoutes []*v1alpha2.HTTPRoute
for _, namespace := range namespaces {
if !c.isWatchedNamespace(namespace) {
- log.WithoutContext().Warnf("Failed to get HTTPRoutes: %q is not within watched namespaces", namespace)
+ log.Warn().Msgf("Failed to get HTTPRoutes: %q is not within watched namespaces", namespace)
continue
}

@@ -274,7 +274,7 @@ func (c *clientWrapper) GetHTTPRoutes(namespaces []string) ([]*v1alpha2.HTTPRout
}

if len(routes) == 0 {
- log.WithoutContext().Debugf("No HTTPRoutes found in namespace %q", namespace)
+ log.Debug().Msgf("No HTTPRoutes found in namespace %q", namespace)
continue
}

@@ -288,7 +288,7 @@ func (c *clientWrapper) GetTCPRoutes(namespaces []string) ([]*v1alpha2.TCPRoute,
var tcpRoutes []*v1alpha2.TCPRoute
for _, namespace := range namespaces {
if !c.isWatchedNamespace(namespace) {
- log.WithoutContext().Warnf("Failed to get TCPRoutes: %q is not within watched namespaces", namespace)
+ log.Warn().Msgf("Failed to get TCPRoutes: %q is not within watched namespaces", namespace)
continue
}

@@ -298,7 +298,7 @@ func (c *clientWrapper) GetTCPRoutes(namespaces []string) ([]*v1alpha2.TCPRoute,
}

if len(routes) == 0 {
- log.WithoutContext().Debugf("No TCPRoutes found in namespace %q", namespace)
+ log.Debug().Msgf("No TCPRoutes found in namespace %q", namespace)
continue
}

@@ -311,7 +311,7 @@ func (c *clientWrapper) GetTLSRoutes(namespaces []string) ([]*v1alpha2.TLSRoute,
var tlsRoutes []*v1alpha2.TLSRoute
for _, namespace := range namespaces {
if !c.isWatchedNamespace(namespace) {
- log.WithoutContext().Warnf("Failed to get TLSRoutes: %q is not within watched namespaces", namespace)
+ log.Warn().Msgf("Failed to get TLSRoutes: %q is not within watched namespaces", namespace)
continue
}

@@ -321,7 +321,7 @@ func (c *clientWrapper) GetTLSRoutes(namespaces []string) ([]*v1alpha2.TLSRoute,
}

if len(routes) == 0 {
- log.WithoutContext().Debugf("No TLSRoutes found in namespace %q", namespace)
+ log.Debug().Msgf("No TLSRoutes found in namespace %q", namespace)
continue
}

@@ -336,7 +336,7 @@ func (c *clientWrapper) GetGateways() []*v1alpha2.Gateway {
for ns, factory := range c.factoriesGateway {
gateways, err := factory.Gateway().V1alpha2().Gateways().Lister().List(labels.Everything())
if err != nil {
- log.WithoutContext().Errorf("Failed to list Gateways in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list Gateways in namespace %s", ns)
continue
}
result = append(result, gateways...)
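In the Gateway API client, log.WithoutContext().Warnf/Debugf/Errorf turns into direct calls on zerolog's package-level logger; no context needs to be threaded through when none is available. A sketch (namespace value is invented):

```go
package main

import "github.com/rs/zerolog/log"

func main() {
	namespace := "kube-system"

	// The package-level logger is used directly where no context is at hand.
	log.Warn().Msgf("Failed to get HTTPRoutes: %q is not within watched namespaces", namespace)
	log.Debug().Msgf("No HTTPRoutes found in namespace %q", namespace)
}
```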
@@ -15,10 +15,11 @@ import (
"github.com/cenkalti/backoff/v4"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/hashstructure"
+ "github.com/rs/zerolog/log"
ptypes "github.com/traefik/paerser/types"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/job"
- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/traefik/traefik/v2/pkg/logs"
"github.com/traefik/traefik/v2/pkg/provider"
traefikv1alpha1 "github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"
"github.com/traefik/traefik/v2/pkg/safe"

@@ -67,24 +68,19 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
return nil, fmt.Errorf("invalid label selector: %q", p.LabelSelector)
}

- logger := log.FromContext(ctx)
- logger.Infof("label selector is: %q", p.LabelSelector)
-
- withEndpoint := ""
- if p.Endpoint != "" {
- withEndpoint = fmt.Sprintf(" with endpoint %s", p.Endpoint)
- }
+ logger := log.Ctx(ctx)
+ logger.Info().Msgf("Label selector is: %q", p.LabelSelector)

var client *clientWrapper
switch {
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
- logger.Infof("Creating in-cluster Provider client%s", withEndpoint)
+ logger.Info().Str("endpoint", p.Endpoint).Msg("Creating in-cluster Provider client")
client, err = newInClusterClient(p.Endpoint)
case os.Getenv("KUBECONFIG") != "":
- logger.Infof("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
+ logger.Info().Msgf("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
client, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
default:
- logger.Infof("Creating cluster-external Provider client%s", withEndpoint)
+ logger.Info().Str("endpoint", p.Endpoint).Msg("Creating cluster-external Provider client")
client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}

@@ -104,8 +100,8 @@ func (p *Provider) Init() error {

// Provide allows the k8s provider to provide configurations to traefik using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
- ctxLog := log.With(context.Background(), log.Str(log.ProviderName, providerName))
- logger := log.FromContext(ctxLog)
+ logger := log.With().Str(logs.ProviderName, providerName).Logger()
+ ctxLog := logger.WithContext(context.Background())

k8sClient, err := p.newK8sClient(ctxLog)
if err != nil {

@@ -116,7 +112,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
operation := func() error {
eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
- logger.Errorf("Error watching kubernetes events: %v", err)
+ logger.Error().Err(err).Msg("Error watching kubernetes events")
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:

@@ -145,9 +141,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
confHash, err := hashstructure.Hash(conf, nil)
switch {
case err != nil:
- logger.Error("Unable to hash the configuration")
+ logger.Error().Msg("Unable to hash the configuration")
case p.lastConfiguration.Get() == confHash:
- logger.Debugf("Skipping Kubernetes event kind %T", event)
+ logger.Debug().Msgf("Skipping Kubernetes event kind %T", event)
default:
p.lastConfiguration.Set(confHash)
configurationChan <- dynamic.Message{

@@ -165,11 +161,11 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}

notify := func(err error, time time.Duration) {
- logger.Errorf("Provider connection error: %v; retrying in %s", err, time)
+ logger.Error().Err(err).Msgf("Provider connection error, retrying in %s", time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
- logger.Errorf("Cannot connect to Provider: %v", err)
+ logger.Error().Err(err).Msg("Cannot connect to Provider")
}
})

@@ -178,13 +174,13 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.

// TODO Handle errors and update resources statuses (gatewayClass, gateway).
func (p *Provider) loadConfigurationFromGateway(ctx context.Context, client Client) *dynamic.Configuration {
- logger := log.FromContext(ctx)
+ logger := log.Ctx(ctx)

gatewayClassNames := map[string]struct{}{}

gatewayClasses, err := client.GetGatewayClasses()
if err != nil {
- logger.Errorf("Cannot find GatewayClasses: %v", err)
+ logger.Error().Err(err).Msg("Cannot find GatewayClasses")
return &dynamic.Configuration{
UDP: &dynamic.UDPConfiguration{
Routers: map[string]*dynamic.UDPRouter{},

@@ -215,7 +211,7 @@ func (p *Provider) loadConfigurationFromGateway(ctx context.Context, client Clie
LastTransitionTime: metav1.Now(),
})
if err != nil {
- logger.Errorf("Failed to update %s condition: %v", v1alpha2.GatewayClassConditionStatusAccepted, err)
+ logger.Error().Err(err).Msgf("Failed to update %s condition", v1alpha2.GatewayClassConditionStatusAccepted)
}
}
}

@@ -224,8 +220,8 @@ func (p *Provider) loadConfigurationFromGateway(ctx context.Context, client Clie

// TODO check if we can only use the default filtering mechanism
for _, gateway := range client.GetGateways() {
- ctxLog := log.With(ctx, log.Str("gateway", gateway.Name), log.Str("namespace", gateway.Namespace))
- logger := log.FromContext(ctxLog)
+ logger := log.Ctx(ctx).With().Str("gateway", gateway.Name).Str("namespace", gateway.Namespace).Logger()
+ ctxLog := logger.WithContext(ctx)

if _, ok := gatewayClassNames[string(gateway.Spec.GatewayClassName)]; !ok {
continue

@@ -233,7 +229,7 @@ func (p *Provider) loadConfigurationFromGateway(ctx context.Context, client Clie

cfg, err := p.createGatewayConf(ctxLog, client, gateway)
if err != nil {
- logger.Error(err)
+ logger.Error().Err(err).Send()
continue
}

@@ -315,7 +311,7 @@ func (p *Provider) createGatewayConf(ctx context.Context, client Client, gateway
}

func (p *Provider) fillGatewayConf(ctx context.Context, client Client, gateway *v1alpha2.Gateway, conf *dynamic.Configuration, tlsConfigs map[string]*tls.CertAndStores) []v1alpha2.ListenerStatus {
- logger := log.FromContext(ctx)
+ logger := log.Ctx(ctx)
listenerStatuses := make([]v1alpha2.ListenerStatus, len(gateway.Spec.Listeners))
allocatedListeners := make(map[string]struct{})

@@ -407,7 +403,7 @@ func (p *Provider) fillGatewayConf(ctx context.Context, client Client, gateway *

if isTLSPassthrough && len(listener.TLS.CertificateRefs) > 0 {
// https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.GatewayTLSConfig
- logger.Warnf("In case of Passthrough TLS mode, no TLS settings take effect as the TLS session from the client is NOT terminated at the Gateway")
+ logger.Warn().Msg("In case of Passthrough TLS mode, no TLS settings take effect as the TLS session from the client is NOT terminated at the Gateway")
}

// Allowed configurations:

@@ -684,7 +680,7 @@ func gatewayHTTPRouteToHTTPConf(ctx context.Context, ep string, listener v1alpha
}

if len(routes) == 0 {
- log.FromContext(ctx).Debugf("No HTTPRoutes found")
+ log.Ctx(ctx).Debug().Msg("No HTTPRoutes found")
return nil
}

@@ -825,7 +821,7 @@ func gatewayTCPRouteToTCPConf(ctx context.Context, ep string, listener v1alpha2.
}

if len(routes) == 0 {
- log.FromContext(ctx).Debugf("No TCPRoutes found")
+ log.Ctx(ctx).Debug().Msg("No TCPRoutes found")
return nil
}

@@ -955,7 +951,7 @@ func gatewayTLSRouteToTCPConf(ctx context.Context, ep string, listener v1alpha2.
}

if len(routes) == 0 {
- log.FromContext(ctx).Debugf("No TLSRoutes found")
+ log.Ctx(ctx).Debug().Msg("No TLSRoutes found")
return nil
}

@@ -1466,7 +1462,7 @@ func loadServices(client Client, namespace string, backendRefs []v1alpha2.HTTPBa
// "DroppedRoutes" reason. The gateway status for this route
// should be updated with a condition that describes the error
// more specifically.
- log.WithoutContext().Errorf("A multiple ports Kubernetes Service cannot be used if unspecified backendRef.Port")
+ log.Error().Msg("A multiple ports Kubernetes Service cannot be used if unspecified backendRef.Port")
continue
}

@@ -1587,7 +1583,7 @@ func loadTCPServices(client Client, namespace string, backendRefs []v1alpha2.Bac
// "DroppedRoutes" reason. The gateway status for this route
// should be updated with a condition that describes the error
// more specifically.
- log.WithoutContext().Errorf("A multiple ports Kubernetes Service cannot be used if unspecified backendRef.Port")
+ log.Error().Msg("A multiple ports Kubernetes Service cannot be used if unspecified backendRef.Port")
continue
}

@@ -1684,7 +1680,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *s
default:
// We already have an event in eventsChanBuffered, so we'll do a refresh as soon as our throttle allows us to.
// It's fine to drop the event and keep whatever's in the buffer -- we don't do different things for different events
- log.FromContext(ctx).Debugf("Dropping event kind %T due to throttling", nextEvent)
+ log.Ctx(ctx).Debug().Msgf("Dropping event kind %T due to throttling", nextEvent)
}
}
}
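Where the old code called log.FromContext(ctx) once and reused the result, the new code calls log.Ctx(ctx), which returns a *zerolog.Logger that can likewise be kept and reused for several events in the same function. A sketch (the gateway field and messages are illustrative):

```go
package main

import (
	"context"
	"errors"

	"github.com/rs/zerolog/log"
)

func fillGatewayConf(ctx context.Context) {
	logger := log.Ctx(ctx) // fetched once, reused below

	logger.Warn().Msg("In case of Passthrough TLS mode, no TLS settings take effect")
	logger.Error().Err(errors.New("status update failed")).Msgf("Failed to update %s condition", "Accepted")
}

func main() {
	// Embed a scoped logger so log.Ctx has something to return.
	ctx := log.With().Str("gateway", "my-gateway").Logger().WithContext(context.Background())
	fillGatewayConf(ctx)
}
```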
@@ -10,7 +10,7 @@ import (
"time"

"github.com/hashicorp/go-version"
- "github.com/traefik/traefik/v2/pkg/log"
+ "github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/provider/kubernetes/k8s"
traefikversion "github.com/traefik/traefik/v2/pkg/version"
corev1 "k8s.io/api/core/v1"

@@ -246,7 +246,7 @@ func (c *clientWrapper) GetIngresses() []*networkingv1.Ingress {
// networking
listNew, err := factory.Networking().V1().Ingresses().Lister().List(labels.Everything())
if err != nil {
- log.WithoutContext().Errorf("Failed to list ingresses in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list ingresses in namespace %s", ns)
continue
}

@@ -257,14 +257,14 @@ func (c *clientWrapper) GetIngresses() []*networkingv1.Ingress {
// networking beta
list, err := factory.Networking().V1beta1().Ingresses().Lister().List(labels.Everything())
if err != nil {
- log.WithoutContext().Errorf("Failed to list ingresses in namespace %s: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to list ingresses in namespace %s", ns)
continue
}

for _, ing := range list {
n, err := toNetworkingV1(ing)
if err != nil {
- log.WithoutContext().Errorf("Failed to convert ingress %s from networking/v1beta1 to networking/v1: %v", ns, err)
+ log.Error().Err(err).Msgf("Failed to convert ingress %s from networking/v1beta1 to networking/v1", ns)
continue
}

@@ -366,10 +366,10 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1.Ingress, ingStatus
return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err)
}

- logger := log.WithoutContext().WithField("namespace", ing.Namespace).WithField("ingress", ing.Name)
+ logger := log.With().Str("namespace", ing.Namespace).Str("ingress", ing.Name).Logger()

if isLoadBalancerIngressEquals(ing.Status.LoadBalancer.Ingress, ingStatus) {
- logger.Debug("Skipping ingress status update")
+ logger.Debug().Msg("Skipping ingress status update")
return nil
}

@@ -384,7 +384,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1.Ingress, ingStatus
return fmt.Errorf("failed to update ingress status %s/%s: %w", src.Namespace, src.Name, err)
}

- logger.Info("Updated ingress status")
+ logger.Info().Msg("Updated ingress status")
return nil
}

@@ -394,10 +394,10 @@ func (c *clientWrapper) updateIngressStatusOld(src *networkingv1.Ingress, ingSta
return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err)
}

- logger := log.WithoutContext().WithField("namespace", ing.Namespace).WithField("ingress", ing.Name)
+ logger := log.With().Str("namespace", ing.Namespace).Str("ingress", ing.Name).Logger()

if isLoadBalancerIngressEquals(ing.Status.LoadBalancer.Ingress, ingStatus) {
- logger.Debug("Skipping ingress status update")
+ logger.Debug().Msg("Skipping ingress status update")
return nil
}

@@ -411,7 +411,7 @@ func (c *clientWrapper) updateIngressStatusOld(src *networkingv1.Ingress, ingSta
if err != nil {
return fmt.Errorf("failed to update ingress status %s/%s: %w", src.Namespace, src.Name, err)
}
- logger.Info("Updated ingress status")
+ logger.Info().Msg("Updated ingress status")
return nil
}

@@ -484,7 +484,7 @@ func (c *clientWrapper) GetIngressClasses() ([]*networkingv1.IngressClass, error
if ic.Spec.Controller == traefikDefaultIngressClassController {
icN, err := toNetworkingV1IngressClass(ic)
if err != nil {
- log.WithoutContext().Errorf("Failed to convert ingress class %s from networking/v1beta1 to networking/v1: %v", ic.Name, err)
+ log.Error().Err(err).Msgf("Failed to convert ingress class %s from networking/v1beta1 to networking/v1", ic.Name)
continue
}
ics = append(ics, icN)
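The ingress provider hunks below also show the non-fatal variant: failures that used to go through Warnf("...: %v", err) now attach the error to a warning-level event, as in log.Ctx(ctx).Warn().Err(err).Msg(...). A sketch (the error text is invented):

```go
package main

import (
	"context"
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	// Associate the global logger with a context so log.Ctx returns it.
	ctx := log.Logger.WithContext(context.Background())

	err := errors.New("ingressclasses.networking.k8s.io is forbidden")

	// Warning-level event carrying the error; processing continues afterwards.
	log.Ctx(ctx).Warn().Err(err).Msg("Failed to list ingress classes")
}
```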
@ -15,10 +15,11 @@ import (
|
|||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/mitchellh/hashstructure"
|
||||
"github.com/rs/zerolog/log"
|
||||
ptypes "github.com/traefik/paerser/types"
|
||||
"github.com/traefik/traefik/v2/pkg/config/dynamic"
|
||||
"github.com/traefik/traefik/v2/pkg/job"
|
||||
"github.com/traefik/traefik/v2/pkg/log"
|
||||
"github.com/traefik/traefik/v2/pkg/logs"
|
||||
"github.com/traefik/traefik/v2/pkg/provider"
|
||||
"github.com/traefik/traefik/v2/pkg/safe"
|
||||
"github.com/traefik/traefik/v2/pkg/tls"
|
||||
|
@ -62,9 +63,9 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
|
|||
return nil, fmt.Errorf("invalid ingress label selector: %q", p.LabelSelector)
|
||||
}
|
||||
|
||||
logger := log.FromContext(ctx)
|
||||
logger := log.Ctx(ctx)
|
||||
|
||||
logger.Infof("ingress label selector is: %q", p.LabelSelector)
|
||||
logger.Info().Msgf("ingress label selector is: %q", p.LabelSelector)
|
||||
|
||||
withEndpoint := ""
|
||||
if p.Endpoint != "" {
|
||||
|
@ -74,13 +75,13 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
|
|||
var cl *clientWrapper
|
||||
switch {
|
||||
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
|
||||
logger.Infof("Creating in-cluster Provider client%s", withEndpoint)
|
||||
logger.Info().Msgf("Creating in-cluster Provider client%s", withEndpoint)
|
||||
cl, err = newInClusterClient(p.Endpoint)
|
||||
case os.Getenv("KUBECONFIG") != "":
|
||||
logger.Infof("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
|
||||
logger.Info().Msgf("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
|
||||
cl, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
|
||||
default:
|
||||
logger.Infof("Creating cluster-external Provider client%s", withEndpoint)
|
||||
logger.Info().Msgf("Creating cluster-external Provider client%s", withEndpoint)
|
||||
cl, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
|
||||
}
|
||||
|
||||
|
@ -100,8 +101,8 @@ func (p *Provider) Init() error {
// Provide allows the k8s provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes"))
logger := log.FromContext(ctxLog)
logger := log.With().Str(logs.ProviderName, "kubernetes").Logger()
ctxLog := logger.WithContext(context.Background())

k8sClient, err := p.newK8sClient(ctxLog)
if err != nil {

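Note: the Provide hunk above replaces the old context-based logger (log.With(ctx, log.Str(...)) plus log.FromContext) with a zerolog logger that carries the provider name as a field and is then stored in the context. A small sketch of that pattern, assuming a literal field name in place of the logs.ProviderName constant used by the real code:

package main

import (
	"context"

	"github.com/rs/zerolog/log"
)

func main() {
	// Build a logger scoped to this provider; "providerName" stands in for logs.ProviderName.
	logger := log.With().Str("providerName", "kubernetes").Logger()

	// Attach it to a context so downstream functions can recover the same scoped logger.
	ctx := logger.WithContext(context.Background())

	// Anything logged through the context now carries the providerName field.
	log.Ctx(ctx).Info().Msg("Creating in-cluster Provider client")
}
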
@ -109,14 +110,14 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}

if p.AllowExternalNameServices {
logger.Warn("ExternalName service loading is enabled, please ensure that this is expected (see AllowExternalNameServices option)")
logger.Warn().Msg("ExternalName service loading is enabled, please ensure that this is expected (see AllowExternalNameServices option)")
}

pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
logger.Error().Err(err).Msg("Error watching kubernetes events")
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:

@ -147,9 +148,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
confHash, err := hashstructure.Hash(conf, nil)
switch {
case err != nil:
logger.Error("Unable to hash the configuration")
logger.Error().Msg("Unable to hash the configuration")
case p.lastConfiguration.Get() == confHash:
logger.Debugf("Skipping Kubernetes event kind %T", event)
logger.Debug().Msgf("Skipping Kubernetes event kind %T", event)
default:
p.lastConfiguration.Set(confHash)
configurationChan <- dynamic.Message{

@ -167,12 +168,12 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}

notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %s; retrying in %s", err, time)
logger.Error().Err(err).Msgf("Provider connection error, retrying in %s", time)
}

err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %s", err)
logger.Error().Err(err).Msg("Cannot connect to Provider")
}
})

@ -196,7 +197,7 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
if supportsIngressClass(serverVersion) {
ics, err := client.GetIngressClasses()
if err != nil {
log.FromContext(ctx).Warnf("Failed to list ingress classes: %v", err)
log.Ctx(ctx).Warn().Err(err).Msg("Failed to list ingress classes")
}

if p.IngressClass != "" {

@ -210,7 +211,8 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
certConfigs := make(map[string]*tls.CertAndStores)
for _, ingress := range ingresses {
ctx = log.With(ctx, log.Str("ingress", ingress.Name), log.Str("namespace", ingress.Namespace))
logger := log.Ctx(ctx).With().Str("ingress", ingress.Name).Str("namespace", ingress.Namespace).Logger()
ctx = logger.WithContext(ctx)

if !p.shouldProcessIngress(ingress, ingressClasses) {
continue

@ -218,35 +220,36 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
rtConfig, err := parseRouterConfig(ingress.Annotations)
if err != nil {
log.FromContext(ctx).Errorf("Failed to parse annotations: %v", err)
logger.Error().Err(err).Msg("Failed to parse annotations")
continue
}

err = getCertificates(ctx, ingress, client, certConfigs)
if err != nil {
log.FromContext(ctx).Errorf("Error configuring TLS: %v", err)
logger.Error().Err(err).Msg("Error configuring TLS")
}

if len(ingress.Spec.Rules) == 0 && ingress.Spec.DefaultBackend != nil {
if _, ok := conf.HTTP.Services["default-backend"]; ok {
log.FromContext(ctx).Error("The default backend already exists.")
logger.Error().Msg("The default backend already exists.")
continue
}

service, err := p.loadService(client, ingress.Namespace, *ingress.Spec.DefaultBackend)
if err != nil {
log.FromContext(ctx).
WithField("serviceName", ingress.Spec.DefaultBackend.Service.Name).
WithField("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
Errorf("Cannot create service: %v", err)
logger.Error().
Str("serviceName", ingress.Spec.DefaultBackend.Service.Name).
Str("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
Err(err).
Msg("Cannot create service")
continue
}

if len(service.LoadBalancer.Servers) == 0 && !p.AllowEmptyServices {
log.FromContext(ctx).
WithField("serviceName", ingress.Spec.DefaultBackend.Service.Name).
WithField("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
Errorf("Skipping service: no endpoints found")
logger.Error().
Str("serviceName", ingress.Spec.DefaultBackend.Service.Name).
Str("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
Msg("Skipping service: no endpoints found")
continue
}

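Note: the multi-field blocks above all translate the same way. A chained log.FromContext(ctx).WithField(...).Errorf(...) becomes one zerolog event that collects typed Str fields and an Err field before the terminal Msg call emits it. A hedged sketch with made-up service values:

package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("endpoints not found") // illustrative error

	// One event, several typed fields; nothing is written until Msg is called.
	log.Error().
		Str("serviceName", "whoami"). // illustrative value
		Str("servicePort", "80").     // illustrative value
		Err(err).
		Msg("Cannot create service")
}
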
@ -270,7 +273,7 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
for _, rule := range ingress.Spec.Rules {
if err := p.updateIngressStatus(ingress, client); err != nil {
log.FromContext(ctx).Errorf("Error while updating ingress status: %v", err)
logger.Error().Err(err).Msg("Error while updating ingress status")
}

if rule.HTTP == nil {

@ -280,18 +283,19 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
for _, pa := range rule.HTTP.Paths {
service, err := p.loadService(client, ingress.Namespace, pa.Backend)
if err != nil {
log.FromContext(ctx).
WithField("serviceName", pa.Backend.Service.Name).
WithField("servicePort", pa.Backend.Service.Port.String()).
Errorf("Cannot create service: %v", err)
logger.Error().
Str("serviceName", pa.Backend.Service.Name).
Str("servicePort", pa.Backend.Service.Port.String()).
Err(err).
Msg("Cannot create service")
continue
}

if len(service.LoadBalancer.Servers) == 0 && !p.AllowEmptyServices {
log.FromContext(ctx).
WithField("serviceName", pa.Backend.Service.Name).
WithField("servicePort", pa.Backend.Service.Port.String()).
Errorf("Skipping service: no endpoints found")
logger.Error().
Str("serviceName", pa.Backend.Service.Name).
Str("servicePort", pa.Backend.Service.Port.String()).
Msg("Skipping service: no endpoints found")
continue
}

@ -315,12 +319,12 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
continue
}

log.FromContext(ctx).Debugf("Multiple routers are defined with the same key %q, generating hashes to avoid conflicts", routerKey)
logger.Debug().Msgf("Multiple routers are defined with the same key %q, generating hashes to avoid conflicts", routerKey)

for _, router := range conflictingRouters {
key, err := makeRouterKeyWithHash(routerKey, router.Rule)
if err != nil {
log.FromContext(ctx).Error(err)
logger.Error().Err(err).Send()
continue
}

@ -367,7 +371,7 @@ func (p *Provider) updateIngressStatus(ing *networkingv1.Ingress, k8sClient Clie
if exists && service.Status.LoadBalancer.Ingress == nil {
// service exists, but has no Load Balancer status
log.Debugf("Skipping updating Ingress %s/%s due to service %s having no status set", ing.Namespace, ing.Name, p.IngressEndpoint.PublishedService)
log.Debug().Msgf("Skipping updating Ingress %s/%s due to service %s having no status set", ing.Namespace, ing.Name, p.IngressEndpoint.PublishedService)
return nil
}

@ -405,7 +409,7 @@ func buildHostRule(host string) string {
func getCertificates(ctx context.Context, ingress *networkingv1.Ingress, k8sClient Client, tlsConfigs map[string]*tls.CertAndStores) error {
for _, t := range ingress.Spec.TLS {
if t.SecretName == "" {
log.FromContext(ctx).Debugf("Skipping TLS sub-section: No secret name provided")
log.Ctx(ctx).Debug().Msg("Skipping TLS sub-section: No secret name provided")
continue
}

@ -673,7 +677,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *s
// do a refresh as soon as our throttle allows us to. It's fine
// to drop the event and keep whatever's in the buffer -- we
// don't do different things for different events.
log.FromContext(ctx).Debugf("Dropping event kind %T due to throttling", nextEvent)
log.Ctx(ctx).Debug().Msgf("Dropping event kind %T due to throttling", nextEvent)
}
}
}

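Note: helpers such as getCertificates and throttleEvents no longer pull a Traefik logger out of the context; they call zerolog's log.Ctx(ctx), which returns whatever logger was attached earlier via WithContext (or a disabled default when nothing was attached). A minimal round-trip sketch; dropEvent is a hypothetical stand-in for the throttling helper:

package main

import (
	"context"

	"github.com/rs/zerolog/log"
)

// dropEvent is a hypothetical helper mirroring how throttleEvents logs a skipped event.
func dropEvent(ctx context.Context, event interface{}) {
	log.Ctx(ctx).Debug().Msgf("Dropping event kind %T due to throttling", event)
}

func main() {
	// Attach a provider-scoped logger to the context...
	logger := log.With().Str("providerName", "kubernetes").Logger()
	ctx := logger.WithContext(context.Background())

	// ...and the helper picks it up again, fields included.
	dropEvent(ctx, struct{}{})
}
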
@ -5,7 +5,7 @@ import (
"regexp"
"strings"

"github.com/traefik/traefik/v2/pkg/log"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
)

@ -28,7 +28,7 @@ func MustParseYaml(content []byte) []runtime.Object {
}

if !acceptedK8sTypes.MatchString(groupVersionKind.Kind) {
log.WithoutContext().Debugf("The custom-roles configMap contained K8s object types which are not supported! Skipping object with type: %s", groupVersionKind.Kind)
log.Debug().Msgf("The custom-roles configMap contained K8s object types which are not supported! Skipping object with type: %s", groupVersionKind.Kind)
} else {
retVal = append(retVal, obj)
}