New logger for the Traefik logs

parent 27c02b5a56
commit 56f7515ecd

297 changed files with 2337 additions and 1934 deletions
@@ -15,10 +15,11 @@ import (
 	"github.com/cenkalti/backoff/v4"
 	"github.com/mitchellh/hashstructure"
+	"github.com/rs/zerolog/log"
 	ptypes "github.com/traefik/paerser/types"
 	"github.com/traefik/traefik/v2/pkg/config/dynamic"
 	"github.com/traefik/traefik/v2/pkg/job"
-	"github.com/traefik/traefik/v2/pkg/log"
+	"github.com/traefik/traefik/v2/pkg/logs"
 	"github.com/traefik/traefik/v2/pkg/provider"
 	"github.com/traefik/traefik/v2/pkg/safe"
 	"github.com/traefik/traefik/v2/pkg/tls"
 
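The import swap above is the whole migration in miniature: Traefik's logrus-backed `pkg/log` wrapper goes away, the global zerolog façade `github.com/rs/zerolog/log` comes in, and the new `pkg/logs` package carries Traefik's own helpers (such as the `ProviderName` field key used later in this diff). A minimal, self-contained sketch of the zerolog call style the rest of the hunks convert to — the field values and messages here are illustrative, not from Traefik:

```go
package main

import (
	"errors"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// zerolog events are built level-first and finished with Msg/Msgf;
	// nothing is written until the event is terminated.
	log.Info().Msg("starting")

	// Errors and fields are attached as typed, structured values instead
	// of being formatted into the message string.
	log.Error().Err(errors.New("boom")).Str("component", "demo").Msg("something failed")

	// The global level can be raised to filter out debug events.
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	log.Debug().Msg("filtered out")
}
```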
@@ -62,9 +63,9 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
 		return nil, fmt.Errorf("invalid ingress label selector: %q", p.LabelSelector)
 	}
 
-	logger := log.FromContext(ctx)
+	logger := log.Ctx(ctx)
 
-	logger.Infof("ingress label selector is: %q", p.LabelSelector)
+	logger.Info().Msgf("ingress label selector is: %q", p.LabelSelector)
 
 	withEndpoint := ""
 	if p.Endpoint != "" {
@@ -74,13 +75,13 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
 	var cl *clientWrapper
 	switch {
 	case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
-		logger.Infof("Creating in-cluster Provider client%s", withEndpoint)
+		logger.Info().Msgf("Creating in-cluster Provider client%s", withEndpoint)
 		cl, err = newInClusterClient(p.Endpoint)
 	case os.Getenv("KUBECONFIG") != "":
-		logger.Infof("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
+		logger.Info().Msgf("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
 		cl, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
 	default:
-		logger.Infof("Creating cluster-external Provider client%s", withEndpoint)
+		logger.Info().Msgf("Creating cluster-external Provider client%s", withEndpoint)
 		cl, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
 	}
 
@@ -100,8 +101,8 @@ func (p *Provider) Init() error {
 // Provide allows the k8s provider to provide configurations to traefik
 // using the given configuration channel.
 func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
-	ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes"))
-	logger := log.FromContext(ctxLog)
+	logger := log.With().Str(logs.ProviderName, "kubernetes").Logger()
+	ctxLog := logger.WithContext(context.Background())
 
 	k8sClient, err := p.newK8sClient(ctxLog)
 	if err != nil {
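Note the inversion in `Provide`: the old code built a context first and extracted a logger from it, while the new code builds the zerolog logger first and derives `ctxLog` from it via `WithContext`. A small sketch of that round-trip, using a plain `"providerName"` string where Traefik uses its `logs.ProviderName` constant:

```go
package main

import (
	"context"

	"github.com/rs/zerolog/log"
)

func main() {
	// Build the logger first, with a static field, then derive the
	// context from it -- the reverse of the old wiring.
	logger := log.With().Str("providerName", "kubernetes").Logger()
	ctx := logger.WithContext(context.Background())

	work(ctx)
}

func work(ctx context.Context) {
	// log.Ctx recovers the logger embedded in the context, so events
	// emitted here inherit the providerName field automatically.
	log.Ctx(ctx).Info().Msg("watching cluster events")
}
```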
@@ -109,14 +110,14 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 	}
 
 	if p.AllowExternalNameServices {
-		logger.Warn("ExternalName service loading is enabled, please ensure that this is expected (see AllowExternalNameServices option)")
+		logger.Warn().Msg("ExternalName service loading is enabled, please ensure that this is expected (see AllowExternalNameServices option)")
 	}
 
 	pool.GoCtx(func(ctxPool context.Context) {
 		operation := func() error {
 			eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
 			if err != nil {
-				logger.Errorf("Error watching kubernetes events: %v", err)
+				logger.Error().Err(err).Msg("Error watching kubernetes events")
 				timer := time.NewTimer(1 * time.Second)
 				select {
 				case <-timer.C:
@@ -147,9 +148,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 					confHash, err := hashstructure.Hash(conf, nil)
 					switch {
 					case err != nil:
-						logger.Error("Unable to hash the configuration")
+						logger.Error().Msg("Unable to hash the configuration")
 					case p.lastConfiguration.Get() == confHash:
-						logger.Debugf("Skipping Kubernetes event kind %T", event)
+						logger.Debug().Msgf("Skipping Kubernetes event kind %T", event)
 					default:
 						p.lastConfiguration.Set(confHash)
 						configurationChan <- dynamic.Message{
@@ -167,12 +168,12 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 		}
 
 		notify := func(err error, time time.Duration) {
-			logger.Errorf("Provider connection error: %s; retrying in %s", err, time)
+			logger.Error().Err(err).Msgf("Provider connection error, retrying in %s", time)
 		}
 
 		err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
 		if err != nil {
-			logger.Errorf("Cannot connect to Provider: %s", err)
+			logger.Error().Err(err).Msg("Cannot connect to Provider")
 		}
 	})
 
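The notify callback also shows the error-handling pattern: instead of interpolating `err` into the format string, the error is attached with `Err(err)` so it lands in its own structured field. A runnable sketch of the same retry wiring, with a hypothetical stub operation standing in for `k8sClient.WatchAll` and a bounded backoff in place of Traefik's `job.NewBackOff`:

```go
package main

import (
	"errors"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/rs/zerolog/log"
)

func main() {
	operation := func() error {
		// Hypothetical flaky call standing in for k8sClient.WatchAll.
		return errors.New("connection refused")
	}

	// As in the patched notify func: the error travels as a field,
	// while the message only carries the retry delay.
	notify := func(err error, delay time.Duration) {
		log.Error().Err(err).Msgf("Provider connection error, retrying in %s", delay)
	}

	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3)
	if err := backoff.RetryNotify(operation, b, notify); err != nil {
		log.Error().Err(err).Msg("Cannot connect to Provider")
	}
}
```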
@@ -196,7 +197,7 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
 	if supportsIngressClass(serverVersion) {
 		ics, err := client.GetIngressClasses()
 		if err != nil {
-			log.FromContext(ctx).Warnf("Failed to list ingress classes: %v", err)
+			log.Ctx(ctx).Warn().Err(err).Msg("Failed to list ingress classes")
 		}
 
 		if p.IngressClass != "" {
@@ -210,7 +211,8 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
 
 	certConfigs := make(map[string]*tls.CertAndStores)
 	for _, ingress := range ingresses {
-		ctx = log.With(ctx, log.Str("ingress", ingress.Name), log.Str("namespace", ingress.Namespace))
+		logger := log.Ctx(ctx).With().Str("ingress", ingress.Name).Str("namespace", ingress.Namespace).Logger()
+		ctx = logger.WithContext(ctx)
 
 		if !p.shouldProcessIngress(ingress, ingressClasses) {
 			continue
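Per-ingress fields change shape as well: rather than stacking fields onto `ctx` with the old `log.With` helper, the loop derives a child zerolog logger from the one already in the context, adds the ingress and namespace fields, and re-embeds it. A sketch of that scoping, with hypothetical ingress names:

```go
package main

import (
	"context"

	"github.com/rs/zerolog/log"
)

type ingress struct{ Name, Namespace string }

func main() {
	base := log.With().Str("providerName", "kubernetes").Logger()
	ctx := base.WithContext(context.Background())

	for _, ing := range []ingress{{"whoami", "default"}, {"dashboard", "kube-system"}} {
		// Derive a per-ingress child logger; the parent stored in ctx
		// keeps only its own fields, so fields do not leak between loggers.
		logger := log.Ctx(ctx).With().
			Str("ingress", ing.Name).
			Str("namespace", ing.Namespace).
			Logger()
		ingCtx := logger.WithContext(ctx)

		process(ingCtx)
	}
}

func process(ctx context.Context) {
	log.Ctx(ctx).Debug().Msg("processing ingress")
}
```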
@@ -218,35 +220,36 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
 
 		rtConfig, err := parseRouterConfig(ingress.Annotations)
 		if err != nil {
-			log.FromContext(ctx).Errorf("Failed to parse annotations: %v", err)
+			logger.Error().Err(err).Msg("Failed to parse annotations")
 			continue
 		}
 
 		err = getCertificates(ctx, ingress, client, certConfigs)
 		if err != nil {
-			log.FromContext(ctx).Errorf("Error configuring TLS: %v", err)
+			logger.Error().Err(err).Msg("Error configuring TLS")
 		}
 
 		if len(ingress.Spec.Rules) == 0 && ingress.Spec.DefaultBackend != nil {
 			if _, ok := conf.HTTP.Services["default-backend"]; ok {
-				log.FromContext(ctx).Error("The default backend already exists.")
+				logger.Error().Msg("The default backend already exists.")
 				continue
 			}
 
 			service, err := p.loadService(client, ingress.Namespace, *ingress.Spec.DefaultBackend)
 			if err != nil {
-				log.FromContext(ctx).
-					WithField("serviceName", ingress.Spec.DefaultBackend.Service.Name).
-					WithField("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
-					Errorf("Cannot create service: %v", err)
+				logger.Error().
+					Str("serviceName", ingress.Spec.DefaultBackend.Service.Name).
+					Str("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
+					Err(err).
+					Msg("Cannot create service")
 				continue
 			}
 
 			if len(service.LoadBalancer.Servers) == 0 && !p.AllowEmptyServices {
-				log.FromContext(ctx).
-					WithField("serviceName", ingress.Spec.DefaultBackend.Service.Name).
-					WithField("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
-					Errorf("Skipping service: no endpoints found")
+				logger.Error().
+					Str("serviceName", ingress.Spec.DefaultBackend.Service.Name).
+					Str("servicePort", ingress.Spec.DefaultBackend.Service.Port.String()).
+					Msg("Skipping service: no endpoints found")
 				continue
 			}
 
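The multi-field conversions above follow one rule: each logrus-style `WithField` pair becomes a typed `Str` field chained on the event, and the error moves out of the `Errorf` format string into `Err`. A compact before/after illustration with made-up service values:

```go
package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("service not found")

	// Old (logrus-style), for comparison:
	//   log.FromContext(ctx).
	//       WithField("serviceName", name).
	//       WithField("servicePort", port).
	//       Errorf("Cannot create service: %v", err)

	// New: typed fields and the error chained onto a single event.
	log.Error().
		Str("serviceName", "whoami").
		Str("servicePort", "80").
		Err(err).
		Msg("Cannot create service")
}
```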
@@ -270,7 +273,7 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
 
 		for _, rule := range ingress.Spec.Rules {
 			if err := p.updateIngressStatus(ingress, client); err != nil {
-				log.FromContext(ctx).Errorf("Error while updating ingress status: %v", err)
+				logger.Error().Err(err).Msg("Error while updating ingress status")
 			}
 
 			if rule.HTTP == nil {
@@ -280,18 +283,19 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
 			for _, pa := range rule.HTTP.Paths {
 				service, err := p.loadService(client, ingress.Namespace, pa.Backend)
 				if err != nil {
-					log.FromContext(ctx).
-						WithField("serviceName", pa.Backend.Service.Name).
-						WithField("servicePort", pa.Backend.Service.Port.String()).
-						Errorf("Cannot create service: %v", err)
+					logger.Error().
+						Str("serviceName", pa.Backend.Service.Name).
+						Str("servicePort", pa.Backend.Service.Port.String()).
+						Err(err).
+						Msg("Cannot create service")
 					continue
 				}
 
 				if len(service.LoadBalancer.Servers) == 0 && !p.AllowEmptyServices {
-					log.FromContext(ctx).
-						WithField("serviceName", pa.Backend.Service.Name).
-						WithField("servicePort", pa.Backend.Service.Port.String()).
-						Errorf("Skipping service: no endpoints found")
+					logger.Error().
+						Str("serviceName", pa.Backend.Service.Name).
+						Str("servicePort", pa.Backend.Service.Port.String()).
+						Msg("Skipping service: no endpoints found")
 					continue
 				}
 
@@ -315,12 +319,12 @@ func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Cl
 					continue
 				}
 
-				log.FromContext(ctx).Debugf("Multiple routers are defined with the same key %q, generating hashes to avoid conflicts", routerKey)
+				logger.Debug().Msgf("Multiple routers are defined with the same key %q, generating hashes to avoid conflicts", routerKey)
 
 				for _, router := range conflictingRouters {
 					key, err := makeRouterKeyWithHash(routerKey, router.Rule)
 					if err != nil {
-						log.FromContext(ctx).Error(err)
+						logger.Error().Err(err).Send()
 						continue
 					}
 
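One subtlety here: the old call logged the error itself as the message (`Error(err)`), and zerolog events need an explicit terminator, so the replacement attaches the error and fires the event with `Send()`, which is equivalent to `Msg("")`. A tiny sketch:

```go
package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("router key collision")

	// Send() terminates the event with an empty message, leaving the
	// error field as the only payload -- the closest zerolog match to
	// the old Error(err).
	log.Error().Err(err).Send()

	// Equivalent spelling:
	log.Error().Err(err).Msg("")
}
```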
@@ -367,7 +371,7 @@ func (p *Provider) updateIngressStatus(ing *networkingv1.Ingress, k8sClient Clie
 
 	if exists && service.Status.LoadBalancer.Ingress == nil {
 		// service exists, but has no Load Balancer status
-		log.Debugf("Skipping updating Ingress %s/%s due to service %s having no status set", ing.Namespace, ing.Name, p.IngressEndpoint.PublishedService)
+		log.Debug().Msgf("Skipping updating Ingress %s/%s due to service %s having no status set", ing.Namespace, ing.Name, p.IngressEndpoint.PublishedService)
 		return nil
 	}
 
@@ -405,7 +409,7 @@ func buildHostRule(host string) string {
 func getCertificates(ctx context.Context, ingress *networkingv1.Ingress, k8sClient Client, tlsConfigs map[string]*tls.CertAndStores) error {
 	for _, t := range ingress.Spec.TLS {
 		if t.SecretName == "" {
-			log.FromContext(ctx).Debugf("Skipping TLS sub-section: No secret name provided")
+			log.Ctx(ctx).Debug().Msg("Skipping TLS sub-section: No secret name provided")
 			continue
 		}
 
@@ -673,7 +677,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *s
 				// do a refresh as soon as our throttle allows us to. It's fine
 				// to drop the event and keep whatever's in the buffer -- we
 				// don't do different things for different events.
-				log.FromContext(ctx).Debugf("Dropping event kind %T due to throttling", nextEvent)
+				log.Ctx(ctx).Debug().Msgf("Dropping event kind %T due to throttling", nextEvent)
 			}
 		}
 	}
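For reference, the calls this diff converts to emit single-line JSON by default. A self-contained sketch (not Traefik's actual writer setup, which lives in `pkg/logs`) showing roughly what one migrated event serializes to:

```go
package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// A bare zerolog logger writing to stdout, carrying one static field.
	logger := zerolog.New(os.Stdout).With().
		Str("providerName", "kubernetes").
		Logger()

	logger.Error().Err(errors.New("boom")).Msg("Cannot connect to Provider")
	// Prints one JSON line, roughly:
	// {"level":"error","providerName":"kubernetes","error":"boom","message":"Cannot connect to Provider"}
}
```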