1
0
Fork 0

New logger for the Traefik logs

This commit is contained in:
Ludovic Fernandez 2022-11-21 18:36:05 +01:00 committed by GitHub
parent 27c02b5a56
commit 56f7515ecd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
297 changed files with 2337 additions and 1934 deletions

View file

@@ -7,9 +7,9 @@ import (
"net"
"strings"
"github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/config/label"
"github.com/traefik/traefik/v2/pkg/log"
"github.com/traefik/traefik/v2/pkg/provider"
"github.com/traefik/traefik/v2/pkg/provider/constraints"
)
@@ -18,17 +18,16 @@ func (p *Provider) buildConfiguration(ctx context.Context, services []rancherDat
configurations := make(map[string]*dynamic.Configuration)
for _, service := range services {
ctxService := log.With(ctx, log.Str("service", service.Name))
logger := log.Ctx(ctx).With().Str("service", service.Name).Logger()
ctxService := logger.WithContext(ctx)
if !p.keepService(ctx, service) {
continue
}
logger := log.FromContext(ctxService)
confFromLabel, err := label.DecodeConfiguration(service.Labels)
if err != nil {
logger.Error(err)
logger.Error().Err(err).Send()
continue
}
@@ -38,7 +37,7 @@ func (p *Provider) buildConfiguration(ctx context.Context, services []rancherDat
err := p.buildTCPServiceConfiguration(ctxService, service, confFromLabel.TCP)
if err != nil {
logger.Error(err)
logger.Error().Err(err).Send()
continue
}
provider.BuildTCPRouterConfiguration(ctxService, confFromLabel.TCP)
@@ -49,7 +48,7 @@ func (p *Provider) buildConfiguration(ctx context.Context, services []rancherDat
err := p.buildUDPServiceConfiguration(ctxService, service, confFromLabel.UDP)
if err != nil {
logger.Error(err)
logger.Error().Err(err).Send()
continue
}
provider.BuildUDPRouterConfiguration(ctxService, confFromLabel.UDP)
@@ -64,7 +63,7 @@ func (p *Provider) buildConfiguration(ctx context.Context, services []rancherDat
err = p.buildServiceConfiguration(ctx, service, confFromLabel.HTTP)
if err != nil {
logger.Error(err)
logger.Error().Err(err).Send()
continue
}
@@ -151,30 +150,30 @@ func (p *Provider) buildServiceConfiguration(ctx context.Context, service ranche
}
func (p *Provider) keepService(ctx context.Context, service rancherData) bool {
logger := log.FromContext(ctx)
logger := log.Ctx(ctx)
if !service.ExtraConf.Enable {
logger.Debug("Filtering disabled service.")
logger.Debug().Msg("Filtering disabled service")
return false
}
matches, err := constraints.MatchLabels(service.Labels, p.Constraints)
if err != nil {
logger.Errorf("Error matching constraint expression: %v", err)
logger.Error().Err(err).Msg("Error matching constraint expression")
return false
}
if !matches {
logger.Debugf("Service pruned by constraint expression: %q", p.Constraints)
logger.Debug().Msgf("Service pruned by constraint expression: %q", p.Constraints)
return false
}
if p.EnableServiceHealthFilter {
if service.Health != "" && service.Health != healthy && service.Health != updatingHealthy {
logger.Debugf("Filtering service %s with healthState of %s \n", service.Name, service.Health)
logger.Debug().Msgf("Filtering service %s with healthState of %s", service.Name, service.Health)
return false
}
if service.State != "" && service.State != active && service.State != updatingActive && service.State != upgraded && service.State != upgrading {
logger.Debugf("Filtering service %s with state of %s \n", service.Name, service.State)
logger.Debug().Msgf("Filtering service %s with state of %s", service.Name, service.State)
return false
}
}
@@ -183,7 +182,7 @@ func (p *Provider) keepService(ctx context.Context, service rancherData) bool {
}
func (p *Provider) addServerTCP(ctx context.Context, service rancherData, loadBalancer *dynamic.TCPServersLoadBalancer) error {
log.FromContext(ctx).Debugf("Trying to add servers for service %s \n", service.Name)
log.Ctx(ctx).Debug().Msgf("Trying to add servers for service %s", service.Name)
if loadBalancer == nil {
return errors.New("load-balancer is not defined")
@@ -217,7 +216,7 @@ func (p *Provider) addServerTCP(ctx context.Context, service rancherData, loadBa
}
func (p *Provider) addServerUDP(ctx context.Context, service rancherData, loadBalancer *dynamic.UDPServersLoadBalancer) error {
log.FromContext(ctx).Debugf("Trying to add servers for service %s \n", service.Name)
log.Ctx(ctx).Debug().Msgf("Trying to add servers for service %s", service.Name)
if loadBalancer == nil {
return errors.New("load-balancer is not defined")
@@ -251,7 +250,7 @@ func (p *Provider) addServerUDP(ctx context.Context, service rancherData, loadBa
}
func (p *Provider) addServers(ctx context.Context, service rancherData, loadBalancer *dynamic.ServersLoadBalancer) error {
log.FromContext(ctx).Debugf("Trying to add servers for service %s \n", service.Name)
log.Ctx(ctx).Debug().Msgf("Trying to add servers for service %s", service.Name)
if loadBalancer == nil {
return errors.New("load-balancer is not defined")

View file

@@ -8,9 +8,10 @@ import (
"github.com/cenkalti/backoff/v4"
rancher "github.com/rancher/go-rancher-metadata/metadata"
"github.com/rs/zerolog/log"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/job"
"github.com/traefik/traefik/v2/pkg/log"
"github.com/traefik/traefik/v2/pkg/logs"
"github.com/traefik/traefik/v2/pkg/provider"
"github.com/traefik/traefik/v2/pkg/safe"
)
@@ -86,7 +87,7 @@ func (p *Provider) createClient(ctx context.Context) (rancher.Client, error) {
metadataServiceURL := fmt.Sprintf("http://rancher-metadata.rancher.internal/%s", p.Prefix)
client, err := rancher.NewClientAndWait(metadataServiceURL)
if err != nil {
log.FromContext(ctx).Errorf("Failed to create Rancher metadata service client: %v", err)
log.Ctx(ctx).Error().Err(err).Msg("Failed to create Rancher metadata service client")
return nil, err
}
@@ -96,20 +97,20 @@ func (p *Provider) createClient(ctx context.Context) (rancher.Client, error) {
// Provide allows the rancher provider to provide configurations to traefik using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
pool.GoCtx(func(routineCtx context.Context) {
ctxLog := log.With(routineCtx, log.Str(log.ProviderName, "rancher"))
logger := log.FromContext(ctxLog)
logger := log.Ctx(routineCtx).With().Str(logs.ProviderName, "rancher").Logger()
ctxLog := logger.WithContext(routineCtx)
operation := func() error {
client, err := p.createClient(ctxLog)
if err != nil {
logger.Errorf("Failed to create the metadata client metadata service: %v", err)
logger.Error().Err(err).Msg("Failed to create the metadata client metadata service")
return err
}
updateConfiguration := func(_ string) {
stacks, err := client.GetStacks()
if err != nil {
logger.Errorf("Failed to query Rancher metadata service: %v", err)
logger.Error().Err(err).Msg("Failed to query Rancher metadata service")
return
}
@@ -139,11 +140,11 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
}
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error %+v, retrying in %s", err, time)
logger.Error().Err(err).Msgf("Provider connection error, retrying in %s", time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxLog), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider server: %+v", err)
logger.Error().Err(err).Msg("Cannot connect to Provider server")
}
})
@@ -160,7 +161,7 @@ func (p *Provider) intervalPoll(ctx context.Context, client rancher.Client, upda
case <-ticker.C:
newVersion, err := client.GetVersion()
if err != nil {
log.FromContext(ctx).Errorf("Failed to create Rancher metadata service client: %v", err)
log.Ctx(ctx).Error().Err(err).Msg("Failed to create Rancher metadata service client")
} else if version != newVersion {
version = newVersion
updateConfiguration(version)
@@ -174,15 +175,15 @@ func (p *Provider) intervalPoll(ctx context.Context, client rancher.Client, upda
func (p *Provider) parseMetadataSourcedRancherData(ctx context.Context, stacks []rancher.Stack) (rancherDataList []rancherData) {
for _, stack := range stacks {
for _, service := range stack.Services {
ctxSvc := log.With(ctx, log.Str("stack", stack.Name), log.Str("service", service.Name))
logger := log.FromContext(ctxSvc)
logger := log.Ctx(ctx).With().Str("stack", stack.Name).Str("service", service.Name).Logger()
ctxSvc := logger.WithContext(ctx)
servicePort := ""
if len(service.Ports) > 0 {
servicePort = service.Ports[0]
}
for _, port := range service.Ports {
logger.Debugf("Set Port %s", port)
logger.Debug().Msgf("Set Port %s", port)
}
var containerIPAddresses []string
@@ -202,7 +203,7 @@ func (p *Provider) parseMetadataSourcedRancherData(ctx context.Context, stacks [
extraConf, err := p.getConfiguration(service)
if err != nil {
logger.Errorf("Skip container %s: %v", service.Name, err)
logger.Error().Err(err).Msgf("Skip container %s", service.Name)
continue
}
@@ -215,15 +216,15 @@ func (p *Provider) parseMetadataSourcedRancherData(ctx context.Context, stacks [
}
func containerFilter(ctx context.Context, name, healthState, state string) bool {
logger := log.FromContext(ctx)
logger := log.Ctx(ctx)
if healthState != "" && healthState != healthy && healthState != updatingHealthy {
logger.Debugf("Filtering container %s with healthState of %s", name, healthState)
logger.Debug().Msgf("Filtering container %s with healthState of %s", name, healthState)
return false
}
if state != "" && state != running && state != updatingRunning && state != upgraded {
logger.Debugf("Filtering container %s with state of %s", name, state)
logger.Debug().Msgf("Filtering container %s with state of %s", name, state)
return false
}