Merge current v3.1 into master

mmatur 2024-08-29 10:09:18 +02:00
commit 8dc9607db7
No known key found for this signature in database
GPG key ID: 2FFE42FC256CFF8E
50 changed files with 159 additions and 128 deletions

View file

@@ -93,7 +93,7 @@ func (c *searchCriterion) filterMiddleware(mns []string) bool {
return false
}
func pagination(request *http.Request, max int) (pageInfo, error) {
func pagination(request *http.Request, maximum int) (pageInfo, error) {
perPage, err := getIntParam(request, "per_page", defaultPerPage)
if err != nil {
return pageInfo{}, err
@@ -105,17 +105,17 @@ func pagination(request *http.Request, max int) (pageInfo, error) {
}
startIndex := (page - 1) * perPage
if startIndex != 0 && startIndex >= max {
if startIndex != 0 && startIndex >= maximum {
return pageInfo{}, fmt.Errorf("invalid request: page: %d, per_page: %d", page, perPage)
}
endIndex := startIndex + perPage
if endIndex >= max {
endIndex = max
if endIndex >= maximum {
endIndex = maximum
}
nextPage := 1
if page*perPage < max {
if page*perPage < maximum {
nextPage = page + 1
}
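
Note on the max/maximum renames in this hunk (the same rename recurs in the getPort and WRR load-balancer hunks below): Go 1.21 added built-in min and max functions, so locals and parameters named max or min now shadow them and get flagged by linters. A minimal standalone sketch of the same clamping logic with the rename applied (hypothetical names, not the Traefik code):

package main

import "fmt"

// clampRange mirrors the pagination bounds logic above; the parameter is
// named maximum so the Go 1.21 built-ins min/max stay usable inside.
func clampRange(page, perPage, maximum int) (start, end int) {
	start = (page - 1) * perPage
	end = min(start+perPage, maximum) // built-in min, not a shadowed local
	return start, end
}

func main() {
	fmt.Println(clampRange(2, 10, 15)) // 10 15
}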

View file

@@ -214,7 +214,7 @@ type providers struct {
ETCD *etcd `json:"etcd,omitempty" toml:"etcd,omitempty" yaml:"etcd,omitempty" label:"allowEmpty" file:"allowEmpty"`
Redis *redis `json:"redis,omitempty" toml:"redis,omitempty" yaml:"redis,omitempty" label:"allowEmpty" file:"allowEmpty"`
HTTP *http `json:"http,omitempty" toml:"http,omitempty" yaml:"http,omitempty" label:"allowEmpty" file:"allowEmpty"`
KubernetesIngress *ingress `json:"kubernetesIngress,omitempty" toml:"kubernetesIngress,omitempty" yaml:"kubernetesIngress,omitempty" file:"allowEmpty"`
KubernetesIngress *ingress `json:"kubernetesIngress,omitempty" toml:"kubernetesIngress,omitempty" yaml:"kubernetesIngress,omitempty" label:"allowEmpty" file:"allowEmpty"`
}
func (p *providers) deprecationNotice(logger zerolog.Logger) bool {

View file

@@ -21,11 +21,11 @@ const collectorURL = "https://collect.traefik.io/yYaUej3P42cziRVzv6T5w2aYy9po2Mr
// Collected data.
type data struct {
Version string
Codename string
BuildDate string
Configuration string
Hash string
Version string `json:"version"`
Codename string `json:"codename"`
BuildDate string `json:"buildDate"`
Configuration string `json:"configuration"`
Hash string `json:"hash"`
}
// Collect anonymous data.
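
Without tags, encoding/json uses the exported Go field names verbatim on the wire; the explicit tags added above pin the camelCase names instead. A standalone sketch of the difference (field set trimmed):

package main

import (
	"encoding/json"
	"fmt"
)

type untagged struct {
	Version  string
	Codename string
}

type tagged struct {
	Version  string `json:"version"`
	Codename string `json:"codename"`
}

func main() {
	a, _ := json.Marshal(untagged{Version: "v3.1", Codename: "x"})
	b, _ := json.Marshal(tagged{Version: "v3.1", Codename: "x"})
	fmt.Println(string(a)) // {"Version":"v3.1","Codename":"x"}
	fmt.Println(string(b)) // {"version":"v3.1","codename":"x"}
}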

View file

@@ -199,7 +199,7 @@ func TestLoggerHeaderFields(t *testing.T) {
if config.FilePath != "" {
_, err = os.Stat(config.FilePath)
require.NoError(t, err, fmt.Sprintf("logger should create %s", config.FilePath))
require.NoErrorf(t, err, "logger should create %s", config.FilePath)
}
req := &http.Request{
@@ -704,7 +704,7 @@ func assertValidLogData(t *testing.T, expected string, logData []byte) {
t.Helper()
if len(expected) == 0 {
assert.Zero(t, len(logData))
assert.Empty(t, logData)
t.Log(string(logData))
return
}
@@ -761,7 +761,7 @@ func doLoggingTLSOpt(t *testing.T, config *types.AccessLog, enableTLS bool) {
if config.FilePath != "" {
_, err = os.Stat(config.FilePath)
require.NoError(t, err, fmt.Sprintf("logger should create %s", config.FilePath))
require.NoErrorf(t, err, "logger should create %s", config.FilePath)
}
req := &http.Request{
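
The test hunks above swap an eagerly built fmt.Sprintf message for testify's printf-style variants, and assert.Empty replaces a manual length check. A minimal sketch of the two idioms:

package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestIdioms(t *testing.T) {
	var logData []byte
	// Empty passes for nil or zero-length values; no len(...) needed.
	assert.Empty(t, logData)

	var err error
	// The f-variants only render the message when the assertion fails.
	require.NoErrorf(t, err, "logger should create %s", "access.log")
}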

View file

@@ -128,9 +128,9 @@ func (fa *forwardAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
forwardReq, err := http.NewRequestWithContext(req.Context(), http.MethodGet, fa.address, nil)
if err != nil {
logMessage := fmt.Sprintf("Error calling %s. Cause %s", fa.address, err)
logger.Debug().Msg(logMessage)
observability.SetStatusErrorf(req.Context(), logMessage)
logger.Debug().Msgf("Error calling %s. Cause %s", fa.address, err)
observability.SetStatusErrorf(req.Context(), "Error calling %s. Cause %s", fa.address, err)
rw.WriteHeader(http.StatusInternalServerError)
return
}
@@ -152,9 +152,8 @@ func (fa *forwardAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
forwardResponse, forwardErr := fa.client.Do(forwardReq)
if forwardErr != nil {
logMessage := fmt.Sprintf("Error calling %s. Cause: %s", fa.address, forwardErr)
logger.Debug().Msg(logMessage)
observability.SetStatusErrorf(forwardReq.Context(), logMessage)
logger.Debug().Msgf("Error calling %s. Cause: %s", fa.address, forwardErr)
observability.SetStatusErrorf(req.Context(), "Error calling %s. Cause: %s", fa.address, forwardErr)
rw.WriteHeader(http.StatusInternalServerError)
return
@@ -163,9 +162,8 @@ func (fa *forwardAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
body, readError := io.ReadAll(forwardResponse.Body)
if readError != nil {
logMessage := fmt.Sprintf("Error reading body %s. Cause: %s", fa.address, readError)
logger.Debug().Msg(logMessage)
observability.SetStatusErrorf(forwardReq.Context(), logMessage)
logger.Debug().Msgf("Error reading body %s. Cause: %s", fa.address, readError)
observability.SetStatusErrorf(req.Context(), "Error reading body %s. Cause: %s", fa.address, readError)
rw.WriteHeader(http.StatusInternalServerError)
return
@@ -199,9 +197,8 @@ func (fa *forwardAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if err != nil {
if !errors.Is(err, http.ErrNoLocation) {
logMessage := fmt.Sprintf("Error reading response location header %s. Cause: %s", fa.address, err)
logger.Debug().Msg(logMessage)
observability.SetStatusErrorf(forwardReq.Context(), logMessage)
logger.Debug().Msgf("Error reading response location header %s. Cause: %s", fa.address, err)
observability.SetStatusErrorf(req.Context(), "Error reading response location header %s. Cause: %s", fa.address, err)
rw.WriteHeader(http.StatusInternalServerError)
return
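
All four forwardAuth hunks (and the middleware hunks that follow) apply the same refactor: drop the intermediate logMessage built with fmt.Sprintf, pass the format string and args directly to Msgf and SetStatusErrorf, and report the status error on the incoming request's context (req.Context()) rather than the forwarded request's. A standalone sketch of the pattern, with setStatusErrorf as a hypothetical stand-in for Traefik's observability helper:

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
)

// setStatusErrorf stands in for observability.SetStatusErrorf: a
// printf-style sink, so callers no longer pre-render the message.
func setStatusErrorf(ctx context.Context, format string, args ...interface{}) {
	_ = ctx
	log.Printf("span status: "+format, args...)
}

func reportCallError(req *http.Request, address string, err error) {
	// One format string feeds both sinks; note req.Context(), not the
	// context of the outbound (forwarded) request.
	log.Printf("Error calling %s. Cause: %s", address, err)
	setStatusErrorf(req.Context(), "Error calling %s. Cause: %s", address, err)
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
	reportCallError(req, "http://auth.internal", errors.New("connection refused"))
}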

View file

@@ -95,9 +95,8 @@ func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request)
compressionWriter, err := newCompressionWriter(c.cfg.Algorithm, rw)
if err != nil {
logger := middlewares.GetLogger(r.Context(), c.cfg.MiddlewareName, typeName)
logMessage := fmt.Sprintf("create compression handler: %v", err)
logger.Debug().Msg(logMessage)
observability.SetStatusErrorf(r.Context(), logMessage)
logger.Debug().Msgf("Create compression handler: %v", err)
observability.SetStatusErrorf(r.Context(), "Create compression handler: %v", err)
rw.WriteHeader(http.StatusInternalServerError)
return
}

View file

@@ -70,8 +70,8 @@ func (c *customErrors) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
logger := middlewares.GetLogger(req.Context(), c.name, typeName)
if c.backendHandler == nil {
logger.Error().Msg("Error pages: no backend handler.")
observability.SetStatusErrorf(req.Context(), "Error pages: no backend handler.")
logger.Error().Msg("No backend handler.")
observability.SetStatusErrorf(req.Context(), "No backend handler.")
c.next.ServeHTTP(rw, req)
return
}
@@ -95,8 +95,8 @@ func (c *customErrors) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
pageReq, err := newRequest("http://" + req.Host + query)
if err != nil {
logger.Error().Err(err).Send()
observability.SetStatusErrorf(req.Context(), err.Error())
logger.Error().Msgf("Unable to create error page request: %v", err)
observability.SetStatusErrorf(req.Context(), "Unable to create error page request: %v", err)
http.Error(rw, http.StatusText(code), code)
return
}

View file

@@ -76,9 +76,8 @@ func (al *ipAllowLister) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
clientIP := al.strategy.GetIP(req)
err := al.allowLister.IsAuthorized(clientIP)
if err != nil {
msg := fmt.Sprintf("Rejecting IP %s: %v", clientIP, err)
logger.Debug().Msg(msg)
observability.SetStatusErrorf(req.Context(), msg)
logger.Debug().Msgf("Rejecting IP %s: %v", clientIP, err)
observability.SetStatusErrorf(req.Context(), "Rejecting IP %s: %v", clientIP, err)
reject(ctx, al.rejectStatusCode, rw)
return
}

View file

@@ -66,9 +66,8 @@ func (wl *ipWhiteLister) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
clientIP := wl.strategy.GetIP(req)
err := wl.whiteLister.IsAuthorized(clientIP)
if err != nil {
msg := fmt.Sprintf("Rejecting IP %s: %v", clientIP, err)
logger.Debug().Msg(msg)
observability.SetStatusErrorf(req.Context(), msg)
logger.Debug().Msgf("Rejecting IP %s: %v", clientIP, err)
observability.SetStatusErrorf(req.Context(), "Rejecting IP %s: %v", clientIP, err)
reject(ctx, rw)
return
}

View file

@@ -319,7 +319,7 @@ func TestPassTLSClientCert_PEM(t *testing.T) {
res := httptest.NewRecorder()
req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil)
if test.certContents != nil && len(test.certContents) > 0 {
if len(test.certContents) > 0 {
req.TLS = buildTLSWith(test.certContents)
}
@@ -541,7 +541,7 @@ func TestPassTLSClientCert_certInfo(t *testing.T) {
res := httptest.NewRecorder()
req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil)
if test.certContents != nil && len(test.certContents) > 0 {
if len(test.certContents) > 0 {
req.TLS = buildTLSWith(test.certContents)
}
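
The simplification in both hunks is safe because len on a nil slice is defined as 0 in Go, so the explicit nil guard was redundant:

package main

import "fmt"

func main() {
	var certContents []string // nil slice
	fmt.Println(certContents == nil, len(certContents)) // true 0

	// Equivalent to the old `certContents != nil && len(certContents) > 0`.
	if len(certContents) > 0 {
		fmt.Println("unreachable for a nil slice")
	}
}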

View file

@@ -51,8 +51,8 @@ func (r *replacePath) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
var err error
req.URL.Path, err = url.PathUnescape(req.URL.RawPath)
if err != nil {
middlewares.GetLogger(context.Background(), r.name, typeName).Error().Err(err).Send()
observability.SetStatusErrorf(req.Context(), err.Error())
middlewares.GetLogger(context.Background(), r.name, typeName).Error().Msgf("Unable to unescape url raw path %q: %v", req.URL.RawPath, err)
observability.SetStatusErrorf(req.Context(), "Unable to unescape url raw path %q: %v", req.URL.RawPath, err)
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}

View file

@@ -62,8 +62,8 @@ func (rp *replacePathRegex) ServeHTTP(rw http.ResponseWriter, req *http.Request)
var err error
req.URL.Path, err = url.PathUnescape(req.URL.RawPath)
if err != nil {
middlewares.GetLogger(context.Background(), rp.name, typeName).Error().Err(err).Send()
observability.SetStatusErrorf(req.Context(), err.Error())
middlewares.GetLogger(context.Background(), rp.name, typeName).Error().Msgf("Unable to unescape url raw path %q: %v", req.URL.RawPath, err)
observability.SetStatusErrorf(req.Context(), "Unable to unescape url raw path %q: %v", req.URL.RawPath, err)
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}

View file

@@ -199,8 +199,7 @@ func getPort(container dockerData, serverPort string) string {
nat.Sort(ports, less)
if len(ports) > 0 {
min := ports[0]
return min.Port()
return ports[0].Port()
}
return ""

View file

@@ -316,8 +316,7 @@ func getPort(instance ecsInstance, serverPort string) string {
nat.Sort(ports, less)
if len(ports) > 0 {
min := ports[0]
return min.Port()
return ports[0].Port()
}
return ""

View file

@@ -77,7 +77,7 @@ type ServiceTCP struct {
// hence fully terminating the connection.
// It is a duration in milliseconds, defaulting to 100.
// A negative value means an infinite deadline (i.e. the reading capability is never closed).
// Deprecated: TerminationDelay is not supported APIVersion traefik.io/v1, please use ServersTransport to configure the TerminationDelay instead.
// Deprecated: TerminationDelay will not be supported in future APIVersions, please use ServersTransport to configure the TerminationDelay instead.
TerminationDelay *int `json:"terminationDelay,omitempty"`
// ProxyProtocol defines the PROXY protocol configuration.
// More info: https://doc.traefik.io/traefik/v3.1/routing/services/#proxy-protocol

View file

@@ -26,6 +26,7 @@ type RouterIng struct {
EntryPoints []string `json:"entryPoints,omitempty"`
Middlewares []string `json:"middlewares,omitempty"`
Priority int `json:"priority,omitempty"`
RuleSyntax string `json:"ruleSyntax,omitempty"`
TLS *dynamic.RouterTLSConfig `json:"tls,omitempty" label:"allowEmpty"`
}

View file

@@ -24,6 +24,7 @@ func Test_parseRouterConfig(t *testing.T) {
"traefik.ingress.kubernetes.io/router.entrypoints": "foobar,foobar",
"traefik.ingress.kubernetes.io/router.middlewares": "foobar,foobar",
"traefik.ingress.kubernetes.io/router.priority": "42",
"traefik.ingress.kubernetes.io/router.rulesyntax": "foobar",
"traefik.ingress.kubernetes.io/router.tls": "true",
"traefik.ingress.kubernetes.io/router.tls.certresolver": "foobar",
"traefik.ingress.kubernetes.io/router.tls.domains.0.main": "foobar",
@@ -38,6 +39,7 @@
EntryPoints: []string{"foobar", "foobar"},
Middlewares: []string{"foobar", "foobar"},
Priority: 42,
RuleSyntax: "foobar",
TLS: &dynamic.RouterTLSConfig{
CertResolver: "foobar",
Domains: []types.Domain{
@@ -180,6 +182,7 @@ func Test_convertAnnotations(t *testing.T) {
"traefik.ingress.kubernetes.io/router.entrypoints": "foobar,foobar",
"traefik.ingress.kubernetes.io/router.middlewares": "foobar,foobar",
"traefik.ingress.kubernetes.io/router.priority": "42",
"traefik.ingress.kubernetes.io/router.rulesyntax": "foobar",
"traefik.ingress.kubernetes.io/router.tls": "true",
"traefik.ingress.kubernetes.io/router.tls.certresolver": "foobar",
"traefik.ingress.kubernetes.io/router.tls.domains.0.main": "foobar",
@@ -194,6 +197,7 @@
"traefik.router.entrypoints": "foobar,foobar",
"traefik.router.middlewares": "foobar,foobar",
"traefik.router.priority": "42",
"traefik.router.rulesyntax": "foobar",
"traefik.router.tls": "true",
"traefik.router.tls.certresolver": "foobar",
"traefik.router.tls.domains[0].main": "foobar",

View file

@@ -10,6 +10,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: ep1,ep2
traefik.ingress.kubernetes.io/router.middlewares: md1,md2
traefik.ingress.kubernetes.io/router.priority: "42"
traefik.ingress.kubernetes.io/router.rulesyntax: "v2"
traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/router.tls.certresolver: foobar
traefik.ingress.kubernetes.io/router.tls.domains.0.main: domain.com

View file

@@ -760,6 +760,7 @@ func loadRouter(rule netv1.IngressRule, pa netv1.HTTPIngressPath, rtConfig *Rout
}
if rtConfig != nil && rtConfig.Router != nil {
rt.RuleSyntax = rtConfig.Router.RuleSyntax
rt.Priority = rtConfig.Router.Priority
rt.EntryPoints = rtConfig.Router.EntryPoints
rt.Middlewares = rtConfig.Router.Middlewares

View file

@@ -96,6 +96,7 @@ func TestLoadConfigurationFromIngresses(t *testing.T) {
Service: "testing-service1-80",
Middlewares: []string{"md1", "md2"},
Priority: 42,
RuleSyntax: "v2",
TLS: &dynamic.RouterTLSConfig{
CertResolver: "foobar",
Domains: []types.Domain{

View file

@@ -7,6 +7,8 @@ import (
"github.com/rs/zerolog/log"
)
var errNoServersInPool = errors.New("no servers in the pool")
type server struct {
Handler
weight int
@@ -34,8 +36,10 @@ func (b *WRRLoadBalancer) ServeTCP(conn WriteCloser) {
b.lock.Unlock()
if err != nil {
log.Error().Err(err).Msg("Error during load balancing")
conn.Close()
if !errors.Is(err, errNoServersInPool) {
log.Error().Err(err).Msg("Error during load balancing")
}
_ = conn.Close()
return
}
@@ -61,13 +65,13 @@ func (b *WRRLoadBalancer) AddWeightServer(serverHandler Handler, weight *int) {
}
func (b *WRRLoadBalancer) maxWeight() int {
max := -1
maximum := -1
for _, s := range b.servers {
if s.weight > max {
max = s.weight
if s.weight > maximum {
maximum = s.weight
}
}
return max
return maximum
}
func (b *WRRLoadBalancer) weightGcd() int {
@@ -91,7 +95,7 @@ func gcd(a, b int) int {
func (b *WRRLoadBalancer) next() (Handler, error) {
if len(b.servers) == 0 {
return nil, errors.New("no servers in the pool")
return nil, errNoServersInPool
}
// The algo below may look messy, but is actually very simple
@@ -99,8 +103,8 @@ func (b *WRRLoadBalancer) next() (Handler, error) {
// and allows us not to build an iterator every time we readjust weights
// Maximum weight across all enabled servers
max := b.maxWeight()
if max == 0 {
maximum := b.maxWeight()
if maximum == 0 {
return nil, errors.New("all servers have 0 weight")
}
@@ -112,7 +116,7 @@ func (b *WRRLoadBalancer) next() (Handler, error) {
if b.index == 0 {
b.currentWeight -= gcd
if b.currentWeight <= 0 {
b.currentWeight = max
b.currentWeight = maximum
}
}
srv := b.servers[b.index]
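
The new errNoServersInPool sentinel lets ServeTCP treat an empty pool as an expected condition: errors.Is suppresses the log line while the connection is still closed. A minimal standalone sketch of the pattern (names hypothetical):

package main

import (
	"errors"
	"log"
)

var errNoServersInPool = errors.New("no servers in the pool")

func next(serverCount int) (int, error) {
	if serverCount == 0 {
		return 0, errNoServersInPool
	}
	return 0, nil
}

func serve(serverCount int) {
	_, err := next(serverCount)
	if err != nil {
		// Expected when the pool is empty: skip the log, still clean up.
		if !errors.Is(err, errNoServersInPool) {
			log.Printf("Error during load balancing: %v", err)
		}
		return
	}
	log.Println("forwarding connection")
}

func main() {
	serve(0) // silent
	serve(3) // forwarding connection
}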

View file

@@ -61,13 +61,13 @@ func (b *WRRLoadBalancer) AddWeightedServer(serverHandler Handler, weight *int)
}
func (b *WRRLoadBalancer) maxWeight() int {
max := -1
maximum := -1
for _, s := range b.servers {
if s.weight > max {
max = s.weight
if s.weight > maximum {
maximum = s.weight
}
}
return max
return maximum
}
func (b *WRRLoadBalancer) weightGcd() int {
@@ -99,8 +99,8 @@ func (b *WRRLoadBalancer) next() (Handler, error) {
// what interleaves servers and allows us not to build an iterator every time we readjust weights.
// Maximum weight across all enabled servers
max := b.maxWeight()
if max == 0 {
maximum := b.maxWeight()
if maximum == 0 {
return nil, errors.New("all servers have 0 weight")
}
@@ -112,7 +112,7 @@ func (b *WRRLoadBalancer) next() (Handler, error) {
if b.index == 0 {
b.currentWeight -= gcd
if b.currentWeight <= 0 {
b.currentWeight = max
b.currentWeight = maximum
}
}
srv := b.servers[b.index]
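
For reference, the selection loop touched in both WRR files is the classic GCD-based interleaved weighted round-robin the comments describe. A condensed, runnable sketch with strings in place of handlers (hypothetical types, weights assumed positive; the real code rejects an all-zero pool):

package main

import "fmt"

type wrr struct {
	names   []string
	weights []int
	index   int
	cw      int
}

func gcd(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func (w *wrr) maxWeight() int {
	maximum := -1
	for _, wt := range w.weights {
		if wt > maximum {
			maximum = wt
		}
	}
	return maximum
}

func (w *wrr) gcdWeight() int {
	g := w.weights[0]
	for _, wt := range w.weights[1:] {
		g = gcd(g, wt)
	}
	return g
}

// next walks the servers in order; each time it wraps around it lowers the
// current weight threshold by the GCD, so heavier servers are picked more
// often without building a materialized schedule.
func (w *wrr) next() string {
	maximum := w.maxWeight()
	for {
		w.index = (w.index + 1) % len(w.names)
		if w.index == 0 {
			w.cw -= w.gcdWeight()
			if w.cw <= 0 {
				w.cw = maximum
			}
		}
		if w.weights[w.index] >= w.cw {
			return w.names[w.index]
		}
	}
}

func main() {
	b := &wrr{names: []string{"A", "B"}, weights: []int{2, 3}, index: -1}
	for i := 0; i < 10; i++ {
		fmt.Print(b.next(), " ") // B A B A B B A B A B: a 2:3 split
	}
	fmt.Println()
}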