Add HighestRandomWeight Loadbalancing Algorithm
This commit is contained in:
parent 9b42b5b930
commit 02443545e7
10 changed files with 936 additions and 112 deletions
@@ -53,10 +53,11 @@ type Model struct {

// Service holds a service configuration (can only be of one type at the same time).
type Service struct {
	LoadBalancer        *ServersLoadBalancer `json:"loadBalancer,omitempty" toml:"loadBalancer,omitempty" yaml:"loadBalancer,omitempty" export:"true"`
	HighestRandomWeight *HighestRandomWeight `json:"highestRandomWeight,omitempty" toml:"highestRandomWeight,omitempty" yaml:"highestRandomWeight,omitempty" label:"-" export:"true"`
	Weighted            *WeightedRoundRobin  `json:"weighted,omitempty" toml:"weighted,omitempty" yaml:"weighted,omitempty" label:"-" export:"true"`
	Mirroring           *Mirroring           `json:"mirroring,omitempty" toml:"mirroring,omitempty" yaml:"mirroring,omitempty" label:"-" export:"true"`
	Failover            *Failover            `json:"failover,omitempty" toml:"failover,omitempty" yaml:"failover,omitempty" label:"-" export:"true"`
}

// +k8s:deepcopy-gen=true
@@ -157,6 +158,19 @@ type WeightedRoundRobin struct {

// +k8s:deepcopy-gen=true

// HighestRandomWeight is a weighted sticky load-balancer of services.
type HighestRandomWeight struct {
	Services []HRWService `json:"services,omitempty" toml:"services,omitempty" yaml:"services,omitempty" export:"true"`
	// HealthCheck enables automatic self-healthcheck for this service, i.e.
	// whenever one of its children is reported as down, this service becomes aware of it,
	// and takes it into account (i.e. it ignores the down child) when running the
	// load-balancing algorithm. In addition, if the parent of this service also has
	// HealthCheck enabled, this service reports to its parent any status change.
	HealthCheck *HealthCheck `json:"healthCheck,omitempty" toml:"healthCheck,omitempty" yaml:"healthCheck,omitempty" label:"allowEmpty" file:"allowEmpty" kv:"allowEmpty" export:"true"`
}

// +k8s:deepcopy-gen=true

// WRRService is a reference to a service load-balanced with weighted round-robin.
type WRRService struct {
	Name string `json:"name,omitempty" toml:"name,omitempty" yaml:"name,omitempty" export:"true"`
@@ -170,6 +184,20 @@ type WRRService struct {
	GRPCStatus *GRPCStatus `json:"-" toml:"-" yaml:"-" label:"-" file:"-"`
}

// +k8s:deepcopy-gen=true

// HRWService is a reference to a service load-balanced with highest random weight.
type HRWService struct {
	Name   string `json:"name,omitempty" toml:"name,omitempty" yaml:"name,omitempty" export:"true"`
	Weight *int   `json:"weight,omitempty" toml:"weight,omitempty" yaml:"weight,omitempty" export:"true"`
}

// SetDefaults Default values for a HRWService.
func (w *HRWService) SetDefaults() {
	defaultWeight := 1
	w.Weight = &defaultWeight
}

// SetDefaults Default values for a WRRService.
func (w *WRRService) SetDefaults() {
	defaultWeight := 1
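As an aside, here is a minimal sketch of how the new types compose into a dynamic.Service from Go. The service names and weights are invented for illustration, and HealthCheck is shown commented out as the opt-in for child status propagation.

package example

import "github.com/traefik/traefik/v3/pkg/config/dynamic"

// hrwServiceExample builds a hypothetical highestRandomWeight service definition.
func hrwServiceExample() dynamic.Service {
	heavy, light := 3, 1 // Weight is optional; SetDefaults fills in 1 when it is nil.
	return dynamic.Service{
		HighestRandomWeight: &dynamic.HighestRandomWeight{
			Services: []dynamic.HRWService{
				{Name: "api-blue@file", Weight: &heavy},
				{Name: "api-green@file", Weight: &light},
			},
			// HealthCheck: &dynamic.HealthCheck{}, // enable status propagation to/from children
		},
	}
}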
@@ -231,6 +259,8 @@ const (
	BalancerStrategyWRR BalancerStrategy = "wrr"
	// BalancerStrategyP2C is the power of two choices strategy.
	BalancerStrategyP2C BalancerStrategy = "p2c"
	// BalancerStrategyHRW is the highest random weight (rendezvous hashing) strategy.
	BalancerStrategyHRW BalancerStrategy = "hrw"
)

// +k8s:deepcopy-gen=true
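The new constant also makes "hrw" selectable as a strategy on a plain servers load balancer (wired up in the service manager further down). A hedged sketch of that usage; the Servers and URL fields are the usual dynamic.ServersLoadBalancer fields, not part of this diff, and the addresses are placeholders.

package example

import "github.com/traefik/traefik/v3/pkg/config/dynamic"

// hrwStrategyExample opts a servers load balancer into the new strategy.
var hrwStrategyExample = dynamic.ServersLoadBalancer{
	Strategy: dynamic.BalancerStrategyHRW,
	Servers: []dynamic.Server{
		{URL: "http://10.0.0.1:8080"},
		{URL: "http://10.0.0.2:8080"},
	},
}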
@@ -449,6 +449,27 @@ func (in *GrpcWeb) DeepCopy() *GrpcWeb {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HRWService) DeepCopyInto(out *HRWService) {
	*out = *in
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(int)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HRWService.
func (in *HRWService) DeepCopy() *HRWService {
	if in == nil {
		return nil
	}
	out := new(HRWService)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPConfiguration) DeepCopyInto(out *HTTPConfiguration) {
	*out = *in
@@ -688,6 +709,34 @@ func (in *HealthCheck) DeepCopy() *HealthCheck {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HighestRandomWeight) DeepCopyInto(out *HighestRandomWeight) {
	*out = *in
	if in.Services != nil {
		in, out := &in.Services, &out.Services
		*out = make([]HRWService, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HealthCheck != nil {
		in, out := &in.HealthCheck, &out.HealthCheck
		*out = new(HealthCheck)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighestRandomWeight.
func (in *HighestRandomWeight) DeepCopy() *HighestRandomWeight {
	if in == nil {
		return nil
	}
	out := new(HighestRandomWeight)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAllowList) DeepCopyInto(out *IPAllowList) {
	*out = *in
@@ -1566,6 +1615,11 @@ func (in *Service) DeepCopyInto(out *Service) {
		*out = new(ServersLoadBalancer)
		(*in).DeepCopyInto(*out)
	}
	if in.HighestRandomWeight != nil {
		in, out := &in.HighestRandomWeight, &out.HighestRandomWeight
		*out = new(HighestRandomWeight)
		(*in).DeepCopyInto(*out)
	}
	if in.Weighted != nil {
		in, out := &in.Weighted, &out.Weighted
		*out = new(WeightedRoundRobin)
200	pkg/server/service/loadbalancer/hrw/hrw.go	Normal file
@@ -0,0 +1,200 @@
package hrw

import (
	"context"
	"errors"
	"hash/fnv"
	"math"
	"net/http"
	"sync"

	"github.com/rs/zerolog/log"
	"github.com/traefik/traefik/v3/pkg/config/dynamic"
	"github.com/traefik/traefik/v3/pkg/ip"
)

type namedHandler struct {
	http.Handler
	name   string
	weight float64
}

// Balancer implements the Rendezvous Hashing algorithm for load balancing.
// The idea is to compute a score for each available backend using a hash of the client's
// source (for example, IP) combined with the backend's identifier, and assign the client
// to the backend with the highest score. This ensures that each client consistently
// connects to the same backend while distributing load evenly across all backends.
type Balancer struct {
	wantsHealthCheck bool

	strategy ip.RemoteAddrStrategy

	handlersMu sync.RWMutex
	// handlers is the list of all the child handlers of this balancer.
	handlers []*namedHandler
	// status is a record of which child services of the Balancer are healthy, keyed
	// by name of child service. A service is initially added to the map when it is
	// created via Add, and it is later removed or added to the map as needed,
	// through the SetStatus method.
	status map[string]struct{}
	// updaters is the list of hooks that are run (to update the Balancer
	// parent(s)), whenever the Balancer status changes.
	updaters []func(bool)
	// fenced is the list of terminating yet still serving child services.
	fenced map[string]struct{}
}

// New creates a new load balancer.
func New(wantHealthCheck bool) *Balancer {
	balancer := &Balancer{
		status:           make(map[string]struct{}),
		fenced:           make(map[string]struct{}),
		wantsHealthCheck: wantHealthCheck,
		strategy:         ip.RemoteAddrStrategy{},
	}

	return balancer
}

// getNodeScore computes the score for the pair (src, handler name).
func getNodeScore(handler *namedHandler, src string) float64 {
	h := fnv.New64a()
	h.Write([]byte(src + handler.name))
	sum := h.Sum64()
	score := float64(sum) / math.Pow(2, 64)
	logScore := 1.0 / -math.Log(score)

	return logScore * handler.weight
}

// SetStatus sets on the balancer that its given child is now of the given
// status. balancerName is only needed for logging purposes.
func (b *Balancer) SetStatus(ctx context.Context, childName string, up bool) {
	b.handlersMu.Lock()
	defer b.handlersMu.Unlock()

	upBefore := len(b.status) > 0

	status := "DOWN"
	if up {
		status = "UP"
	}

	log.Ctx(ctx).Debug().Msgf("Setting status of %s to %v", childName, status)

	if up {
		b.status[childName] = struct{}{}
	} else {
		delete(b.status, childName)
	}

	upAfter := len(b.status) > 0
	status = "DOWN"
	if upAfter {
		status = "UP"
	}

	// No Status Change
	if upBefore == upAfter {
		// We're still with the same status, no need to propagate
		log.Ctx(ctx).Debug().Msgf("Still %s, no need to propagate", status)
		return
	}

	// Status Change
	log.Ctx(ctx).Debug().Msgf("Propagating new %s status", status)
	for _, fn := range b.updaters {
		fn(upAfter)
	}
}

// RegisterStatusUpdater adds fn to the list of hooks that are run when the
// status of the Balancer changes.
// Not thread safe.
func (b *Balancer) RegisterStatusUpdater(fn func(up bool)) error {
	if !b.wantsHealthCheck {
		return errors.New("healthCheck not enabled in config for this highestRandomWeight service")
	}
	b.updaters = append(b.updaters, fn)

	return nil
}

var errNoAvailableServer = errors.New("no available server")

func (b *Balancer) nextServer(ip string) (*namedHandler, error) {
	b.handlersMu.RLock()
	var healthy []*namedHandler
	for _, h := range b.handlers {
		if _, ok := b.status[h.name]; ok {
			if _, fenced := b.fenced[h.name]; !fenced {
				healthy = append(healthy, h)
			}
		}
	}
	b.handlersMu.RUnlock()

	if len(healthy) == 0 {
		return nil, errNoAvailableServer
	}

	var handler *namedHandler
	score := 0.0
	for _, h := range healthy {
		s := getNodeScore(h, ip)
		if s > score {
			handler = h
			score = s
		}
	}

	log.Debug().Msgf("Service selected by HRW: %s", handler.name)

	return handler, nil
}

func (b *Balancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Extract the client IP and hand it to nextServer.
	clientIP := b.strategy.GetIP(req)
	log.Debug().Msgf("ServeHTTP() clientIP=%s", clientIP)

	server, err := b.nextServer(clientIP)
	if err != nil {
		if errors.Is(err, errNoAvailableServer) {
			http.Error(w, errNoAvailableServer.Error(), http.StatusServiceUnavailable)
		} else {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	server.ServeHTTP(w, req)
}

// AddServer adds a handler with a server.
func (b *Balancer) AddServer(name string, handler http.Handler, server dynamic.Server) {
	b.Add(name, handler, server.Weight, server.Fenced)
}

// Add adds a handler.
// A handler with a non-positive weight is ignored.
func (b *Balancer) Add(name string, handler http.Handler, weight *int, fenced bool) {
	w := 1
	if weight != nil {
		w = *weight
	}

	if w <= 0 { // non-positive weight is meaningless
		return
	}

	h := &namedHandler{Handler: handler, name: name, weight: float64(w)}

	b.handlersMu.Lock()
	b.handlers = append(b.handlers, h)
	b.status[name] = struct{}{}
	if fenced {
		b.fenced[name] = struct{}{}
	}
	b.handlersMu.Unlock()
}
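A note on getNodeScore above: hashing the client key together with each backend name into (0, 1) and mapping it through 1/-ln(h), scaled by the weight, is the usual weighted rendezvous (highest random weight) construction. The backend with the largest score wins, so a given client always lands on the same backend while the weights still shape the aggregate distribution. A self-contained sketch of that idea, separate from the Traefik types above; the backend names and client IPs are made up.

package main

import (
	"fmt"
	"hash/fnv"
	"math"
)

// score mirrors the weighted rendezvous formula used by getNodeScore.
func score(key, backend string, weight float64) float64 {
	h := fnv.New64a()
	h.Write([]byte(key + backend))
	u := float64(h.Sum64()) / math.Pow(2, 64) // roughly uniform in [0, 1)
	return weight / -math.Log(u)
}

// pick returns the backend with the highest score for the given client key.
func pick(key string, weights map[string]float64) string {
	best, bestScore := "", math.Inf(-1)
	for name, w := range weights {
		if s := score(key, name, w); s > bestScore {
			best, bestScore = name, s
		}
	}
	return best
}

func main() {
	backends := map[string]float64{"backend-a": 4, "backend-b": 1}
	// The same client key always maps to the same backend...
	fmt.Println(pick("203.0.113.7", backends), pick("203.0.113.7", backends))
	// ...while different clients spread roughly 4:1 across the two backends.
	fmt.Println(pick("198.51.100.42", backends))
}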
302	pkg/server/service/loadbalancer/hrw/hrw_test.go	Normal file
@@ -0,0 +1,302 @@
package hrw

import (
	"context"
	"encoding/binary"
	"math/rand"
	"net"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/assert"
)

// genIPAddress generates a random IPv4 address as a string.
func genIPAddress() string {
	buf := make([]byte, 4)

	ip := rand.Uint32()

	binary.LittleEndian.PutUint32(buf, ip)
	ipStr := net.IP(buf)
	return ipStr.String()
}

// initStatusArray initializes a slice filled with the given status value, for test assertions.
func initStatusArray(size int, value int) []int {
	status := make([]int, 0, size)
	for i := 1; i <= size; i++ {
		status = append(status, value)
	}
	return status
}

// The tests below evaluate load balancing for single and multiple clients.
// Because client IP addresses are generated randomly, the distribution across services is not exact,
// so the tests validate it within a margin of 10% of the number of requests.

func TestBalancer(t *testing.T) {
	balancer := New(false)

	balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "first")
		rw.WriteHeader(http.StatusOK)
	}), Int(4), false)

	balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "second")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	for range 100 {
		req.RemoteAddr = genIPAddress()
		balancer.ServeHTTP(recorder, req)
	}
	assert.InDelta(t, 80, recorder.save["first"], 10)
	assert.InDelta(t, 20, recorder.save["second"], 10)
}

func TestBalancerNoService(t *testing.T) {
	balancer := New(false)

	recorder := httptest.NewRecorder()
	balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil))

	assert.Equal(t, http.StatusServiceUnavailable, recorder.Result().StatusCode)
}

func TestBalancerOneServerZeroWeight(t *testing.T) {
	balancer := New(false)

	balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "first")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}), Int(0), false)

	recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	for range 3 {
		balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil))
	}

	assert.Equal(t, 3, recorder.save["first"])
}

type key string

const serviceName key = "serviceName"

func TestBalancerNoServiceUp(t *testing.T) {
	balancer := New(false)

	balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusInternalServerError)
	}), Int(1), false)

	balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusInternalServerError)
	}), Int(1), false)

	balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "first", false)
	balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", false)

	recorder := httptest.NewRecorder()
	balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil))

	assert.Equal(t, http.StatusServiceUnavailable, recorder.Result().StatusCode)
}

func TestBalancerOneServerDown(t *testing.T) {
	balancer := New(false)

	balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "first")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusInternalServerError)
	}), Int(1), false)
	balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", false)

	recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	for range 3 {
		balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil))
	}

	assert.Equal(t, 3, recorder.save["first"])
}

func TestBalancerDownThenUp(t *testing.T) {
	balancer := New(false)

	balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "first")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "second")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)
	balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", false)

	recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	for range 3 {
		balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil))
	}
	assert.Equal(t, 3, recorder.save["first"])

	balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", true)
	recorder = &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	for range 100 {
		req.RemoteAddr = genIPAddress()
		balancer.ServeHTTP(recorder, req)
	}
	assert.InDelta(t, 50, recorder.save["first"], 10)
	assert.InDelta(t, 50, recorder.save["second"], 10)
}

func TestBalancerPropagate(t *testing.T) {
	balancer1 := New(true)

	balancer1.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "first")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)
	balancer1.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "second")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	balancer2 := New(true)
	balancer2.Add("third", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "third")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)
	balancer2.Add("fourth", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "fourth")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	topBalancer := New(true)
	topBalancer.Add("balancer1", balancer1, Int(1), false)
	_ = balancer1.RegisterStatusUpdater(func(up bool) {
		topBalancer.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "balancer1", up)
		// TODO(mpl): if test gets flaky, add channel or something here to signal that
		// propagation is done, and wait on it before sending request.
	})
	topBalancer.Add("balancer2", balancer2, Int(1), false)
	_ = balancer2.RegisterStatusUpdater(func(up bool) {
		topBalancer.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "balancer2", up)
	})

	recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	for range 100 {
		req.RemoteAddr = genIPAddress()
		topBalancer.ServeHTTP(recorder, req)
	}
	assert.InDelta(t, 25, recorder.save["first"], 10)
	assert.InDelta(t, 25, recorder.save["second"], 10)
	assert.InDelta(t, 25, recorder.save["third"], 10)
	assert.InDelta(t, 25, recorder.save["fourth"], 10)
	wantStatus := initStatusArray(100, 200)
	assert.Equal(t, wantStatus, recorder.status)

	// fourth gets downed, but balancer2 still up since third is still up.
	balancer2.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "fourth", false)
	recorder = &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	req = httptest.NewRequest(http.MethodGet, "/", nil)
	for range 100 {
		req.RemoteAddr = genIPAddress()
		topBalancer.ServeHTTP(recorder, req)
	}
	assert.InDelta(t, 25, recorder.save["first"], 10)
	assert.InDelta(t, 25, recorder.save["second"], 10)
	assert.InDelta(t, 50, recorder.save["third"], 10)
	assert.InDelta(t, 0, recorder.save["fourth"], 0)
	wantStatus = initStatusArray(100, 200)
	assert.Equal(t, wantStatus, recorder.status)

	// third gets downed, and the propagation triggers balancer2 to be marked as
	// down as well for topBalancer.
	balancer2.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "third", false)
	recorder = &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
	req = httptest.NewRequest(http.MethodGet, "/", nil)
	for range 100 {
		req.RemoteAddr = genIPAddress()
		topBalancer.ServeHTTP(recorder, req)
	}
	assert.InDelta(t, 50, recorder.save["first"], 10)
	assert.InDelta(t, 50, recorder.save["second"], 10)
	assert.InDelta(t, 0, recorder.save["third"], 0)
	assert.InDelta(t, 0, recorder.save["fourth"], 0)
	wantStatus = initStatusArray(100, 200)
	assert.Equal(t, wantStatus, recorder.status)
}

func TestBalancerAllServersZeroWeight(t *testing.T) {
	balancer := New(false)

	balancer.Add("test", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}), Int(0), false)
	balancer.Add("test2", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}), Int(0), false)

	recorder := httptest.NewRecorder()
	balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil))

	assert.Equal(t, http.StatusServiceUnavailable, recorder.Result().StatusCode)
}

func TestSticky(t *testing.T) {
	balancer := New(false)

	balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "first")
		rw.WriteHeader(http.StatusOK)
	}), Int(1), false)

	balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("server", "second")
		rw.WriteHeader(http.StatusOK)
	}), Int(2), false)

	recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.RemoteAddr = genIPAddress()
	for range 10 {
		for _, cookie := range recorder.Result().Cookies() {
			req.AddCookie(cookie)
		}
		recorder.ResponseRecorder = httptest.NewRecorder()

		balancer.ServeHTTP(recorder, req)
	}

	assert.True(t, recorder.save["first"] == 0 || recorder.save["first"] == 10)
	assert.True(t, recorder.save["second"] == 0 || recorder.save["second"] == 10)
	// From one IP, the chosen server must be the same for all 10 requests;
	// the weight does not determine which server a single client is assigned to.
}

func Int(v int) *int { return &v }

type responseRecorder struct {
	*httptest.ResponseRecorder
	save     map[string]int
	sequence []string
	status   []int
}

func (r *responseRecorder) WriteHeader(statusCode int) {
	r.save[r.Header().Get("server")]++
	r.sequence = append(r.sequence, r.Header().Get("server"))
	r.status = append(r.status, statusCode)
	r.ResponseRecorder.WriteHeader(statusCode)
}
@@ -28,6 +28,7 @@ import (
	"github.com/traefik/traefik/v3/pkg/server/middleware"
	"github.com/traefik/traefik/v3/pkg/server/provider"
	"github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/failover"
	"github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/hrw"
	"github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/mirror"
	"github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/p2c"
	"github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/wrr"
@@ -137,6 +138,13 @@ func (m *Manager) BuildHTTP(rootCtx context.Context, serviceName string) (http.H
			conf.AddError(err, true)
			return nil, err
		}
	case conf.HighestRandomWeight != nil:
		var err error
		lb, err = m.getHRWServiceHandler(ctx, serviceName, conf.HighestRandomWeight)
		if err != nil {
			conf.AddError(err, true)
			return nil, err
		}
	case conf.Mirroring != nil:
		var err error
		lb, err = m.getMirrorServiceHandler(ctx, conf.Mirroring)
@@ -305,6 +313,40 @@ func (m *Manager) getServiceHandler(ctx context.Context, service dynamic.WRRServ
	}
}

func (m *Manager) getHRWServiceHandler(ctx context.Context, serviceName string, config *dynamic.HighestRandomWeight) (http.Handler, error) {
	// TODO Handle accesslog and metrics with multiple service name
	balancer := hrw.New(config.HealthCheck != nil)
	for _, service := range shuffle(config.Services, m.rand) {
		serviceHandler, err := m.BuildHTTP(ctx, service.Name)
		if err != nil {
			return nil, err
		}

		balancer.Add(service.Name, serviceHandler, service.Weight, false)

		if config.HealthCheck == nil {
			continue
		}

		childName := service.Name
		updater, ok := serviceHandler.(healthcheck.StatusUpdater)
		if !ok {
			return nil, fmt.Errorf("child service %v of %v not a healthcheck.StatusUpdater (%T)", childName, serviceName, serviceHandler)
		}

		if err := updater.RegisterStatusUpdater(func(up bool) {
			balancer.SetStatus(ctx, childName, up)
		}); err != nil {
			return nil, fmt.Errorf("cannot register %v as updater for %v: %w", childName, serviceName, err)
		}

		log.Ctx(ctx).Debug().Str("parent", serviceName).Str("child", childName).
			Msg("Child service will update parent on status change")
	}

	return balancer, nil
}

type serverBalancer interface {
	http.Handler
	healthcheck.StatusSetter
@@ -346,6 +388,8 @@ func (m *Manager) getLoadBalancerServiceHandler(ctx context.Context, serviceName
		lb = wrr.New(service.Sticky, service.HealthCheck != nil)
	case dynamic.BalancerStrategyP2C:
		lb = p2c.New(service.Sticky, service.HealthCheck != nil)
	case dynamic.BalancerStrategyHRW:
		lb = hrw.New(service.HealthCheck != nil)
	default:
		return nil, fmt.Errorf("unsupported load-balancer strategy %q", service.Strategy)
	}