Rework servers load-balancer to use the WRR
Co-authored-by: Kevin Pollet <pollet.kevin@gmail.com>
Parent: 67d9c8da0b
Commit: fadee5e87b
70 changed files with 2085 additions and 2211 deletions
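For orientation before the diff: this change drops the oxy-based BalancerHandler/BackendConfig health-check plumbing and introduces a per-service ServiceHealthChecker that probes each target and reports up/down transitions to a StatusSetter (the role played by the new WRR balancer). The sketch below shows how the new checker is wired together; it is illustrative only, written as if alongside the tests in this diff, and the noopSetter type plus the literal path, interval, timeout, and target values are assumptions rather than part of the commit.

package healthcheck

import (
	"context"
	"net/http"
	"net/url"
	"time"

	ptypes "github.com/traefik/paerser/types"
	"github.com/traefik/traefik/v2/pkg/config/dynamic"
	"github.com/traefik/traefik/v2/pkg/config/runtime"
	"github.com/traefik/traefik/v2/pkg/testhelpers"
)

// noopSetter stands in for the WRR balancer; any StatusSetter implementation
// receives the up/down notifications emitted by Launch.
type noopSetter struct{}

func (noopSetter) SetStatus(_ context.Context, _ string, _ bool) {}

func exampleLaunch(ctx context.Context, target *url.URL) {
	config := &dynamic.ServerHealthCheck{
		Path:     "/health",                         // assumed probe path
		Interval: ptypes.Duration(30 * time.Second), // must stay above Timeout
		Timeout:  ptypes.Duration(5 * time.Second),
	}

	hc := NewServiceHealthChecker(ctx,
		&MetricsMock{Gauge: &testhelpers.CollectingGauge{}}, // anything exposing ServiceServerUpGauge()
		config, noopSetter{}, &runtime.ServiceInfo{},
		http.DefaultTransport,
		map[string]*url.URL{"server-0": target})

	// Launch probes every Interval and blocks until ctx is canceled.
	go hc.Launch(ctx)
}

The checker no longer mutates the balancer's server list; it only flips a child's status, which is how the WRR balancer learns to skip unhealthy targets.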
@@ -8,17 +8,12 @@ import (
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"

gokitmetrics "github.com/go-kit/kit/metrics"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/config/runtime"
"github.com/traefik/traefik/v2/pkg/log"
"github.com/traefik/traefik/v2/pkg/metrics"
"github.com/traefik/traefik/v2/pkg/safe"
"github.com/vulcand/oxy/roundrobin"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
@@ -26,267 +21,153 @@ import (
"google.golang.org/grpc/status"
)

const (
serverUp = "UP"
serverDown = "DOWN"
)
const modeGRPC = "grpc"

const (
HTTPMode = "http"
GRPCMode = "grpc"
)

var (
singleton *HealthCheck
once sync.Once
)

// Balancer is the set of operations required to manage the list of servers in a load-balancer.
type Balancer interface {
Servers() []*url.URL
RemoveServer(u *url.URL) error
UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error
// StatusSetter should be implemented by a service that, when the status of a
// registered target change, needs to be notified of that change.
type StatusSetter interface {
SetStatus(ctx context.Context, childName string, up bool)
}

// BalancerHandler includes functionality for load-balancing management.
type BalancerHandler interface {
ServeHTTP(w http.ResponseWriter, req *http.Request)
Balancer
// StatusUpdater should be implemented by a service that, when its status
// changes (e.g. all if its children are down), needs to propagate upwards (to
// their parent(s)) that change.
type StatusUpdater interface {
RegisterStatusUpdater(fn func(up bool)) error
}

// BalancerStatusHandler is an http Handler that does load-balancing,
// and updates its parents of its status.
type BalancerStatusHandler interface {
BalancerHandler
StatusUpdater
type metricsHealthCheck interface {
ServiceServerUpGauge() gokitmetrics.Gauge
}

type metricsHealthcheck struct {
serverUpGauge gokitmetrics.Gauge
type ServiceHealthChecker struct {
balancer StatusSetter
info *runtime.ServiceInfo

config *dynamic.ServerHealthCheck
interval time.Duration
timeout time.Duration

metrics metricsHealthCheck

client *http.Client
targets map[string]*url.URL
}

// Options are the public health check options.
type Options struct {
Headers map[string]string
Hostname string
Scheme string
Mode string
Path string
Method string
Port int
FollowRedirects bool
Transport http.RoundTripper
Interval time.Duration
Timeout time.Duration
LB Balancer
}

func (opt Options) String() string {
return fmt.Sprintf("[Hostname: %s Headers: %v Path: %s Method: %s Port: %d Interval: %s Timeout: %s FollowRedirects: %v]", opt.Hostname, opt.Headers, opt.Path, opt.Method, opt.Port, opt.Interval, opt.Timeout, opt.FollowRedirects)
}

type backendURL struct {
url *url.URL
weight int
}

// BackendConfig HealthCheck configuration for a backend.
type BackendConfig struct {
Options
name string
disabledURLs []backendURL
}

func (b *BackendConfig) newRequest(serverURL *url.URL) (*http.Request, error) {
u, err := serverURL.Parse(b.Path)
if err != nil {
return nil, err
}

if len(b.Scheme) > 0 {
u.Scheme = b.Scheme
}

if b.Port != 0 {
u.Host = net.JoinHostPort(u.Hostname(), strconv.Itoa(b.Port))
}

return http.NewRequest(http.MethodGet, u.String(), http.NoBody)
}

// setRequestOptions sets all request options present on the BackendConfig.
func (b *BackendConfig) setRequestOptions(req *http.Request) *http.Request {
if b.Options.Hostname != "" {
req.Host = b.Options.Hostname
}

for k, v := range b.Options.Headers {
req.Header.Set(k, v)
}

if b.Options.Method != "" {
req.Method = strings.ToUpper(b.Options.Method)
}

return req
}

// HealthCheck struct.
type HealthCheck struct {
Backends map[string]*BackendConfig
metrics metricsHealthcheck
cancel context.CancelFunc
}

// SetBackendsConfiguration set backends configuration.
func (hc *HealthCheck) SetBackendsConfiguration(parentCtx context.Context, backends map[string]*BackendConfig) {
hc.Backends = backends
if hc.cancel != nil {
hc.cancel()
}
ctx, cancel := context.WithCancel(parentCtx)
hc.cancel = cancel

for _, backend := range backends {
currentBackend := backend
safe.Go(func() {
hc.execute(ctx, currentBackend)
})
}
}

func (hc *HealthCheck) execute(ctx context.Context, backend *BackendConfig) {
func NewServiceHealthChecker(ctx context.Context, metrics metricsHealthCheck, config *dynamic.ServerHealthCheck, service StatusSetter, info *runtime.ServiceInfo, transport http.RoundTripper, targets map[string]*url.URL) *ServiceHealthChecker {
logger := log.FromContext(ctx)

logger.Debugf("Initial health check for backend: %q", backend.name)
hc.checkServersLB(ctx, backend)

ticker := time.NewTicker(backend.Interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
logger.Debugf("Stopping current health check goroutines of backend: %s", backend.name)
return
case <-ticker.C:
logger.Debugf("Routine health check refresh for backend: %s", backend.name)
hc.checkServersLB(ctx, backend)
}
}
}

func (hc *HealthCheck) checkServersLB(ctx context.Context, backend *BackendConfig) {
logger := log.FromContext(ctx)

enabledURLs := backend.LB.Servers()

var newDisabledURLs []backendURL
for _, disabledURL := range backend.disabledURLs {
serverUpMetricValue := float64(0)

if err := checkHealth(disabledURL.url, backend); err == nil {
logger.Warnf("Health check up: returning to server list. Backend: %q URL: %q Weight: %d",
backend.name, disabledURL.url.String(), disabledURL.weight)
if err = backend.LB.UpsertServer(disabledURL.url, roundrobin.Weight(disabledURL.weight)); err != nil {
logger.Error(err)
}
serverUpMetricValue = 1
} else {
logger.Warnf("Health check still failing. Backend: %q URL: %q Reason: %s", backend.name, disabledURL.url.String(), err)
newDisabledURLs = append(newDisabledURLs, disabledURL)
}

labelValues := []string{"service", backend.name, "url", disabledURL.url.String()}
hc.metrics.serverUpGauge.With(labelValues...).Set(serverUpMetricValue)
interval := time.Duration(config.Interval)
if interval <= 0 {
logger.Error("Health check interval smaller than zero")
interval = time.Duration(dynamic.DefaultHealthCheckInterval)
}

backend.disabledURLs = newDisabledURLs

for _, enabledURL := range enabledURLs {
serverUpMetricValue := float64(1)

if err := checkHealth(enabledURL, backend); err != nil {
weight := 1
rr, ok := backend.LB.(*roundrobin.RoundRobin)
if ok {
var gotWeight bool
weight, gotWeight = rr.ServerWeight(enabledURL)
if !gotWeight {
weight = 1
}
}

logger.Warnf("Health check failed, removing from server list. Backend: %q URL: %q Weight: %d Reason: %s",
backend.name, enabledURL.String(), weight, err)
if err := backend.LB.RemoveServer(enabledURL); err != nil {
logger.Error(err)
}

backend.disabledURLs = append(backend.disabledURLs, backendURL{enabledURL, weight})
serverUpMetricValue = 0
}

labelValues := []string{"service", backend.name, "url", enabledURL.String()}
hc.metrics.serverUpGauge.With(labelValues...).Set(serverUpMetricValue)
}
}

// GetHealthCheck returns the health check which is guaranteed to be a singleton.
func GetHealthCheck(registry metrics.Registry) *HealthCheck {
once.Do(func() {
singleton = newHealthCheck(registry)
})
return singleton
}

func newHealthCheck(registry metrics.Registry) *HealthCheck {
return &HealthCheck{
Backends: make(map[string]*BackendConfig),
metrics: metricsHealthcheck{
serverUpGauge: registry.ServiceServerUpGauge(),
},
}
}

// NewBackendConfig Instantiate a new BackendConfig.
func NewBackendConfig(options Options, backendName string) *BackendConfig {
return &BackendConfig{
Options: options,
name: backendName,
}
}

// checkHealth calls the proper health check function depending on the
// backend config mode, defaults to HTTP.
func checkHealth(serverURL *url.URL, backend *BackendConfig) error {
if backend.Options.Mode == GRPCMode {
return checkHealthGRPC(serverURL, backend)
}
return checkHealthHTTP(serverURL, backend)
}

// checkHealthHTTP returns an error with a meaningful description if the health check failed.
// Dedicated to HTTP servers.
func checkHealthHTTP(serverURL *url.URL, backend *BackendConfig) error {
req, err := backend.newRequest(serverURL)
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
timeout := time.Duration(config.Timeout)
if timeout <= 0 {
logger.Error("Health check timeout smaller than zero")
timeout = time.Duration(dynamic.DefaultHealthCheckTimeout)
}

req = backend.setRequestOptions(req)

client := http.Client{
Timeout: backend.Options.Timeout,
Transport: backend.Options.Transport,
if timeout >= interval {
logger.Warnf("Health check timeout should be lower than the health check interval. Interval set to timeout + 1 second (%s).", interval)
interval = timeout + time.Second
}

if !backend.FollowRedirects {
client := &http.Client{
Transport: transport,
}

if config.FollowRedirects != nil && !*config.FollowRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}

resp, err := client.Do(req)
return &ServiceHealthChecker{
balancer: service,
info: info,
config: config,
interval: interval,
timeout: timeout,
targets: targets,
client: client,
metrics: metrics,
}
}

func (shc *ServiceHealthChecker) Launch(ctx context.Context) {
ticker := time.NewTicker(shc.interval)
defer ticker.Stop()

for {
select {
case <-ctx.Done():
return

case <-ticker.C:
for proxyName, target := range shc.targets {
select {
case <-ctx.Done():
return
default:
}

up := true
serverUpMetricValue := float64(1)

if err := shc.executeHealthCheck(ctx, shc.config, target); err != nil {
// The context is canceled when the dynamic configuration is refreshed.
if errors.Is(err, context.Canceled) {
return
}

log.FromContext(ctx).
WithField("targetURL", target.String()).
WithError(err).
Warn("Health check failed.")

up = false
serverUpMetricValue = float64(0)
}

shc.balancer.SetStatus(ctx, proxyName, up)

statusStr := runtime.StatusDown
if up {
statusStr = runtime.StatusUp
}

shc.info.UpdateServerStatus(target.String(), statusStr)

shc.metrics.ServiceServerUpGauge().
With("service", proxyName).
With("url", target.String()).
Set(serverUpMetricValue)
}
}
}
}

func (shc *ServiceHealthChecker) executeHealthCheck(ctx context.Context, config *dynamic.ServerHealthCheck, target *url.URL) error {
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(shc.timeout))
defer cancel()

if config.Mode == modeGRPC {
return shc.checkHealthGRPC(ctx, target)
}
return shc.checkHealthHTTP(ctx, target)
}

// checkHealthHTTP returns an error with a meaningful description if the health check failed.
// Dedicated to HTTP servers.
func (shc *ServiceHealthChecker) checkHealthHTTP(ctx context.Context, target *url.URL) error {
req, err := shc.newRequest(ctx, target)
if err != nil {
return fmt.Errorf("create HTTP request: %w", err)
}

resp, err := shc.client.Do(req)
if err != nil {
return fmt.Errorf("HTTP request failed: %w", err)
}
@@ -300,34 +181,61 @@ func checkHealthHTTP(serverURL *url.URL, backend *BackendConfig) error {
return nil
}

func (shc *ServiceHealthChecker) newRequest(ctx context.Context, target *url.URL) (*http.Request, error) {
u, err := target.Parse(shc.config.Path)
if err != nil {
return nil, err
}

if len(shc.config.Scheme) > 0 {
u.Scheme = shc.config.Scheme
}

if shc.config.Port != 0 {
u.Host = net.JoinHostPort(u.Hostname(), strconv.Itoa(shc.config.Port))
}

req, err := http.NewRequestWithContext(ctx, shc.config.Method, u.String(), http.NoBody)
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %w", err)
}

if shc.config.Hostname != "" {
req.Host = shc.config.Hostname
}

for k, v := range shc.config.Headers {
req.Header.Set(k, v)
}

return req, nil
}

// checkHealthGRPC returns an error with a meaningful description if the health check failed.
// Dedicated to gRPC servers implementing gRPC Health Checking Protocol v1.
func checkHealthGRPC(serverURL *url.URL, backend *BackendConfig) error {
u, err := serverURL.Parse(backend.Path)
func (shc *ServiceHealthChecker) checkHealthGRPC(ctx context.Context, serverURL *url.URL) error {
u, err := serverURL.Parse(shc.config.Path)
if err != nil {
return fmt.Errorf("failed to parse server URL: %w", err)
}

port := u.Port()
if backend.Options.Port != 0 {
port = strconv.Itoa(backend.Options.Port)
if shc.config.Port != 0 {
port = strconv.Itoa(shc.config.Port)
}

serverAddr := net.JoinHostPort(u.Hostname(), port)

var opts []grpc.DialOption
switch backend.Options.Scheme {
switch shc.config.Scheme {
case "http", "h2c", "":
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}

ctx, cancel := context.WithTimeout(context.Background(), backend.Options.Timeout)
defer cancel()

conn, err := grpc.DialContext(ctx, serverAddr, opts...)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("fail to connect to %s within %s: %w", serverAddr, backend.Options.Timeout, err)
return fmt.Errorf("fail to connect to %s within %s: %w", serverAddr, shc.config.Timeout, err)
}
return fmt.Errorf("fail to connect to %s: %w", serverAddr, err)
}
@@ -341,6 +249,8 @@ func checkHealthGRPC(serverURL *url.URL, backend *BackendConfig) error {
return fmt.Errorf("gRPC server does not implement the health protocol: %w", err)
case codes.DeadlineExceeded:
return fmt.Errorf("gRPC health check timeout: %w", err)
case codes.Canceled:
return context.Canceled
}
}

@@ -353,155 +263,3 @@ func checkHealthGRPC(serverURL *url.URL, backend *BackendConfig) error {

return nil
}

// StatusUpdater should be implemented by a service that, when its status
// changes (e.g. all if its children are down), needs to propagate upwards (to
// their parent(s)) that change.
type StatusUpdater interface {
RegisterStatusUpdater(fn func(up bool)) error
}

// NewLBStatusUpdater returns a new LbStatusUpdater.
func NewLBStatusUpdater(bh BalancerHandler, info *runtime.ServiceInfo, hc *dynamic.ServerHealthCheck) *LbStatusUpdater {
return &LbStatusUpdater{
BalancerHandler: bh,
serviceInfo: info,
wantsHealthCheck: hc != nil,
}
}

// LbStatusUpdater wraps a BalancerHandler and a ServiceInfo,
// so it can keep track of the status of a server in the ServiceInfo.
type LbStatusUpdater struct {
BalancerHandler
serviceInfo *runtime.ServiceInfo // can be nil
updaters []func(up bool)
wantsHealthCheck bool
}

// RegisterStatusUpdater adds fn to the list of hooks that are run when the
// status of the Balancer changes.
// Not thread safe.
func (lb *LbStatusUpdater) RegisterStatusUpdater(fn func(up bool)) error {
if !lb.wantsHealthCheck {
return errors.New("healthCheck not enabled in config for this loadbalancer service")
}

lb.updaters = append(lb.updaters, fn)
return nil
}

// RemoveServer removes the given server from the BalancerHandler,
// and updates the status of the server to "DOWN".
func (lb *LbStatusUpdater) RemoveServer(u *url.URL) error {
// TODO(mpl): when we have the freedom to change the signature of RemoveServer
// (kinda stuck because of oxy for now), let's pass around a context to improve
// logging.
ctx := context.TODO()
upBefore := len(lb.BalancerHandler.Servers()) > 0
err := lb.BalancerHandler.RemoveServer(u)
if err != nil {
return err
}
if lb.serviceInfo != nil {
lb.serviceInfo.UpdateServerStatus(u.String(), serverDown)
}
log.FromContext(ctx).Debugf("child %s now %s", u.String(), serverDown)

if !upBefore {
// we were already down, and we still are, no need to propagate.
log.FromContext(ctx).Debugf("Still %s, no need to propagate", serverDown)
return nil
}
if len(lb.BalancerHandler.Servers()) > 0 {
// we were up, and we still are, no need to propagate
log.FromContext(ctx).Debugf("Still %s, no need to propagate", serverUp)
return nil
}

log.FromContext(ctx).Debugf("Propagating new %s status", serverDown)
for _, fn := range lb.updaters {
fn(false)
}
return nil
}

// UpsertServer adds the given server to the BalancerHandler,
// and updates the status of the server to "UP".
func (lb *LbStatusUpdater) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error {
ctx := context.TODO()
upBefore := len(lb.BalancerHandler.Servers()) > 0
err := lb.BalancerHandler.UpsertServer(u, options...)
if err != nil {
return err
}
if lb.serviceInfo != nil {
lb.serviceInfo.UpdateServerStatus(u.String(), serverUp)
}
log.FromContext(ctx).Debugf("child %s now %s", u.String(), serverUp)

if upBefore {
// we were up, and we still are, no need to propagate
log.FromContext(ctx).Debugf("Still %s, no need to propagate", serverUp)
return nil
}

log.FromContext(ctx).Debugf("Propagating new %s status", serverUp)
for _, fn := range lb.updaters {
fn(true)
}
return nil
}

// Balancers is a list of Balancers(s) that implements the Balancer interface.
type Balancers []Balancer

// Servers returns the deduplicated server URLs from all the Balancer.
// Note that the deduplication is only possible because all the underlying
// balancers are of the same kind (the oxy implementation).
// The comparison property is the same as the one found at:
// https://github.com/vulcand/oxy/blob/fb2728c857b7973a27f8de2f2190729c0f22cf49/roundrobin/rr.go#L347.
func (b Balancers) Servers() []*url.URL {
seen := make(map[string]struct{})

var servers []*url.URL
for _, lb := range b {
for _, server := range lb.Servers() {
key := serverKey(server)
if _, ok := seen[key]; ok {
continue
}

servers = append(servers, server)
seen[key] = struct{}{}
}
}

return servers
}

// RemoveServer removes the given server from all the Balancer,
// and updates the status of the server to "DOWN".
func (b Balancers) RemoveServer(u *url.URL) error {
for _, lb := range b {
if err := lb.RemoveServer(u); err != nil {
return err
}
}
return nil
}

// UpsertServer adds the given server to all the Balancer,
// and updates the status of the server to "UP".
func (b Balancers) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error {
for _, lb := range b {
if err := lb.UpsertServer(u, options...); err != nil {
return err
}
}
return nil
}

func serverKey(u *url.URL) string {
return u.Path + u.Host + u.Scheme
}

@@ -11,127 +11,324 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ptypes "github.com/traefik/paerser/types"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/config/runtime"
"github.com/traefik/traefik/v2/pkg/testhelpers"
"github.com/vulcand/oxy/roundrobin"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

const (
healthCheckInterval = 200 * time.Millisecond
healthCheckTimeout = 100 * time.Millisecond
)

func TestSetBackendsConfiguration(t *testing.T) {
func TestServiceHealthChecker_newRequest(t *testing.T) {
testCases := []struct {
desc string
startHealthy bool
mode string
server StartTestServer
expectedNumRemovedServers int
expectedNumUpsertedServers int
expectedGaugeValue float64
desc string
targetURL string
config dynamic.ServerHealthCheck
expTarget string
expError bool
expHostname string
expHeader string
expMethod string
}{
{
desc: "healthy server staying healthy",
startHealthy: true,
server: newHTTPServer(http.StatusOK),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 1,
desc: "no port override",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Path: "/test",
Port: 0,
},
expError: false,
expTarget: "http://backend1:80/test",
expHostname: "backend1:80",
expMethod: http.MethodGet,
},
{
desc: "healthy server staying healthy (StatusNoContent)",
startHealthy: true,
server: newHTTPServer(http.StatusNoContent),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 1,
desc: "port override",
targetURL: "http://backend2:80",
config: dynamic.ServerHealthCheck{
Path: "/test",
Port: 8080,
},
expError: false,
expTarget: "http://backend2:8080/test",
expHostname: "backend2:8080",
expMethod: http.MethodGet,
},
{
desc: "healthy server staying healthy (StatusPermanentRedirect)",
startHealthy: true,
server: newHTTPServer(http.StatusPermanentRedirect),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 1,
desc: "no port override with no port in server URL",
targetURL: "http://backend1",
config: dynamic.ServerHealthCheck{
Path: "/health",
Port: 0,
},
expError: false,
expTarget: "http://backend1/health",
expHostname: "backend1",
expMethod: http.MethodGet,
},
{
desc: "healthy server becoming sick",
startHealthy: true,
server: newHTTPServer(http.StatusServiceUnavailable),
expectedNumRemovedServers: 1,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 0,
desc: "port override with no port in server URL",
targetURL: "http://backend2",
config: dynamic.ServerHealthCheck{
Path: "/health",
Port: 8080,
},
expError: false,
expTarget: "http://backend2:8080/health",
expHostname: "backend2:8080",
expMethod: http.MethodGet,
},
{
desc: "sick server becoming healthy",
startHealthy: false,
server: newHTTPServer(http.StatusOK),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 1,
expectedGaugeValue: 1,
desc: "scheme override",
targetURL: "https://backend1:80",
config: dynamic.ServerHealthCheck{
Scheme: "http",
Path: "/test",
Port: 0,
},
expError: false,
expTarget: "http://backend1:80/test",
expHostname: "backend1:80",
expMethod: http.MethodGet,
},
{
desc: "sick server staying sick",
startHealthy: false,
server: newHTTPServer(http.StatusServiceUnavailable),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 0,
desc: "path with param",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Path: "/health?powpow=do",
Port: 0,
},
expError: false,
expTarget: "http://backend1:80/health?powpow=do",
expHostname: "backend1:80",
expMethod: http.MethodGet,
},
{
desc: "healthy server toggling to sick and back to healthy",
startHealthy: true,
server: newHTTPServer(http.StatusServiceUnavailable, http.StatusOK),
expectedNumRemovedServers: 1,
expectedNumUpsertedServers: 1,
expectedGaugeValue: 1,
desc: "path with params",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Path: "/health?powpow=do&do=powpow",
Port: 0,
},
expError: false,
expTarget: "http://backend1:80/health?powpow=do&do=powpow",
expHostname: "backend1:80",
expMethod: http.MethodGet,
},
{
desc: "healthy grpc server staying healthy",
mode: "grpc",
startHealthy: true,
server: newGRPCServer(healthpb.HealthCheckResponse_SERVING),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 1,
desc: "path with invalid path",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Path: ":",
Port: 0,
},
expError: true,
expTarget: "",
expHostname: "backend1:80",
expMethod: http.MethodGet,
},
{
desc: "healthy grpc server becoming sick",
mode: "grpc",
startHealthy: true,
server: newGRPCServer(healthpb.HealthCheckResponse_NOT_SERVING),
expectedNumRemovedServers: 1,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 0,
desc: "override hostname",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Hostname: "myhost",
Path: "/",
},
expTarget: "http://backend1:80/",
expHostname: "myhost",
expHeader: "",
expMethod: http.MethodGet,
},
{
desc: "sick grpc server becoming healthy",
mode: "grpc",
startHealthy: false,
server: newGRPCServer(healthpb.HealthCheckResponse_SERVING),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 1,
expectedGaugeValue: 1,
desc: "not override hostname",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Hostname: "",
Path: "/",
},
expTarget: "http://backend1:80/",
expHostname: "backend1:80",
expHeader: "",
expMethod: http.MethodGet,
},
{
desc: "sick grpc server staying sick",
mode: "grpc",
startHealthy: false,
server: newGRPCServer(healthpb.HealthCheckResponse_NOT_SERVING),
expectedNumRemovedServers: 0,
expectedNumUpsertedServers: 0,
expectedGaugeValue: 0,
desc: "custom header",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Headers: map[string]string{"Custom-Header": "foo"},
Hostname: "",
Path: "/",
},
expTarget: "http://backend1:80/",
expHostname: "backend1:80",
expHeader: "foo",
expMethod: http.MethodGet,
},
{
desc: "healthy grpc server toggling to sick and back to healthy",
mode: "grpc",
startHealthy: true,
server: newGRPCServer(healthpb.HealthCheckResponse_NOT_SERVING, healthpb.HealthCheckResponse_SERVING),
expectedNumRemovedServers: 1,
expectedNumUpsertedServers: 1,
expectedGaugeValue: 1,
desc: "custom header with hostname override",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Headers: map[string]string{"Custom-Header": "foo"},
Hostname: "myhost",
Path: "/",
},
expTarget: "http://backend1:80/",
expHostname: "myhost",
expHeader: "foo",
expMethod: http.MethodGet,
},
{
desc: "custom method",
targetURL: "http://backend1:80",
config: dynamic.ServerHealthCheck{
Path: "/",
Method: http.MethodHead,
},
expTarget: "http://backend1:80/",
expHostname: "backend1:80",
expMethod: http.MethodHead,
},
}

for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()

shc := ServiceHealthChecker{config: &test.config}

u := testhelpers.MustParseURL(test.targetURL)
req, err := shc.newRequest(context.Background(), u)

if test.expError {
require.Error(t, err)
assert.Nil(t, req)
} else {
require.NoError(t, err, "failed to create new request")
require.NotNil(t, req)

assert.Equal(t, test.expTarget, req.URL.String())
assert.Equal(t, test.expHeader, req.Header.Get("Custom-Header"))
assert.Equal(t, test.expHostname, req.Host)
assert.Equal(t, test.expMethod, req.Method)
}
})
}
}

func TestServiceHealthChecker_checkHealthHTTP_NotFollowingRedirects(t *testing.T) {
redirectServerCalled := false
redirectTestServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
redirectServerCalled = true
}))
defer redirectTestServer.Close()

ctx, cancel := context.WithTimeout(context.Background(), time.Duration(dynamic.DefaultHealthCheckTimeout))
defer cancel()

server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Add("location", redirectTestServer.URL)
rw.WriteHeader(http.StatusSeeOther)
}))
defer server.Close()

config := &dynamic.ServerHealthCheck{
Path: "/path",
FollowRedirects: Bool(false),
Interval: dynamic.DefaultHealthCheckInterval,
Timeout: dynamic.DefaultHealthCheckTimeout,
}
healthChecker := NewServiceHealthChecker(ctx, nil, config, nil, nil, http.DefaultTransport, nil)

err := healthChecker.checkHealthHTTP(ctx, testhelpers.MustParseURL(server.URL))
require.NoError(t, err)

assert.False(t, redirectServerCalled, "HTTP redirect must not be followed")
}

func TestServiceHealthChecker_Launch(t *testing.T) {
testCases := []struct {
desc string
mode string
server StartTestServer
expNumRemovedServers int
expNumUpsertedServers int
expGaugeValue float64
targetStatus string
}{
{
desc: "healthy server staying healthy",
server: newHTTPServer(http.StatusOK),
expNumRemovedServers: 0,
expNumUpsertedServers: 1,
expGaugeValue: 1,
targetStatus: runtime.StatusUp,
},
{
desc: "healthy server staying healthy (StatusNoContent)",
server: newHTTPServer(http.StatusNoContent),
expNumRemovedServers: 0,
expNumUpsertedServers: 1,
expGaugeValue: 1,
targetStatus: runtime.StatusUp,
},
{
desc: "healthy server staying healthy (StatusPermanentRedirect)",
server: newHTTPServer(http.StatusPermanentRedirect),
expNumRemovedServers: 0,
expNumUpsertedServers: 1,
expGaugeValue: 1,
targetStatus: runtime.StatusUp,
},
{
desc: "healthy server becoming sick",
server: newHTTPServer(http.StatusServiceUnavailable),
expNumRemovedServers: 1,
expNumUpsertedServers: 0,
expGaugeValue: 0,
targetStatus: runtime.StatusDown,
},
{
desc: "healthy server toggling to sick and back to healthy",
server: newHTTPServer(http.StatusServiceUnavailable, http.StatusOK),
expNumRemovedServers: 1,
expNumUpsertedServers: 1,
expGaugeValue: 1,
targetStatus: runtime.StatusUp,
},
{
desc: "healthy server toggling to healthy and go to sick",
server: newHTTPServer(http.StatusOK, http.StatusServiceUnavailable),
expNumRemovedServers: 1,
expNumUpsertedServers: 1,
expGaugeValue: 0,
targetStatus: runtime.StatusDown,
},
{
desc: "healthy grpc server staying healthy",
mode: "grpc",
server: newGRPCServer(healthpb.HealthCheckResponse_SERVING),
expNumRemovedServers: 0,
expNumUpsertedServers: 1,
expGaugeValue: 1,
targetStatus: runtime.StatusUp,
},
{
desc: "healthy grpc server becoming sick",
mode: "grpc",
server: newGRPCServer(healthpb.HealthCheckResponse_NOT_SERVING),
expNumRemovedServers: 1,
expNumUpsertedServers: 0,
expGaugeValue: 0,
targetStatus: runtime.StatusDown,
},
{
desc: "healthy grpc server toggling to sick and back to healthy",
mode: "grpc",
server: newGRPCServer(healthpb.HealthCheckResponse_NOT_SERVING, healthpb.HealthCheckResponse_SERVING),
expNumRemovedServers: 1,
expNumUpsertedServers: 1,
expGaugeValue: 1,
targetStatus: runtime.StatusUp,
},
}

@@ -145,37 +342,26 @@ func TestSetBackendsConfiguration(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)

serverURL, timeout := test.server.Start(t, cancel)
targetURL, timeout := test.server.Start(t, cancel)

lb := &testLoadBalancer{RWMutex: &sync.RWMutex{}}

options := Options{
config := &dynamic.ServerHealthCheck{
Mode: test.mode,
Path: "/path",
Interval: healthCheckInterval,
Timeout: healthCheckTimeout,
LB: lb,
}
backend := NewBackendConfig(options, "backendName")

if test.startHealthy {
lb.servers = append(lb.servers, serverURL)
} else {
backend.disabledURLs = append(backend.disabledURLs, backendURL{url: serverURL, weight: 1})
Interval: ptypes.Duration(500 * time.Millisecond),
Timeout: ptypes.Duration(499 * time.Millisecond),
}

collectingMetrics := &testhelpers.CollectingGauge{}

check := HealthCheck{
Backends: make(map[string]*BackendConfig),
metrics: metricsHealthcheck{serverUpGauge: collectingMetrics},
}
gauge := &testhelpers.CollectingGauge{}
serviceInfo := &runtime.ServiceInfo{}
hc := NewServiceHealthChecker(ctx, &MetricsMock{gauge}, config, lb, serviceInfo, http.DefaultTransport, map[string]*url.URL{"test": targetURL})

wg := sync.WaitGroup{}
wg.Add(1)

go func() {
check.execute(ctx, backend)
hc.Launch(ctx)
wg.Done()
}()

@@ -189,392 +375,14 @@ func TestSetBackendsConfiguration(t *testing.T) {
lb.Lock()
defer lb.Unlock()

assert.Equal(t, test.expectedNumRemovedServers, lb.numRemovedServers, "removed servers")
assert.Equal(t, test.expectedNumUpsertedServers, lb.numUpsertedServers, "upserted servers")
assert.Equal(t, test.expectedGaugeValue, collectingMetrics.GaugeValue, "ServerUp Gauge")
assert.Equal(t, test.expNumRemovedServers, lb.numRemovedServers, "removed servers")
assert.Equal(t, test.expNumUpsertedServers, lb.numUpsertedServers, "upserted servers")
assert.Equal(t, test.expGaugeValue, gauge.GaugeValue, "ServerUp Gauge")
assert.Equal(t, serviceInfo.GetAllStatus(), map[string]string{targetURL.String(): test.targetStatus})
})
}
}

func TestNewRequest(t *testing.T) {
type expected struct {
err bool
value string
}

testCases := []struct {
desc string
serverURL string
options Options
expected expected
}{
{
desc: "no port override",
serverURL: "http://backend1:80",
options: Options{
Path: "/test",
Port: 0,
},
expected: expected{
err: false,
value: "http://backend1:80/test",
},
},
{
desc: "port override",
serverURL: "http://backend2:80",
options: Options{
Path: "/test",
Port: 8080,
},
expected: expected{
err: false,
value: "http://backend2:8080/test",
},
},
{
desc: "no port override with no port in server URL",
serverURL: "http://backend1",
options: Options{
Path: "/health",
Port: 0,
},
expected: expected{
err: false,
value: "http://backend1/health",
},
},
{
desc: "port override with no port in server URL",
serverURL: "http://backend2",
options: Options{
Path: "/health",
Port: 8080,
},
expected: expected{
err: false,
value: "http://backend2:8080/health",
},
},
{
desc: "scheme override",
serverURL: "https://backend1:80",
options: Options{
Scheme: "http",
Path: "/test",
Port: 0,
},
expected: expected{
err: false,
value: "http://backend1:80/test",
},
},
{
desc: "path with param",
serverURL: "http://backend1:80",
options: Options{
Path: "/health?powpow=do",
Port: 0,
},
expected: expected{
err: false,
value: "http://backend1:80/health?powpow=do",
},
},
{
desc: "path with params",
serverURL: "http://backend1:80",
options: Options{
Path: "/health?powpow=do&do=powpow",
Port: 0,
},
expected: expected{
err: false,
value: "http://backend1:80/health?powpow=do&do=powpow",
},
},
{
desc: "path with invalid path",
serverURL: "http://backend1:80",
options: Options{
Path: ":",
Port: 0,
},
expected: expected{
err: true,
value: "",
},
},
}

for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()

backend := NewBackendConfig(test.options, "backendName")

u := testhelpers.MustParseURL(test.serverURL)

req, err := backend.newRequest(u)

if test.expected.err {
require.Error(t, err)
assert.Nil(t, nil)
} else {
require.NoError(t, err, "failed to create new backend request")
require.NotNil(t, req)
assert.Equal(t, test.expected.value, req.URL.String())
}
})
}
}

func TestRequestOptions(t *testing.T) {
testCases := []struct {
desc string
serverURL string
options Options
expectedHostname string
expectedHeader string
expectedMethod string
}{
{
desc: "override hostname",
serverURL: "http://backend1:80",
options: Options{
Hostname: "myhost",
Path: "/",
},
expectedHostname: "myhost",
expectedHeader: "",
expectedMethod: http.MethodGet,
},
{
desc: "not override hostname",
serverURL: "http://backend1:80",
options: Options{
Hostname: "",
Path: "/",
},
expectedHostname: "backend1:80",
expectedHeader: "",
expectedMethod: http.MethodGet,
},
{
desc: "custom header",
serverURL: "http://backend1:80",
options: Options{
Headers: map[string]string{"Custom-Header": "foo"},
Hostname: "",
Path: "/",
},
expectedHostname: "backend1:80",
expectedHeader: "foo",
expectedMethod: http.MethodGet,
},
{
desc: "custom header with hostname override",
serverURL: "http://backend1:80",
options: Options{
Headers: map[string]string{"Custom-Header": "foo"},
Hostname: "myhost",
Path: "/",
},
expectedHostname: "myhost",
expectedHeader: "foo",
expectedMethod: http.MethodGet,
},
{
desc: "custom method",
serverURL: "http://backend1:80",
options: Options{
Path: "/",
Method: http.MethodHead,
},
expectedHostname: "backend1:80",
expectedMethod: http.MethodHead,
},
}

for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()

backend := NewBackendConfig(test.options, "backendName")

u, err := url.Parse(test.serverURL)
require.NoError(t, err)

req, err := backend.newRequest(u)
require.NoError(t, err, "failed to create new backend request")

req = backend.setRequestOptions(req)

assert.Equal(t, "http://backend1:80/", req.URL.String())
assert.Equal(t, test.expectedHostname, req.Host)
assert.Equal(t, test.expectedHeader, req.Header.Get("Custom-Header"))
assert.Equal(t, test.expectedMethod, req.Method)
})
}
}

func TestBalancers_Servers(t *testing.T) {
server1, err := url.Parse("http://foo.com")
require.NoError(t, err)

balancer1, err := roundrobin.New(nil)
require.NoError(t, err)

err = balancer1.UpsertServer(server1)
require.NoError(t, err)

server2, err := url.Parse("http://foo.com")
require.NoError(t, err)

balancer2, err := roundrobin.New(nil)
require.NoError(t, err)

err = balancer2.UpsertServer(server2)
require.NoError(t, err)

balancers := Balancers([]Balancer{balancer1, balancer2})

want, err := url.Parse("http://foo.com")
require.NoError(t, err)

assert.Equal(t, 1, len(balancers.Servers()))
assert.Equal(t, want, balancers.Servers()[0])
}

func TestBalancers_UpsertServer(t *testing.T) {
balancer1, err := roundrobin.New(nil)
require.NoError(t, err)

balancer2, err := roundrobin.New(nil)
require.NoError(t, err)

want, err := url.Parse("http://foo.com")
require.NoError(t, err)

balancers := Balancers([]Balancer{balancer1, balancer2})

err = balancers.UpsertServer(want)
require.NoError(t, err)

assert.Equal(t, 1, len(balancer1.Servers()))
assert.Equal(t, want, balancer1.Servers()[0])

assert.Equal(t, 1, len(balancer2.Servers()))
assert.Equal(t, want, balancer2.Servers()[0])
}

func TestBalancers_RemoveServer(t *testing.T) {
server, err := url.Parse("http://foo.com")
require.NoError(t, err)

balancer1, err := roundrobin.New(nil)
require.NoError(t, err)

err = balancer1.UpsertServer(server)
require.NoError(t, err)

balancer2, err := roundrobin.New(nil)
require.NoError(t, err)

err = balancer2.UpsertServer(server)
require.NoError(t, err)

balancers := Balancers([]Balancer{balancer1, balancer2})

err = balancers.RemoveServer(server)
require.NoError(t, err)

assert.Equal(t, 0, len(balancer1.Servers()))
assert.Equal(t, 0, len(balancer2.Servers()))
}

func TestLBStatusUpdater(t *testing.T) {
lb := &testLoadBalancer{RWMutex: &sync.RWMutex{}}
svInfo := &runtime.ServiceInfo{}
lbsu := NewLBStatusUpdater(lb, svInfo, nil)
newServer, err := url.Parse("http://foo.com")
assert.NoError(t, err)
err = lbsu.UpsertServer(newServer, roundrobin.Weight(1))
assert.NoError(t, err)
assert.Equal(t, len(lbsu.Servers()), 1)
assert.Equal(t, len(lbsu.BalancerHandler.(*testLoadBalancer).Options()), 1)
statuses := svInfo.GetAllStatus()
assert.Equal(t, len(statuses), 1)
for k, v := range statuses {
assert.Equal(t, k, newServer.String())
assert.Equal(t, v, serverUp)
break
}
err = lbsu.RemoveServer(newServer)
assert.NoError(t, err)
assert.Equal(t, len(lbsu.Servers()), 0)
statuses = svInfo.GetAllStatus()
assert.Equal(t, len(statuses), 1)
for k, v := range statuses {
assert.Equal(t, k, newServer.String())
assert.Equal(t, v, serverDown)
break
}
}

func TestNotFollowingRedirects(t *testing.T) {
redirectServerCalled := false
redirectTestServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
redirectServerCalled = true
}))
defer redirectTestServer.Close()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Add("location", redirectTestServer.URL)
rw.WriteHeader(http.StatusSeeOther)
cancel()
}))
defer server.Close()

lb := &testLoadBalancer{
RWMutex: &sync.RWMutex{},
servers: []*url.URL{testhelpers.MustParseURL(server.URL)},
}

backend := NewBackendConfig(Options{
Path: "/path",
Interval: healthCheckInterval,
Timeout: healthCheckTimeout,
LB: lb,
FollowRedirects: false,
}, "backendName")

collectingMetrics := &testhelpers.CollectingGauge{}
check := HealthCheck{
Backends: make(map[string]*BackendConfig),
metrics: metricsHealthcheck{serverUpGauge: collectingMetrics},
}

wg := sync.WaitGroup{}
wg.Add(1)

go func() {
check.execute(ctx, backend)
wg.Done()
}()

timeout := time.Duration(int(healthCheckInterval) + 500)
select {
case <-time.After(timeout):
t.Fatal("test did not complete in time")
case <-ctx.Done():
wg.Wait()
}

assert.False(t, redirectServerCalled, "HTTP redirect must not be followed")
func Bool(b bool) *bool {
return &b
}

@@ -10,9 +10,10 @@ import (
"testing"
"time"

gokitmetrics "github.com/go-kit/kit/metrics"
"github.com/stretchr/testify/assert"
"github.com/traefik/traefik/v2/pkg/config/dynamic"
"github.com/traefik/traefik/v2/pkg/testhelpers"
"github.com/vulcand/oxy/roundrobin"
"google.golang.org/grpc"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)
@@ -64,10 +65,13 @@ func newGRPCServer(healthSequence ...healthpb.HealthCheckResponse_ServingStatus)
}

func (s *GRPCServer) Check(_ context.Context, _ *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
stat := s.status.Pop()
if s.status.IsEmpty() {
s.done()
return &healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_SERVICE_UNKNOWN,
}, nil
}
stat := s.status.Pop()

return &healthpb.HealthCheckResponse{
Status: stat,
@@ -75,10 +79,13 @@ func (s *GRPCServer) Check(_ context.Context, _ *healthpb.HealthCheckRequest) (*
}

func (s *GRPCServer) Watch(_ *healthpb.HealthCheckRequest, server healthpb.Health_WatchServer) error {
stat := s.status.Pop()
if s.status.IsEmpty() {
s.done()
return server.Send(&healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_SERVICE_UNKNOWN,
})
}
stat := s.status.Pop()

return server.Send(&healthpb.HealthCheckResponse{
Status: stat,
@@ -105,7 +112,7 @@ func (s *GRPCServer) Start(t *testing.T, done func()) (*url.URL, time.Duration)
}()

// Make test timeout dependent on number of expected requests, health check interval, and a safety margin.
return testhelpers.MustParseURL("http://" + listener.Addr().String()), time.Duration(len(s.status.sequence)*int(healthCheckInterval) + 500)
return testhelpers.MustParseURL("http://" + listener.Addr().String()), time.Duration(len(s.status.sequence)*int(dynamic.DefaultHealthCheckInterval) + 500)
}

type HTTPServer struct {
@@ -126,13 +133,14 @@ func newHTTPServer(healthSequence ...int) *HTTPServer {
// ServeHTTP returns HTTP response codes following a status sequences.
// It calls the given 'done' function once all request health indicators have been depleted.
func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
if s.status.IsEmpty() {
s.done()
return
}

stat := s.status.Pop()

w.WriteHeader(stat)

if s.status.IsEmpty() {
s.done()
}
}

func (s *HTTPServer) Start(t *testing.T, done func()) (*url.URL, time.Duration) {
@@ -144,7 +152,7 @@ func (s *HTTPServer) Start(t *testing.T, done func()) (*url.URL, time.Duration)
t.Cleanup(ts.Close)

// Make test timeout dependent on number of expected requests, health check interval, and a safety margin.
return testhelpers.MustParseURL(ts.URL), time.Duration(len(s.status.sequence)*int(healthCheckInterval) + 500)
return testhelpers.MustParseURL(ts.URL), time.Duration(len(s.status.sequence)*int(dynamic.DefaultHealthCheckInterval) + 500)
}

type testLoadBalancer struct {
@@ -153,53 +161,20 @@ type testLoadBalancer struct {
*sync.RWMutex
numRemovedServers int
numUpsertedServers int
servers []*url.URL
// options is just to make sure that LBStatusUpdater forwards options on Upsert to its BalancerHandler
options []roundrobin.ServerOption
}

func (lb *testLoadBalancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// noop
}

func (lb *testLoadBalancer) RemoveServer(u *url.URL) error {
lb.Lock()
defer lb.Unlock()
lb.numRemovedServers++
lb.removeServer(u)
return nil
}

func (lb *testLoadBalancer) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error {
lb.Lock()
defer lb.Unlock()
lb.numUpsertedServers++
lb.servers = append(lb.servers, u)
lb.options = append(lb.options, options...)
return nil
}

func (lb *testLoadBalancer) Servers() []*url.URL {
return lb.servers
}

func (lb *testLoadBalancer) Options() []roundrobin.ServerOption {
return lb.options
}

func (lb *testLoadBalancer) removeServer(u *url.URL) {
var i int
var serverURL *url.URL
found := false
for i, serverURL = range lb.servers {
if *serverURL == *u {
found = true
break
}
func (lb *testLoadBalancer) SetStatus(ctx context.Context, childName string, up bool) {
if up {
lb.numUpsertedServers++
} else {
lb.numRemovedServers++
}
if !found {
return
}

lb.servers = append(lb.servers[:i], lb.servers[i+1:]...)
}

type MetricsMock struct {
Gauge gokitmetrics.Gauge
}

func (m *MetricsMock) ServiceServerUpGauge() gokitmetrics.Gauge {
return m.Gauge
}