Merge tag 'v1.7.4' into master
Commit d3ae88f108
154 changed files with 4356 additions and 1285 deletions
@@ -6,6 +6,7 @@ import (
"fmt"
"io/ioutil"
fmtlog "log"
"net"
"net/url"
"reflect"
"strings"
@@ -74,10 +75,12 @@ type Certificate struct {

// DNSChallenge contains DNS challenge Configuration
type DNSChallenge struct {
Provider string `description:"Use a DNS-01 based challenge provider rather than HTTPS."`
DelayBeforeCheck parse.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."`
preCheckTimeout time.Duration
preCheckInterval time.Duration
Provider string `description:"Use a DNS-01 based challenge provider rather than HTTPS."`
DelayBeforeCheck parse.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."`
Resolvers types.DNSResolvers `description:"Use following DNS servers to resolve the FQDN authority."`
DisablePropagationCheck bool `description:"Disable the DNS propagation checks before notifying ACME that the DNS challenge is ready. [not recommended]"`
preCheckTimeout time.Duration
preCheckInterval time.Duration
}

// HTTPChallenge contains HTTP challenge Configuration
@@ -252,6 +255,9 @@ func (p *Provider) getClient() (*acme.Client, error) {
if p.DNSChallenge != nil && len(p.DNSChallenge.Provider) > 0 {
log.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider)

SetRecursiveNameServers(p.DNSChallenge.Resolvers)
SetPropagationCheck(p.DNSChallenge.DisablePropagationCheck)

err = dnsOverrideDelay(p.DNSChallenge.DelayBeforeCheck)
if err != nil {
return nil, err
@@ -784,3 +790,37 @@ func isDomainAlreadyChecked(domainToCheck string, existentDomains []string) bool
}
return false
}

// SetPropagationCheck to disable the Lego PreCheck.
func SetPropagationCheck(disable bool) {
if disable {
acme.PreCheckDNS = func(_, _ string) (bool, error) {
return true, nil
}
}
}

// SetRecursiveNameServers to provide a custom DNS resolver.
func SetRecursiveNameServers(dnsResolvers []string) {
resolvers := normaliseDNSResolvers(dnsResolvers)
if len(resolvers) > 0 {
acme.RecursiveNameservers = resolvers
log.Infof("Validating FQDN authority with DNS using %+v", resolvers)
}
}

// ensure all servers have a port number
func normaliseDNSResolvers(dnsResolvers []string) []string {
var normalisedResolvers []string
for _, server := range dnsResolvers {
srv := strings.TrimSpace(server)
if len(srv) > 0 {
if host, port, err := net.SplitHostPort(srv); err != nil {
normalisedResolvers = append(normalisedResolvers, net.JoinHostPort(srv, "53"))
} else {
normalisedResolvers = append(normalisedResolvers, net.JoinHostPort(host, port))
}
}
}
return normalisedResolvers
}

@@ -34,6 +34,7 @@ func (p *Provider) buildConfiguration(catalog []catalogUpdate) *types.Configurat
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getResponseForwarding": label.GetResponseForwarding,
"getServer": p.getServer,

// Frontend functions
@@ -111,7 +112,7 @@ func (p *Provider) getFrontendRule(service serviceUpdate) string {
return ""
}

return buffer.String()
return strings.TrimSuffix(buffer.String(), ".")
}

func (p *Provider) getServer(node *api.ServiceEntry) types.Server {

@@ -408,6 +408,7 @@ func TestProviderBuildConfiguration(t *testing.T) {
label.TraefikBackend + "=foobar",

label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5",
label.TraefikBackendResponseForwardingFlushInterval + "=10ms",
label.TraefikBackendHealthCheckPath + "=/health",
label.TraefikBackendHealthCheckScheme + "=http",
label.TraefikBackendHealthCheckPort + "=880",
@@ -680,6 +681,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{
@@ -1036,6 +1040,7 @@ func TestProviderGetFrontendRule(t *testing.T) {
testCases := []struct {
desc string
service serviceUpdate
domain string
expected string
}{
{
@@ -1044,8 +1049,18 @@ func TestProviderGetFrontendRule(t *testing.T) {
ServiceName: "foo",
Attributes: []string{},
},
domain: "localhost",
expected: "Host:foo.localhost",
},
{
desc: "When no domain should return default host foo",
service: serviceUpdate{
ServiceName: "foo",
Attributes: []string{},
},
domain: "",
expected: "Host:foo",
},
{
desc: "Should return host *.example.com",
service: serviceUpdate{
@@ -1054,6 +1069,7 @@ func TestProviderGetFrontendRule(t *testing.T) {
"traefik.frontend.rule=Host:*.example.com",
},
},
domain: "localhost",
expected: "Host:*.example.com",
},
{
@@ -1064,6 +1080,7 @@ func TestProviderGetFrontendRule(t *testing.T) {
"traefik.frontend.rule=Host:{{.ServiceName}}.example.com",
},
},
domain: "localhost",
expected: "Host:foo.example.com",
},
{
@@ -1075,6 +1092,7 @@ func TestProviderGetFrontendRule(t *testing.T) {
"contextPath=/bar",
},
},
domain: "localhost",
expected: "PathPrefix:/bar",
},
}
@@ -1085,7 +1103,7 @@ func TestProviderGetFrontendRule(t *testing.T) {
t.Parallel()

p := &Provider{
Domain: "localhost",
Domain: test.domain,
Prefix: "traefik",
FrontEndRule: "Host:{{.ServiceName}}.{{.Domain}}",
frontEndRuleTemplate: template.New("consul catalog frontend rule"),

@@ -33,13 +33,14 @@ func (p *Provider) buildConfiguration(containersInspected []dockerData) *types.C
"getDomain": label.GetFuncString(label.TraefikDomain, p.Domain),

// Backend functions
"getIPAddress": p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead?
"getServers": p.getServers,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getIPAddress": p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead?
"getServers": p.getServers,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getResponseForwarding": label.GetResponseForwarding,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,

// Frontend functions
"getBackendName": getBackendName,
@@ -186,13 +187,16 @@ func (p *Provider) getFrontendRule(container dockerData, segmentLabels map[strin
}

domain := label.GetStringValue(segmentLabels, label.TraefikDomain, p.Domain)
if len(domain) > 0 {
domain = "." + domain
}

if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + "." + domain
return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + domain
}

if len(domain) > 0 {
return "Host:" + getSubDomain(container.ServiceName) + "." + domain
return "Host:" + getSubDomain(container.ServiceName) + domain
}

return ""

@@ -436,6 +436,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
label.TraefikBackend: "foobar",

label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5",
label.TraefikBackendResponseForwardingFlushInterval: "10ms",
label.TraefikBackendHealthCheckScheme: "http",
label.TraefikBackendHealthCheckPath: "/health",
label.TraefikBackendHealthCheckPort: "880",
@@ -674,6 +675,9 @@ func TestDockerBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{

@@ -385,6 +385,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
label.TraefikBackend: "foobar",

label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5",
label.TraefikBackendResponseForwardingFlushInterval: "10ms",
label.TraefikBackendHealthCheckScheme: "http",
label.TraefikBackendHealthCheckPath: "/health",
label.TraefikBackendHealthCheckPort: "880",
@@ -592,6 +593,9 @@ func TestSwarmBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{

@@ -123,18 +123,17 @@ func (p *Provider) createClient() (client.APIClient, error) {
// Provide allows the docker provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
// TODO register this routine in pool, and watch for stop channel
safe.Go(func() {
pool.GoCtx(func(routineCtx context.Context) {
operation := func() error {
var err error

ctx, cancel := context.WithCancel(routineCtx)
defer cancel()
dockerClient, err := p.createClient()
if err != nil {
log.Errorf("Failed to create a client for docker, error: %s", err)
return err
}

ctx := context.Background()
serverVersion, err := dockerClient.ServerVersion(ctx)
if err != nil {
log.Errorf("Failed to retrieve information of the docker client and server host: %s", err)
@@ -162,12 +161,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
Configuration: configuration,
}
if p.Watch {
ctx, cancel := context.WithCancel(ctx)
if p.SwarmMode {
errChan := make(chan error)
// TODO: This need to be change. Linked to Swarm events docker/docker#23827
ticker := time.NewTicker(SwarmDefaultWatchTime)
pool.Go(func(stop chan bool) {
pool.GoCtx(func(ctx context.Context) {
defer close(errChan)
for {
select {
@@ -186,9 +184,8 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
}
}

case <-stop:
case <-ctx.Done():
ticker.Stop()
cancel()
return
}
}
@@ -199,10 +196,6 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
// channel closed

} else {
pool.Go(func(stop chan bool) {
<-stop
cancel()
})
f := filters.NewArgs()
f.Add("type", "container")
options := dockertypes.EventsOptions{
@@ -215,7 +208,6 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
if err != nil {
log.Errorf("Failed to list containers for docker, error %s", err)
// Call cancel to get out of the monitor
cancel()
return
}
configuration := p.buildConfiguration(containers)
@@ -240,8 +232,9 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
if err == io.EOF {
log.Debug("Provider event stream closed")
}

return err
case <-ctx.Done():
return nil
}
}
}
@@ -251,7 +244,7 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
notify := func(err error, time time.Duration) {
log.Errorf("Provider connection error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), routineCtx), notify)
if err != nil {
log.Errorf("Cannot connect to docker server %+v", err)
}
@@ -398,7 +391,7 @@ func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes
if service.Spec.EndpointSpec != nil {
if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
if isBackendLBSwarm(dData) {
log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Træfik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Traefik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
}
} else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
dData.NetworkSettings.Networks = make(map[string]*networkData)

@@ -21,14 +21,16 @@ import (
func (p *Provider) buildConfiguration(instances []ecsInstance) (*types.Configuration, error) {
var ecsFuncMap = template.FuncMap{
// Backend functions
"getHost": getHost,
"getPort": getPort,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getServers": getServers,
"getHost": getHost,
"getPort": getPort,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getResponseForwarding": label.GetResponseForwarding,

"getServers": getServers,

// Frontend functions
"filterFrontends": filterFrontends,
@@ -141,7 +143,11 @@ func (p *Provider) getFrontendRule(i ecsInstance) string {
}

domain := label.GetStringValue(i.SegmentLabels, label.TraefikDomain, p.Domain)
defaultRule := "Host:" + strings.ToLower(strings.Replace(i.Name, "_", "-", -1)) + "." + domain
if len(domain) > 0 {
domain = "." + domain
}

defaultRule := "Host:" + strings.ToLower(strings.Replace(i.Name, "_", "-", -1)) + domain

return label.GetStringValue(i.TraefikLabels, label.TraefikFrontendRule, defaultRule)
}

@@ -52,7 +52,7 @@ func TestBuildConfiguration(t *testing.T) {
Backend: "backend-instance",
Routes: map[string]types.Route{
"route-frontend-instance": {
Rule: "Host:instance.",
Rule: "Host:instance",
},
},
PassHostHeader: true,
@@ -101,7 +101,7 @@ func TestBuildConfiguration(t *testing.T) {
Backend: "backend-instance",
Routes: map[string]types.Route{
"route-frontend-instance": {
Rule: "Host:instance.",
Rule: "Host:instance",
},
},
PassHostHeader: true,
@@ -146,7 +146,7 @@ func TestBuildConfiguration(t *testing.T) {
Backend: "backend-instance",
Routes: map[string]types.Route{
"route-frontend-instance": {
Rule: "Host:instance.",
Rule: "Host:instance",
},
},
Auth: &types.Auth{
@@ -197,7 +197,7 @@ func TestBuildConfiguration(t *testing.T) {
Backend: "backend-instance",
Routes: map[string]types.Route{
"route-frontend-instance": {
Rule: "Host:instance.",
Rule: "Host:instance",
},
},
Auth: &types.Auth{
@@ -248,7 +248,7 @@ func TestBuildConfiguration(t *testing.T) {
Backend: "backend-instance",
Routes: map[string]types.Route{
"route-frontend-instance": {
Rule: "Host:instance.",
Rule: "Host:instance",
},
},
Auth: &types.Auth{
@@ -307,7 +307,7 @@ func TestBuildConfiguration(t *testing.T) {
Backend: "backend-instance",
Routes: map[string]types.Route{
"route-frontend-instance": {
Rule: "Host:instance.",
Rule: "Host:instance",
},
},
Auth: &types.Auth{
@@ -344,6 +344,7 @@ func TestBuildConfiguration(t *testing.T) {
label.TraefikBackend: aws.String("foobar"),

label.TraefikBackendCircuitBreakerExpression: aws.String("NetworkErrorRatio() > 0.5"),
label.TraefikBackendResponseForwardingFlushInterval: aws.String("10ms"),
label.TraefikBackendHealthCheckScheme: aws.String("http"),
label.TraefikBackendHealthCheckPath: aws.String("/health"),
label.TraefikBackendHealthCheckPort: aws.String("880"),
@@ -461,6 +462,9 @@ func TestBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{

@@ -7,43 +7,45 @@ import (
)

const (
annotationKubernetesIngressClass = "kubernetes.io/ingress.class"
annotationKubernetesAuthRealm = "ingress.kubernetes.io/auth-realm"
annotationKubernetesAuthType = "ingress.kubernetes.io/auth-type"
annotationKubernetesAuthSecret = "ingress.kubernetes.io/auth-secret"
annotationKubernetesAuthHeaderField = "ingress.kubernetes.io/auth-header-field"
annotationKubernetesAuthForwardResponseHeaders = "ingress.kubernetes.io/auth-response-headers"
annotationKubernetesAuthRemoveHeader = "ingress.kubernetes.io/auth-remove-header"
annotationKubernetesAuthForwardURL = "ingress.kubernetes.io/auth-url"
annotationKubernetesAuthForwardTrustHeaders = "ingress.kubernetes.io/auth-trust-headers"
annotationKubernetesAuthForwardTLSSecret = "ingress.kubernetes.io/auth-tls-secret"
annotationKubernetesAuthForwardTLSInsecure = "ingress.kubernetes.io/auth-tls-insecure"
annotationKubernetesRewriteTarget = "ingress.kubernetes.io/rewrite-target"
annotationKubernetesWhiteListSourceRange = "ingress.kubernetes.io/whitelist-source-range"
annotationKubernetesWhiteListIPStrategy = "ingress.kubernetes.io/whitelist-ipstrategy"
annotationKubernetesWhiteListIPStrategyDepth = "ingress.kubernetes.io/whitelist-ipstrategy-depth"
annotationKubernetesWhiteListIPStrategyExcludedIPs = "ingress.kubernetes.io/whitelist-ipstrategy-excluded-ips"
annotationKubernetesPreserveHost = "ingress.kubernetes.io/preserve-host"
annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert"
annotationKubernetesFrontendEntryPoints = "ingress.kubernetes.io/frontend-entry-points"
annotationKubernetesPriority = "ingress.kubernetes.io/priority"
annotationKubernetesCircuitBreakerExpression = "ingress.kubernetes.io/circuit-breaker-expression"
annotationKubernetesLoadBalancerMethod = "ingress.kubernetes.io/load-balancer-method"
annotationKubernetesAffinity = "ingress.kubernetes.io/affinity"
annotationKubernetesSessionCookieName = "ingress.kubernetes.io/session-cookie-name"
annotationKubernetesRuleType = "ingress.kubernetes.io/rule-type"
annotationKubernetesRedirectEntryPoint = "ingress.kubernetes.io/redirect-entry-point"
annotationKubernetesRedirectPermanent = "ingress.kubernetes.io/redirect-permanent"
annotationKubernetesRedirectRegex = "ingress.kubernetes.io/redirect-regex"
annotationKubernetesRedirectReplacement = "ingress.kubernetes.io/redirect-replacement"
annotationKubernetesMaxConnAmount = "ingress.kubernetes.io/max-conn-amount"
annotationKubernetesMaxConnExtractorFunc = "ingress.kubernetes.io/max-conn-extractor-func"
annotationKubernetesRateLimit = "ingress.kubernetes.io/rate-limit"
annotationKubernetesErrorPages = "ingress.kubernetes.io/error-pages"
annotationKubernetesBuffering = "ingress.kubernetes.io/buffering"
annotationKubernetesAppRoot = "ingress.kubernetes.io/app-root"
annotationKubernetesServiceWeights = "ingress.kubernetes.io/service-weights"
annotationKubernetesRequestModifier = "ingress.kubernetes.io/request-modifier"
annotationKubernetesIngressClass = "kubernetes.io/ingress.class"
annotationKubernetesAuthRealm = "ingress.kubernetes.io/auth-realm"
annotationKubernetesAuthType = "ingress.kubernetes.io/auth-type"
annotationKubernetesAuthSecret = "ingress.kubernetes.io/auth-secret"
annotationKubernetesAuthHeaderField = "ingress.kubernetes.io/auth-header-field"
annotationKubernetesAuthForwardResponseHeaders = "ingress.kubernetes.io/auth-response-headers"
annotationKubernetesAuthRemoveHeader = "ingress.kubernetes.io/auth-remove-header"
annotationKubernetesAuthForwardURL = "ingress.kubernetes.io/auth-url"
annotationKubernetesAuthForwardTrustHeaders = "ingress.kubernetes.io/auth-trust-headers"
annotationKubernetesAuthForwardTLSSecret = "ingress.kubernetes.io/auth-tls-secret"
annotationKubernetesAuthForwardTLSInsecure = "ingress.kubernetes.io/auth-tls-insecure"
annotationKubernetesRewriteTarget = "ingress.kubernetes.io/rewrite-target"
annotationKubernetesWhiteListSourceRange = "ingress.kubernetes.io/whitelist-source-range"
annotationKubernetesWhiteListIPStrategy = "ingress.kubernetes.io/whitelist-ipstrategy"
annotationKubernetesWhiteListIPStrategyDepth = "ingress.kubernetes.io/whitelist-ipstrategy-depth"
annotationKubernetesWhiteListIPStrategyExcludedIPs = "ingress.kubernetes.io/whitelist-ipstrategy-excluded-ips"
annotationKubernetesPreserveHost = "ingress.kubernetes.io/preserve-host"
annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert" // Deprecated
annotationKubernetesPassTLSClientCert = "ingress.kubernetes.io/pass-client-tls-cert"
annotationKubernetesFrontendEntryPoints = "ingress.kubernetes.io/frontend-entry-points"
annotationKubernetesPriority = "ingress.kubernetes.io/priority"
annotationKubernetesCircuitBreakerExpression = "ingress.kubernetes.io/circuit-breaker-expression"
annotationKubernetesLoadBalancerMethod = "ingress.kubernetes.io/load-balancer-method"
annotationKubernetesAffinity = "ingress.kubernetes.io/affinity"
annotationKubernetesSessionCookieName = "ingress.kubernetes.io/session-cookie-name"
annotationKubernetesRuleType = "ingress.kubernetes.io/rule-type"
annotationKubernetesRedirectEntryPoint = "ingress.kubernetes.io/redirect-entry-point"
annotationKubernetesRedirectPermanent = "ingress.kubernetes.io/redirect-permanent"
annotationKubernetesRedirectRegex = "ingress.kubernetes.io/redirect-regex"
annotationKubernetesRedirectReplacement = "ingress.kubernetes.io/redirect-replacement"
annotationKubernetesMaxConnAmount = "ingress.kubernetes.io/max-conn-amount"
annotationKubernetesMaxConnExtractorFunc = "ingress.kubernetes.io/max-conn-extractor-func"
annotationKubernetesRateLimit = "ingress.kubernetes.io/rate-limit"
annotationKubernetesErrorPages = "ingress.kubernetes.io/error-pages"
annotationKubernetesBuffering = "ingress.kubernetes.io/buffering"
annotationKubernetesResponseForwardingFlushInterval = "ingress.kubernetes.io/responseforwarding-flushinterval"
annotationKubernetesAppRoot = "ingress.kubernetes.io/app-root"
annotationKubernetesServiceWeights = "ingress.kubernetes.io/service-weights"
annotationKubernetesRequestModifier = "ingress.kubernetes.io/request-modifier"

annotationKubernetesSSLForceHost = "ingress.kubernetes.io/ssl-force-host"
annotationKubernetesSSLRedirect = "ingress.kubernetes.io/ssl-redirect"

@@ -93,6 +93,13 @@ func circuitBreaker(exp string) func(*types.Backend) {
}
}

func responseForwarding(interval string) func(*types.Backend) {
return func(b *types.Backend) {
b.ResponseForwarding = &types.ResponseForwarding{}
b.ResponseForwarding.FlushInterval = interval
}
}

func buffering(opts ...func(*types.Buffering)) func(*types.Backend) {
return func(b *types.Backend) {
if b.Buffering == nil {
@@ -398,12 +405,34 @@ func limitPeriod(period time.Duration) func(*types.Rate) {
}
}

// Deprecated
func passTLSCert() func(*types.Frontend) {
return func(f *types.Frontend) {
f.PassTLSCert = true
}
}

func passTLSClientCert() func(*types.Frontend) {
return func(f *types.Frontend) {
f.PassTLSClientCert = &types.TLSClientHeaders{
PEM: true,
Infos: &types.TLSClientCertificateInfos{
NotAfter: true,
NotBefore: true,
Subject: &types.TLSCLientCertificateSubjectInfos{
Country: true,
Province: true,
Locality: true,
Organization: true,
CommonName: true,
SerialNumber: true,
},
Sans: true,
},
}
}
}

func routes(opts ...func(*types.Route) string) func(*types.Frontend) {
return func(f *types.Frontend) {
f.Routes = make(map[string]types.Route)

@@ -43,6 +43,7 @@ const (
traefikDefaultIngressClass = "traefik"
defaultBackendName = "global-default-backend"
defaultFrontendName = "global-default-frontend"
defaultFrontendRule = "PathPrefix:/"
allowedProtocolHTTPS = "https"
allowedProtocolH2C = "h2c"
)
@@ -61,7 +62,7 @@ type Provider struct {
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)"`
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)"`
DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers" export:"true"`
EnablePassTLSCert bool `description:"Kubernetes enable Pass TLS Client Certs" export:"true"`
EnablePassTLSCert bool `description:"Kubernetes enable Pass TLS Client Certs" export:"true"` // Deprecated
Namespaces Namespaces `description:"Kubernetes namespaces" export:"true"`
LabelSelector string `description:"Kubernetes Ingress label selector to use" export:"true"`
IngressClass string `description:"Value of kubernetes.io/ingress.class annotation to watch for" export:"true"`
@@ -238,6 +239,11 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error)
}

baseName := r.Host + pa.Path

if len(baseName) == 0 {
baseName = pa.Backend.ServiceName
}

if priority > 0 {
baseName = strconv.Itoa(priority) + "-" + baseName
}
@@ -269,22 +275,23 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error)
}

passHostHeader := getBoolValue(i.Annotations, annotationKubernetesPreserveHost, !p.DisablePassHostHeaders)
passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert)
passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) // Deprecated
entryPoints := getSliceStringValue(i.Annotations, annotationKubernetesFrontendEntryPoints)

frontend = &types.Frontend{
Backend: baseName,
PassHostHeader: passHostHeader,
PassTLSCert: passTLSCert,
Routes: make(map[string]types.Route),
Priority: priority,
WhiteList: getWhiteList(i),
Redirect: getFrontendRedirect(i, baseName, pa.Path),
EntryPoints: entryPoints,
Headers: getHeader(i),
Errors: getErrorPages(i),
RateLimit: getRateLimit(i),
Auth: auth,
Backend: baseName,
PassHostHeader: passHostHeader,
PassTLSCert: passTLSCert,
PassTLSClientCert: getPassTLSClientCert(i),
Routes: make(map[string]types.Route),
Priority: priority,
WhiteList: getWhiteList(i),
Redirect: getFrontendRedirect(i, baseName, pa.Path),
EntryPoints: entryPoints,
Headers: getHeader(i),
Errors: getErrorPages(i),
RateLimit: getRateLimit(i),
Auth: auth,
}
}

@@ -319,11 +326,18 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error)
}
}

if len(frontend.Routes) == 0 {
frontend.Routes["/"] = types.Route{
Rule: defaultFrontendRule,
}
}

templateObjects.Frontends[baseName] = frontend
templateObjects.Backends[baseName].CircuitBreaker = getCircuitBreaker(service)
templateObjects.Backends[baseName].LoadBalancer = getLoadBalancer(service)
templateObjects.Backends[baseName].MaxConn = getMaxConn(service)
templateObjects.Backends[baseName].Buffering = getBuffering(service)
templateObjects.Backends[baseName].ResponseForwarding = getResponseForwarding(service)

protocol := label.DefaultProtocol

@@ -481,6 +495,7 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem
templateObjects.Backends[defaultBackendName].LoadBalancer = getLoadBalancer(service)
templateObjects.Backends[defaultBackendName].MaxConn = getMaxConn(service)
templateObjects.Backends[defaultBackendName].Buffering = getBuffering(service)
templateObjects.Backends[defaultBackendName].ResponseForwarding = getResponseForwarding(service)

endpoints, exists, err := cl.GetEndpoints(service.Namespace, service.Name)
if err != nil {
@@ -520,26 +535,27 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem
}

passHostHeader := getBoolValue(i.Annotations, annotationKubernetesPreserveHost, !p.DisablePassHostHeaders)
passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert)
passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) // Deprecated
priority := getIntValue(i.Annotations, annotationKubernetesPriority, 0)
entryPoints := getSliceStringValue(i.Annotations, annotationKubernetesFrontendEntryPoints)

templateObjects.Frontends[defaultFrontendName] = &types.Frontend{
Backend: defaultBackendName,
PassHostHeader: passHostHeader,
PassTLSCert: passTLSCert,
Routes: make(map[string]types.Route),
Priority: priority,
WhiteList: getWhiteList(i),
Redirect: getFrontendRedirect(i, defaultFrontendName, "/"),
EntryPoints: entryPoints,
Headers: getHeader(i),
Errors: getErrorPages(i),
RateLimit: getRateLimit(i),
Backend: defaultBackendName,
PassHostHeader: passHostHeader,
PassTLSCert: passTLSCert,
PassTLSClientCert: getPassTLSClientCert(i),
Routes: make(map[string]types.Route),
Priority: priority,
WhiteList: getWhiteList(i),
Redirect: getFrontendRedirect(i, defaultFrontendName, "/"),
EntryPoints: entryPoints,
Headers: getHeader(i),
Errors: getErrorPages(i),
RateLimit: getRateLimit(i),
}

templateObjects.Frontends[defaultFrontendName].Routes["/"] = types.Route{
Rule: "PathPrefix:/",
Rule: defaultFrontendRule,
}

return nil
@@ -578,6 +594,7 @@ func getRuleForPath(pa extensionsv1beta1.HTTPIngressPath, i *extensionsv1beta1.I

rules = append(rules, rule)
}

return strings.Join(rules, ";"), nil
}

@@ -951,6 +968,17 @@ func getIPStrategy(annotations map[string]string) *types.IPStrategy {
}
}

func getResponseForwarding(service *corev1.Service) *types.ResponseForwarding {
flushIntervalValue := getStringValue(service.Annotations, annotationKubernetesResponseForwardingFlushInterval, "")
if len(flushIntervalValue) == 0 {
return nil
}

return &types.ResponseForwarding{
FlushInterval: flushIntervalValue,
}
}

func getBuffering(service *corev1.Service) *types.Buffering {
var buffering *types.Buffering

@@ -1081,6 +1109,22 @@ func getRateLimit(i *extensionsv1beta1.Ingress) *types.RateLimit {
return rateLimit
}

func getPassTLSClientCert(i *extensionsv1beta1.Ingress) *types.TLSClientHeaders {
var passTLSClientCert *types.TLSClientHeaders

passRaw := getStringValue(i.Annotations, annotationKubernetesPassTLSClientCert, "")
if len(passRaw) > 0 {
passTLSClientCert = &types.TLSClientHeaders{}
err := yaml.Unmarshal([]byte(passRaw), passTLSClientCert)
if err != nil {
log.Error(err)
return nil
}
}

return passTLSClientCert
}

func templateSafeString(value string) error {
_, err := strconv.Unquote(`"` + value + `"`)
return err

@@ -49,6 +49,11 @@ func TestLoadIngresses(t *testing.T) {
onePath(iBackend("service7", intstr.FromInt(80))),
),
),
iRule(iHost(""),
iPaths(
onePath(iBackend("service8", intstr.FromInt(80))),
),
),
),
),
}
@@ -117,6 +122,14 @@ func TestLoadIngresses(t *testing.T) {
clusterIP("10.0.0.7"),
sPorts(sPort(80, ""))),
),
buildService(
sName("service8"),
sNamespace("testing"),
sUID("8"),
sSpec(
clusterIP("10.0.0.8"),
sPorts(sPort(80, ""))),
),
}

endpoints := []*corev1.Endpoints{
@@ -164,6 +177,14 @@ func TestLoadIngresses(t *testing.T) {
eAddresses(eAddress("10.10.0.7")),
ePorts(ePort(80, ""))),
),
buildEndpoint(
eNamespace("testing"),
eName("service8"),
eUID("8"),
subset(
eAddresses(eAddress("10.10.0.8")),
ePorts(ePort(80, ""))),
),
}

watchChan := make(chan interface{})
@@ -217,6 +238,12 @@ func TestLoadIngresses(t *testing.T) {
server("http://10.10.0.7:80", weight(1)),
),
),
backend("service8",
lbMethod("wrr"),
servers(
server("http://10.10.0.8:80", weight(1)),
),
),
),
frontends(
frontend("foo/bar",
@@ -247,6 +274,10 @@ func TestLoadIngresses(t *testing.T) {
passHostHeader(),
routes(route("*.service7", "HostRegexp:{subdomain:[A-Za-z0-9-_]+}.service7")),
),
frontend("service8",
passHostHeader(),
routes(route("/", "PathPrefix:/")),
),
),
)
assert.Equal(t, expected, actual)
@@ -696,6 +727,7 @@ func TestGetPassHostHeader(t *testing.T) {
assert.Equal(t, expected, actual)
}

// Deprecated
func TestGetPassTLSCert(t *testing.T) {
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(iNamespace("awesome"),
@@ -875,6 +907,9 @@ func TestServiceAnnotations(t *testing.T) {
iRule(
iHost("max-conn"),
iPaths(onePath(iBackend("service4", intstr.FromInt(804))))),
iRule(
iHost("flush"),
iPaths(onePath(iBackend("service5", intstr.FromInt(805))))),
),
),
}
@@ -924,6 +959,15 @@ retryexpression: IsNetworkError() && Attempts() <= 2
clusterIP("10.0.0.4"),
sPorts(sPort(804, "http"))),
),
buildService(
sName("service5"),
sNamespace("testing"),
sUID("5"),
sAnnotation(annotationKubernetesResponseForwardingFlushInterval, "10ms"),
sSpec(
clusterIP("10.0.0.5"),
sPorts(sPort(80, ""))),
),
}

endpoints := []*corev1.Endpoints{
@@ -971,6 +1015,17 @@ retryexpression: IsNetworkError() && Attempts() <= 2
eAddresses(eAddress("10.4.0.2")),
ePorts(ePort(8080, "http"))),
),
buildEndpoint(
eNamespace("testing"),
eName("service5"),
eUID("5"),
subset(
eAddresses(eAddress("10.4.0.1")),
ePorts(ePort(8080, "http"))),
subset(
eAddresses(eAddress("10.4.0.2")),
ePorts(ePort(8080, "http"))),
),
}

watchChan := make(chan interface{})
@@ -994,6 +1049,11 @@ retryexpression: IsNetworkError() && Attempts() <= 2
lbMethod("drr"),
circuitBreaker("NetworkErrorRatio() > 0.5"),
),
backend("flush",
servers(),
lbMethod("wrr"),
responseForwarding("10ms"),
),
backend("bar",
servers(
server("http://10.15.0.1:8080", weight(1)),
@@ -1039,6 +1099,10 @@ retryexpression: IsNetworkError() && Attempts() <= 2
passHostHeader(),
routes(
route("max-conn", "Host:max-conn"))),
frontend("flush",
passHostHeader(),
routes(
route("flush", "Host:flush"))),
),
)

@@ -1069,6 +1133,20 @@ func TestIngressAnnotations(t *testing.T) {
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesPassTLSCert, "true"),
iAnnotation(annotationKubernetesPassTLSClientCert, `
pem: true
infos:
notafter: true
notbefore: true
sans: true
subject:
country: true
province: true
locality: true
organization: true
commonname: true
serialnumber: true
`),
iAnnotation(annotationKubernetesIngressClass, traefikDefaultRealm),
iRules(
iRule(
@@ -1484,13 +1562,7 @@ rateset:
),
frontend("other/sslstuff",
passHostHeader(),
passTLSCert(),
routes(
route("/sslstuff", "PathPrefix:/sslstuff"),
route("other", "Host:other")),
),
frontend("other/sslstuff",
passHostHeader(),
passTLSClientCert(),
passTLSCert(),
routes(
route("/sslstuff", "PathPrefix:/sslstuff"),

@@ -3,6 +3,7 @@ package kv
const (
pathBackends = "/backends/"
pathBackendCircuitBreakerExpression = "/circuitbreaker/expression"
pathBackendResponseForwardingFlushInterval = "/responseforwarding/flushinterval"
pathBackendHealthCheckScheme = "/healthcheck/scheme"
pathBackendHealthCheckPath = "/healthcheck/path"
pathBackendHealthCheckPort = "/healthcheck/port"

@@ -56,12 +56,13 @@ func (p *Provider) buildConfiguration() *types.Configuration {
"getWhiteList": p.getWhiteList,

// Backend functions
"getServers": p.getServers,
"getCircuitBreaker": p.getCircuitBreaker,
"getLoadBalancer": p.getLoadBalancer,
"getMaxConn": p.getMaxConn,
"getHealthCheck": p.getHealthCheck,
"getBuffering": p.getBuffering,
"getServers": p.getServers,
"getCircuitBreaker": p.getCircuitBreaker,
"getResponseForwarding": p.getResponseForwarding,
"getLoadBalancer": p.getLoadBalancer,
"getMaxConn": p.getMaxConn,
"getHealthCheck": p.getHealthCheck,
"getBuffering": p.getBuffering,
}

configuration, err := p.GetConfiguration("templates/kv.tmpl", KvFuncMap, templateObjects)
@@ -234,6 +235,20 @@ func (p *Provider) getLoadBalancer(rootPath string) *types.LoadBalancer {
return lb
}

func (p *Provider) getResponseForwarding(rootPath string) *types.ResponseForwarding {
if !p.has(rootPath, pathBackendResponseForwardingFlushInterval) {
return nil
}
value := p.get("", rootPath, pathBackendResponseForwardingFlushInterval)
if len(value) == 0 {
return nil
}

return &types.ResponseForwarding{
FlushInterval: value,
}
}

func (p *Provider) getCircuitBreaker(rootPath string) *types.CircuitBreaker {
if !p.has(rootPath, pathBackendCircuitBreakerExpression) {
return nil

@@ -162,7 +162,7 @@ func HasPrefix(labels map[string]string, prefix string) bool {
return false
}

// IsEnabled Check if a container is enabled in Træfik
// IsEnabled Check if a container is enabled in Traefik
func IsEnabled(labels map[string]string, exposedByDefault bool) bool {
return GetBoolValue(labels, TraefikEnable, exposedByDefault)
}

@@ -29,6 +29,7 @@ const (
SuffixBackendMaxConnAmount = "backend.maxconn.amount"
SuffixBackendMaxConnExtractorFunc = "backend.maxconn.extractorfunc"
SuffixBackendBuffering = "backend.buffering"
SuffixBackendResponseForwardingFlushInterval = "backend.responseForwarding.flushInterval"
SuffixBackendBufferingMaxRequestBodyBytes = SuffixBackendBuffering + ".maxRequestBodyBytes"
SuffixBackendBufferingMemRequestBodyBytes = SuffixBackendBuffering + ".memRequestBodyBytes"
SuffixBackendBufferingMaxResponseBodyBytes = SuffixBackendBuffering + ".maxResponseBodyBytes"
@@ -133,6 +134,7 @@ const (
TraefikBackendMaxConnAmount = Prefix + SuffixBackendMaxConnAmount
TraefikBackendMaxConnExtractorFunc = Prefix + SuffixBackendMaxConnExtractorFunc
TraefikBackendBuffering = Prefix + SuffixBackendBuffering
TraefikBackendResponseForwardingFlushInterval = Prefix + SuffixBackendResponseForwardingFlushInterval
TraefikBackendBufferingMaxRequestBodyBytes = Prefix + SuffixBackendBufferingMaxRequestBodyBytes
TraefikBackendBufferingMemRequestBodyBytes = Prefix + SuffixBackendBufferingMemRequestBodyBytes
TraefikBackendBufferingMaxResponseBodyBytes = Prefix + SuffixBackendBufferingMaxResponseBodyBytes

@@ -359,6 +359,19 @@ func GetHealthCheck(labels map[string]string) *types.HealthCheck {
}
}

// GetResponseForwarding Create ResponseForwarding from labels
func GetResponseForwarding(labels map[string]string) *types.ResponseForwarding {
if !HasPrefix(labels, TraefikBackendResponseForwardingFlushInterval) {
return nil
}

value := GetStringValue(labels, TraefikBackendResponseForwardingFlushInterval, "0")

return &types.ResponseForwarding{
FlushInterval: value,
}
}

// GetBuffering Create buffering from labels
func GetBuffering(labels map[string]string) *types.Buffering {
if !HasPrefix(labels, TraefikBackendBuffering) {

@@ -30,13 +30,14 @@ func (p *Provider) buildConfiguration(applications *marathon.Applications) *type
"getBackendName": p.getBackendName,

// Backend functions
"getPort": getPort,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getServers": p.getServers,
"getPort": getPort,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getResponseForwarding": label.GetResponseForwarding,
"getServers": p.getServers,

// Frontend functions
"getSegmentNameSuffix": getSegmentNameSuffix,
@@ -212,11 +213,14 @@ func (p *Provider) getFrontendRule(app appData) string {
}

domain := label.GetStringValue(app.SegmentLabels, label.TraefikDomain, p.Domain)
if len(domain) > 0 {
domain = "." + domain
}

if len(app.SegmentName) > 0 {
return "Host:" + strings.ToLower(provider.Normalize(app.SegmentName)) + "." + p.getSubDomain(app.ID) + "." + domain
return "Host:" + strings.ToLower(provider.Normalize(app.SegmentName)) + "." + p.getSubDomain(app.ID) + domain
}
return "Host:" + p.getSubDomain(app.ID) + "." + domain
return "Host:" + p.getSubDomain(app.ID) + domain
}

func getPort(task marathon.Task, app appData) string {

@@ -357,6 +357,7 @@ func TestBuildConfiguration(t *testing.T) {
withLabel(label.TraefikBackend, "foobar"),

withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"),
withLabel(label.TraefikBackendResponseForwardingFlushInterval, "10ms"),
withLabel(label.TraefikBackendHealthCheckScheme, "http"),
withLabel(label.TraefikBackendHealthCheckPath, "/health"),
withLabel(label.TraefikBackendHealthCheckPort, "880"),
@@ -591,6 +592,9 @@ func TestBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{

@@ -29,15 +29,16 @@ func (p *Provider) buildConfiguration(tasks []state.Task) *types.Configuration {
"getID": getID,

// Backend functions
"getBackendName": getBackendName,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getServers": p.getServers,
"getHost": p.getHost,
"getServerPort": p.getServerPort,
"getBackendName": getBackendName,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getResponseForwarding": label.GetResponseForwarding,
"getServers": p.getServers,
"getHost": p.getHost,
"getServerPort": p.getServerPort,

// Frontend functions
"getSegmentNameSuffix": getSegmentNameSuffix,
@@ -222,8 +223,11 @@ func (p *Provider) getFrontendRule(task taskData) string {
}

domain := label.GetStringValue(task.TraefikLabels, label.TraefikDomain, p.Domain)
if len(domain) > 0 {
domain = "." + domain
}

return "Host:" + p.getSegmentSubDomain(task) + "." + domain
return "Host:" + p.getSegmentSubDomain(task) + domain
}

func (p *Provider) getServers(tasks []taskData) map[string]types.Server {

@@ -314,6 +314,7 @@ func TestBuildConfiguration(t *testing.T) {
withLabel(label.TraefikBackend, "foobar"),

withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"),
withLabel(label.TraefikBackendResponseForwardingFlushInterval, "10ms"),
withLabel(label.TraefikBackendHealthCheckScheme, "http"),
withLabel(label.TraefikBackendHealthCheckPath, "/health"),
withLabel(label.TraefikBackendHealthCheckPort, "880"),
@@ -551,6 +552,9 @@ func TestBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{

@@ -20,12 +20,13 @@ func (p *Provider) buildConfiguration(services []rancherData) *types.Configurati
"getDomain": label.GetFuncString(label.TraefikDomain, p.Domain),

// Backend functions
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getServers": getServers,
"getCircuitBreaker": label.GetCircuitBreaker,
"getLoadBalancer": label.GetLoadBalancer,
"getMaxConn": label.GetMaxConn,
"getHealthCheck": label.GetHealthCheck,
"getBuffering": label.GetBuffering,
"getResponseForwarding": label.GetResponseForwarding,
"getServers": getServers,

// Frontend functions
"getBackendName": getBackendName,
@@ -128,7 +129,11 @@ func (p *Provider) serviceFilter(service rancherData) bool {

func (p *Provider) getFrontendRule(serviceName string, labels map[string]string) string {
domain := label.GetStringValue(labels, label.TraefikDomain, p.Domain)
defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + "." + domain
if len(domain) > 0 {
domain = "." + domain
}

defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + domain

return label.GetStringValue(labels, label.TraefikFrontendRule, defaultRule)
}

@@ -41,6 +41,7 @@ func TestProviderBuildConfiguration(t *testing.T) {
label.TraefikBackend: "foobar",

label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5",
label.TraefikBackendResponseForwardingFlushInterval: "10ms",
label.TraefikBackendHealthCheckScheme: "http",
label.TraefikBackendHealthCheckPath: "/health",
label.TraefikBackendHealthCheckPort: "880",
@@ -281,6 +282,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
CircuitBreaker: &types.CircuitBreaker{
Expression: "NetworkErrorRatio() > 0.5",
},
ResponseForwarding: &types.ResponseForwarding{
FlushInterval: "10ms",
},
LoadBalancer: &types.LoadBalancer{
Method: "drr",
Stickiness: &types.Stickiness{