Merge tag 'v1.7.4' into master

commit d3ae88f108

154 changed files with 4356 additions and 1285 deletions

Excerpted hunks below: the Docker provider's configuration template functions, frontend rule domain handling, the provider's goroutine/context lifecycle, and the corresponding Docker and Swarm configuration tests.

@@ -33,13 +33,14 @@ func (p *Provider) buildConfiguration(containersInspected []dockerData) *types.Configuration {
         "getDomain": label.GetFuncString(label.TraefikDomain, p.Domain),

         // Backend functions
-        "getIPAddress":      p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead?
-        "getServers":        p.getServers,
-        "getMaxConn":        label.GetMaxConn,
-        "getHealthCheck":    label.GetHealthCheck,
-        "getBuffering":      label.GetBuffering,
-        "getCircuitBreaker": label.GetCircuitBreaker,
-        "getLoadBalancer":   label.GetLoadBalancer,
+        "getIPAddress":          p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead?
+        "getServers":            p.getServers,
+        "getMaxConn":            label.GetMaxConn,
+        "getHealthCheck":        label.GetHealthCheck,
+        "getBuffering":          label.GetBuffering,
+        "getResponseForwarding": label.GetResponseForwarding,
+        "getCircuitBreaker":     label.GetCircuitBreaker,
+        "getLoadBalancer":       label.GetLoadBalancer,

         // Frontend functions
         "getBackendName": getBackendName,

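The only functional change in this map is the new getResponseForwarding entry; the surrounding lines change only because of gofmt realignment. As a minimal sketch of how such a FuncMap entry becomes callable from a provider template (the label key string and the stand-in function are illustrative assumptions, not Traefik's actual wiring):

// A hypothetical stand-in for label.GetResponseForwarding, registered in a
// text/template FuncMap the same way the diff above registers the real one.
package main

import (
	"os"
	"text/template"
)

func getResponseForwarding(labels map[string]string) string {
	// Assumed label key, mirroring TraefikBackendResponseForwardingFlushInterval.
	return labels["traefik.backend.responseForwarding.flushInterval"]
}

func main() {
	tmpl := template.Must(template.New("backend").
		Funcs(template.FuncMap{"getResponseForwarding": getResponseForwarding}).
		Parse("flushInterval = \"{{ getResponseForwarding . }}\"\n"))

	labels := map[string]string{
		"traefik.backend.responseForwarding.flushInterval": "10ms",
	}
	_ = tmpl.Execute(os.Stdout, labels) // flushInterval = "10ms"
}
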
@@ -186,13 +187,16 @@ func (p *Provider) getFrontendRule(container dockerData, segmentLabels map[string]string) string {
     }

     domain := label.GetStringValue(segmentLabels, label.TraefikDomain, p.Domain)
+    if len(domain) > 0 {
+        domain = "." + domain
+    }

     if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
-        return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + "." + domain
+        return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + domain
     }

     if len(domain) > 0 {
-        return "Host:" + getSubDomain(container.ServiceName) + "." + domain
+        return "Host:" + getSubDomain(container.ServiceName) + domain
     }

     return ""

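The fix folds the "." separator into the domain value once, up front, so a container with no configured domain no longer produces a Host rule with a trailing dot. A minimal self-contained sketch of the before/after behavior (composeRule is a hypothetical helper, not the provider's code):

package main

import "fmt"

// composeRule mirrors the new logic: the dot is prefixed only when a domain is set.
func composeRule(service, project, domain string) string {
	if len(domain) > 0 {
		domain = "." + domain
	}
	return "Host:" + service + "." + project + domain
}

func main() {
	fmt.Println(composeRule("web", "myapp", "docker.localhost")) // Host:web.myapp.docker.localhost
	// The old code appended "." + domain unconditionally, which yielded
	// "Host:web.myapp." (trailing dot) when the domain was empty.
	fmt.Println(composeRule("web", "myapp", "")) // Host:web.myapp
}
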
@@ -436,6 +436,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
             label.TraefikBackend: "foobar",

             label.TraefikBackendCircuitBreakerExpression:        "NetworkErrorRatio() > 0.5",
+            label.TraefikBackendResponseForwardingFlushInterval: "10ms",
             label.TraefikBackendHealthCheckScheme:               "http",
             label.TraefikBackendHealthCheckPath:                 "/health",
             label.TraefikBackendHealthCheckPort:                 "880",

@@ -674,6 +675,9 @@ func TestDockerBuildConfiguration(t *testing.T) {
             CircuitBreaker: &types.CircuitBreaker{
                 Expression: "NetworkErrorRatio() > 0.5",
             },
+            ResponseForwarding: &types.ResponseForwarding{
+                FlushInterval: "10ms",
+            },
             LoadBalancer: &types.LoadBalancer{
                 Method: "drr",
                 Stickiness: &types.Stickiness{

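The test pairs the new label with the expected types.ResponseForwarding{FlushInterval: "10ms"} in the built backend. To illustrate what a 10ms flush interval means, here is a minimal standard-library sketch; Traefik's forwarder is a separate implementation, and the backend URL and ports below are placeholders:

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"time"
)

func main() {
	backend, err := url.Parse("http://127.0.0.1:8080") // assumed local backend
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	// Flush buffered response data to the client every 10ms, so streaming or
	// long-lived responses are not held back by the proxy's internal buffering.
	proxy.FlushInterval = 10 * time.Millisecond
	log.Fatal(http.ListenAndServe(":9090", proxy))
}
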
@@ -385,6 +385,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
             label.TraefikBackend: "foobar",

             label.TraefikBackendCircuitBreakerExpression:        "NetworkErrorRatio() > 0.5",
+            label.TraefikBackendResponseForwardingFlushInterval: "10ms",
             label.TraefikBackendHealthCheckScheme:               "http",
             label.TraefikBackendHealthCheckPath:                 "/health",
             label.TraefikBackendHealthCheckPort:                 "880",

@@ -592,6 +593,9 @@ func TestSwarmBuildConfiguration(t *testing.T) {
             CircuitBreaker: &types.CircuitBreaker{
                 Expression: "NetworkErrorRatio() > 0.5",
             },
+            ResponseForwarding: &types.ResponseForwarding{
+                FlushInterval: "10ms",
+            },
             LoadBalancer: &types.LoadBalancer{
                 Method: "drr",
                 Stickiness: &types.Stickiness{

@@ -123,18 +123,17 @@ func (p *Provider) createClient() (client.APIClient, error) {
 // Provide allows the docker provider to provide configurations to traefik
 // using the given configuration channel.
 func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
-    // TODO register this routine in pool, and watch for stop channel
-    safe.Go(func() {
+    pool.GoCtx(func(routineCtx context.Context) {
         operation := func() error {
             var err error

+            ctx, cancel := context.WithCancel(routineCtx)
+            defer cancel()
             dockerClient, err := p.createClient()
             if err != nil {
                 log.Errorf("Failed to create a client for docker, error: %s", err)
                 return err
             }

-            ctx := context.Background()
             serverVersion, err := dockerClient.ServerVersion(ctx)
             if err != nil {
                 log.Errorf("Failed to retrieve information of the docker client and server host: %s", err)

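The lifecycle change replaces safe.Go plus a manually watched stop channel with pool.GoCtx, which hands the routine a context that is canceled on shutdown; the operation then derives its own cancelable context from routineCtx instead of an unanchored context.Background(). A minimal sketch of that pool pattern, assuming nothing about Traefik's safe package beyond the GoCtx shape visible in the diff:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// Pool is an illustrative context-aware goroutine pool, not Traefik's safe.Pool.
type Pool struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func NewPool() *Pool {
	ctx, cancel := context.WithCancel(context.Background())
	return &Pool{ctx: ctx, cancel: cancel}
}

// GoCtx starts fn with the pool's context, mirroring the GoCtx call above.
func (p *Pool) GoCtx(fn func(ctx context.Context)) {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		fn(p.ctx)
	}()
}

// Stop cancels every routine's context and waits for them to exit.
func (p *Pool) Stop() {
	p.cancel()
	p.wg.Wait()
}

func main() {
	pool := NewPool()
	pool.GoCtx(func(ctx context.Context) {
		<-ctx.Done()
		fmt.Println("routine stopped:", ctx.Err())
	})
	time.Sleep(10 * time.Millisecond)
	pool.Stop()
}
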
@@ -162,12 +161,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
                     Configuration: configuration,
                 }
                 if p.Watch {
-                    ctx, cancel := context.WithCancel(ctx)
                     if p.SwarmMode {
                         errChan := make(chan error)
                         // TODO: This need to be change. Linked to Swarm events docker/docker#23827
                         ticker := time.NewTicker(SwarmDefaultWatchTime)
-                        pool.Go(func(stop chan bool) {
+                        pool.GoCtx(func(ctx context.Context) {
                             defer close(errChan)
                             for {
                                 select {

@@ -186,9 +184,8 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
                                 }
                             }

-                            case <-stop:
+                            case <-ctx.Done():
                                 ticker.Stop()
-                                cancel()
                                 return
                             }
                         }

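The Swarm polling loop now exits via ctx.Done() and drops its extra cancel() call. A minimal self-contained sketch of the same ticker-plus-context loop, where pollOnce is a hypothetical stand-in for the service listing:

package main

import (
	"context"
	"fmt"
	"time"
)

// watch polls until its context is canceled, then stops the ticker and returns.
func watch(ctx context.Context, pollOnce func()) {
	ticker := time.NewTicker(50 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
			pollOnce()
		case <-ctx.Done():
			ticker.Stop()
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
	defer cancel()
	watch(ctx, func() { fmt.Println("poll") })
	fmt.Println("watch loop exited")
}
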
@@ -199,10 +196,6 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
                     // channel closed

                 } else {
-                    pool.Go(func(stop chan bool) {
-                        <-stop
-                        cancel()
-                    })
                     f := filters.NewArgs()
                     f.Add("type", "container")
                     options := dockertypes.EventsOptions{

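The goroutine whose only job was to translate the stop channel into cancel() is gone: the operation now derives its context from routineCtx and defers cancel(), so parent cancellation propagates automatically. A small sketch of that propagation:

package main

import (
	"context"
	"fmt"
	"time"
)

func operation(parent context.Context) error {
	ctx, cancel := context.WithCancel(parent)
	defer cancel() // replaces: go func() { <-stop; cancel() }()

	select {
	case <-time.After(time.Second):
		return nil
	case <-ctx.Done():
		return ctx.Err() // parent cancellation arrives here with no extra goroutine
	}
}

func main() {
	parent, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel()
	}()
	fmt.Println(operation(parent)) // context canceled
}
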
@@ -215,7 +208,6 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
                     if err != nil {
                         log.Errorf("Failed to list containers for docker, error %s", err)
                         // Call cancel to get out of the monitor
-                        cancel()
                         return
                     }
                     configuration := p.buildConfiguration(containers)

@@ -240,8 +232,9 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
                         if err == io.EOF {
                             log.Debug("Provider event stream closed")
                         }
+
                         return err
                     case <-ctx.Done():
                         return nil
                     }
                 }
             }

@@ -251,7 +244,7 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
         notify := func(err error, time time.Duration) {
             log.Errorf("Provider connection error %+v, retrying in %s", err, time)
         }
-        err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
+        err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), routineCtx), notify)
         if err != nil {
             log.Errorf("Cannot connect to docker server %+v", err)
         }

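Wrapping the policy with backoff.WithContext ties the retry loop to routineCtx, so a provider shutdown interrupts the exponential backoff instead of waiting it out. A minimal sketch using github.com/cenkalti/backoff, the library the diff's backoff package refers to (the failing operation here is contrived):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	operation := func() error {
		return errors.New("docker daemon unreachable") // always fails, forcing retries
	}
	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", wait, err)
	}

	// Without WithContext, RetryNotify would keep retrying until the policy
	// gives up; with it, cancellation of ctx ends the loop promptly.
	policy := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
	if err := backoff.RetryNotify(operation, policy, notify); err != nil {
		fmt.Println("gave up:", err) // returns once ctx expires
	}
}
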
@@ -398,7 +391,7 @@ func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes.NetworkResource) dockerData {
     if service.Spec.EndpointSpec != nil {
         if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
             if isBackendLBSwarm(dData) {
-                log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Træfik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
+                log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Traefik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
             }
         } else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
             dData.NetworkSettings.Networks = make(map[string]*networkData)