Remove Rancher v1 provider
parent 7129f03dc9
commit 2ad1fd725a

66 changed files with 10 additions and 3316 deletions
The Rancher registration is removed from the provider aggregator:

```diff
@@ -100,10 +100,6 @@ func NewProviderAggregator(conf static.Providers) ProviderAggregator {
 		p.quietAddProvider(conf.KubernetesGateway)
 	}
 
-	if conf.Rancher != nil {
-		p.quietAddProvider(conf.Rancher)
-	}
-
 	if conf.Ecs != nil {
 		p.quietAddProvider(conf.Ecs)
 	}
```
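For context, `NewProviderAggregator` walks the static configuration and registers every provider whose field is non-nil, so deleting the `Rancher` branch is all it takes to unplug the provider. A minimal sketch of that registration pattern follows; the `Provider` interface and the body of `quietAddProvider` are simplified assumptions for illustration, not Traefik's exact definitions:

```go
package main

import "fmt"

// Provider is a simplified stand-in for Traefik's provider interface.
type Provider interface {
	Init() error
}

// ProviderAggregator collects all successfully initialized providers.
type ProviderAggregator struct {
	providers []Provider
}

// quietAddProvider initializes a provider and keeps it, logging
// instead of failing when Init returns an error.
func (p *ProviderAggregator) quietAddProvider(provider Provider) {
	if err := provider.Init(); err != nil {
		fmt.Printf("error initializing provider: %v\n", err)
		return
	}
	p.providers = append(p.providers, provider)
}

type noopProvider struct{}

func (noopProvider) Init() error { return nil }

func main() {
	agg := &ProviderAggregator{}
	agg.quietAddProvider(noopProvider{})
	fmt.Println(len(agg.providers)) // 1
}
```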
The provider's configuration builder is deleted in full (@@ -1,299 +0,0 @@):

```go
package rancher

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strings"

	"github.com/rs/zerolog/log"
	"github.com/traefik/traefik/v2/pkg/config/dynamic"
	"github.com/traefik/traefik/v2/pkg/config/label"
	"github.com/traefik/traefik/v2/pkg/provider"
	"github.com/traefik/traefik/v2/pkg/provider/constraints"
)

func (p *Provider) buildConfiguration(ctx context.Context, services []rancherData) *dynamic.Configuration {
	configurations := make(map[string]*dynamic.Configuration)

	for _, service := range services {
		logger := log.Ctx(ctx).With().Str("service", service.Name).Logger()
		ctxService := logger.WithContext(ctx)

		if !p.keepService(ctx, service) {
			continue
		}

		confFromLabel, err := label.DecodeConfiguration(service.Labels)
		if err != nil {
			logger.Error().Err(err).Send()
			continue
		}

		var tcpOrUDP bool
		if len(confFromLabel.TCP.Routers) > 0 || len(confFromLabel.TCP.Services) > 0 {
			tcpOrUDP = true

			err := p.buildTCPServiceConfiguration(ctxService, service, confFromLabel.TCP)
			if err != nil {
				logger.Error().Err(err).Send()
				continue
			}
			provider.BuildTCPRouterConfiguration(ctxService, confFromLabel.TCP)
		}

		if len(confFromLabel.UDP.Routers) > 0 || len(confFromLabel.UDP.Services) > 0 {
			tcpOrUDP = true

			err := p.buildUDPServiceConfiguration(ctxService, service, confFromLabel.UDP)
			if err != nil {
				logger.Error().Err(err).Send()
				continue
			}
			provider.BuildUDPRouterConfiguration(ctxService, confFromLabel.UDP)
		}

		if tcpOrUDP && len(confFromLabel.HTTP.Routers) == 0 &&
			len(confFromLabel.HTTP.Middlewares) == 0 &&
			len(confFromLabel.HTTP.Services) == 0 {
			configurations[service.Name] = confFromLabel
			continue
		}

		err = p.buildServiceConfiguration(ctx, service, confFromLabel.HTTP)
		if err != nil {
			logger.Error().Err(err).Send()
			continue
		}

		model := struct {
			Name   string
			Labels map[string]string
		}{
			Name:   service.Name,
			Labels: service.Labels,
		}

		provider.BuildRouterConfiguration(ctx, confFromLabel.HTTP, service.Name, p.defaultRuleTpl, model)

		configurations[service.Name] = confFromLabel
	}

	return provider.Merge(ctx, configurations)
}

func (p *Provider) buildTCPServiceConfiguration(ctx context.Context, service rancherData, configuration *dynamic.TCPConfiguration) error {
	serviceName := service.Name

	if len(configuration.Services) == 0 {
		configuration.Services = map[string]*dynamic.TCPService{
			serviceName: {
				LoadBalancer: new(dynamic.TCPServersLoadBalancer),
			},
		}
	}

	for _, confService := range configuration.Services {
		err := p.addServerTCP(ctx, service, confService.LoadBalancer)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *Provider) buildUDPServiceConfiguration(ctx context.Context, service rancherData, configuration *dynamic.UDPConfiguration) error {
	serviceName := service.Name

	if len(configuration.Services) == 0 {
		configuration.Services = make(map[string]*dynamic.UDPService)
		lb := &dynamic.UDPServersLoadBalancer{}

		configuration.Services[serviceName] = &dynamic.UDPService{
			LoadBalancer: lb,
		}
	}

	for _, confService := range configuration.Services {
		err := p.addServerUDP(ctx, service, confService.LoadBalancer)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *Provider) buildServiceConfiguration(ctx context.Context, service rancherData, configuration *dynamic.HTTPConfiguration) error {
	serviceName := service.Name

	if len(configuration.Services) == 0 {
		configuration.Services = make(map[string]*dynamic.Service)
		lb := &dynamic.ServersLoadBalancer{}
		lb.SetDefaults()
		configuration.Services[serviceName] = &dynamic.Service{
			LoadBalancer: lb,
		}
	}

	for _, confService := range configuration.Services {
		err := p.addServers(ctx, service, confService.LoadBalancer)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *Provider) keepService(ctx context.Context, service rancherData) bool {
	logger := log.Ctx(ctx)

	if !service.ExtraConf.Enable {
		logger.Debug().Msg("Filtering disabled service")
		return false
	}

	matches, err := constraints.MatchLabels(service.Labels, p.Constraints)
	if err != nil {
		logger.Error().Err(err).Msg("Error matching constraint expression")
		return false
	}
	if !matches {
		logger.Debug().Msgf("Service pruned by constraint expression: %q", p.Constraints)
		return false
	}

	if p.EnableServiceHealthFilter {
		if service.Health != "" && service.Health != healthy && service.Health != updatingHealthy {
			logger.Debug().Msgf("Filtering service %s with healthState of %s", service.Name, service.Health)
			return false
		}
		if service.State != "" && service.State != active && service.State != updatingActive && service.State != upgraded && service.State != upgrading {
			logger.Debug().Msgf("Filtering service %s with state of %s", service.Name, service.State)
			return false
		}
	}

	return true
}

func (p *Provider) addServerTCP(ctx context.Context, service rancherData, loadBalancer *dynamic.TCPServersLoadBalancer) error {
	log.Ctx(ctx).Debug().Msgf("Trying to add servers for service %s", service.Name)

	if loadBalancer == nil {
		return errors.New("load-balancer is not defined")
	}

	if len(loadBalancer.Servers) == 0 {
		loadBalancer.Servers = []dynamic.TCPServer{{}}
	}

	port := loadBalancer.Servers[0].Port
	loadBalancer.Servers[0].Port = ""

	if port == "" {
		port = getServicePort(service)
	}

	if port == "" {
		return errors.New("port is missing")
	}

	var servers []dynamic.TCPServer
	for _, containerIP := range service.Containers {
		servers = append(servers, dynamic.TCPServer{
			Address: net.JoinHostPort(containerIP, port),
		})
	}

	loadBalancer.Servers = servers

	return nil
}

func (p *Provider) addServerUDP(ctx context.Context, service rancherData, loadBalancer *dynamic.UDPServersLoadBalancer) error {
	log.Ctx(ctx).Debug().Msgf("Trying to add servers for service %s", service.Name)

	if loadBalancer == nil {
		return errors.New("load-balancer is not defined")
	}

	if len(loadBalancer.Servers) == 0 {
		loadBalancer.Servers = []dynamic.UDPServer{{}}
	}

	port := loadBalancer.Servers[0].Port
	loadBalancer.Servers[0].Port = ""

	if port == "" {
		port = getServicePort(service)
	}

	if port == "" {
		return errors.New("port is missing")
	}

	var servers []dynamic.UDPServer
	for _, containerIP := range service.Containers {
		servers = append(servers, dynamic.UDPServer{
			Address: net.JoinHostPort(containerIP, port),
		})
	}

	loadBalancer.Servers = servers

	return nil
}

func (p *Provider) addServers(ctx context.Context, service rancherData, loadBalancer *dynamic.ServersLoadBalancer) error {
	log.Ctx(ctx).Debug().Msgf("Trying to add servers for service %s", service.Name)

	if loadBalancer == nil {
		return errors.New("load-balancer is not defined")
	}

	if len(loadBalancer.Servers) == 0 {
		server := dynamic.Server{}
		server.SetDefaults()

		loadBalancer.Servers = []dynamic.Server{server}
	}

	port := loadBalancer.Servers[0].Port
	loadBalancer.Servers[0].Port = ""

	if port == "" {
		port = getServicePort(service)
	}

	if port == "" {
		return errors.New("port is missing")
	}

	var servers []dynamic.Server
	for _, containerIP := range service.Containers {
		servers = append(servers, dynamic.Server{
			URL: fmt.Sprintf("%s://%s", loadBalancer.Servers[0].Scheme, net.JoinHostPort(containerIP, port)),
		})
	}

	loadBalancer.Servers = servers

	return nil
}

func getServicePort(data rancherData) string {
	rawPort := strings.Split(data.Port, "/")[0]
	hostPort := strings.Split(rawPort, ":")

	if len(hostPort) >= 2 {
		return hostPort[1]
	}
	if len(hostPort) > 0 && hostPort[0] != "" {
		return hostPort[0]
	}
	return rawPort
}
```
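The `getServicePort` helper at the end of that file accepts Rancher port declarations of the form `hostPort:containerPort/protocol` and prefers the container-side port. A stand-alone re-implementation for illustration (same logic, taking the raw string directly instead of a `rancherData`):

```go
package main

import (
	"fmt"
	"strings"
)

// getServicePort mirrors the deleted helper: strip the protocol
// suffix, then prefer the container port after the colon.
func getServicePort(port string) string {
	rawPort := strings.Split(port, "/")[0]
	hostPort := strings.Split(rawPort, ":")

	if len(hostPort) >= 2 {
		return hostPort[1]
	}
	if len(hostPort) > 0 && hostPort[0] != "" {
		return hostPort[0]
	}
	return rawPort
}

func main() {
	fmt.Println(getServicePort("8080:80/tcp")) // 80
	fmt.Println(getServicePort("443/tcp"))     // 443
	fmt.Println(getServicePort(""))            // "" (caller reports "port is missing")
}
```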
One file's diff is suppressed because it is too large.
The label-decoding helper is deleted (@@ -1,22 +0,0 @@):

```go
package rancher

import (
	"github.com/traefik/traefik/v2/pkg/config/label"
)

type configuration struct {
	Enable bool
}

func (p *Provider) getConfiguration(service rancherData) (configuration, error) {
	conf := configuration{
		Enable: p.ExposedByDefault,
	}

	err := label.Decode(service.Labels, &conf, "traefik.rancher.", "traefik.enable")
	if err != nil {
		return configuration{}, err
	}

	return conf, nil
}
```
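Here `label.Decode` maps container labels carrying the `traefik.rancher.` prefix (plus the root `traefik.enable` switch) onto the struct's fields. A hand-rolled sketch of the effective behavior for this one-field struct; the real decoder is reflection-based and more general, so `decodeEnable` below is an assumption for illustration only:

```go
package main

import (
	"fmt"
	"strconv"
)

type configuration struct {
	Enable bool
}

// decodeEnable approximates label.Decode for this struct: the
// "traefik.enable" label, when present, overrides the default
// inherited from ExposedByDefault.
func decodeEnable(labels map[string]string, exposedByDefault bool) (configuration, error) {
	conf := configuration{Enable: exposedByDefault}
	if raw, ok := labels["traefik.enable"]; ok {
		enable, err := strconv.ParseBool(raw)
		if err != nil {
			return configuration{}, err
		}
		conf.Enable = enable
	}
	return conf, nil
}

func main() {
	conf, _ := decodeEnable(map[string]string{"traefik.enable": "true"}, false)
	fmt.Println(conf.Enable) // true
}
```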
The provider implementation itself is deleted (@@ -1,232 +0,0 @@):

```go
package rancher

import (
	"context"
	"fmt"
	"text/template"
	"time"

	"github.com/cenkalti/backoff/v4"
	rancher "github.com/rancher/go-rancher-metadata/metadata"
	"github.com/rs/zerolog/log"
	"github.com/traefik/traefik/v2/pkg/config/dynamic"
	"github.com/traefik/traefik/v2/pkg/job"
	"github.com/traefik/traefik/v2/pkg/logs"
	"github.com/traefik/traefik/v2/pkg/provider"
	"github.com/traefik/traefik/v2/pkg/safe"
)

const (
	// DefaultTemplateRule The default template for the default rule.
	DefaultTemplateRule = "Host(`{{ normalize .Name }}`)"
)

// Health.
const (
	healthy         = "healthy"
	updatingHealthy = "updating-healthy"
)

// States.
const (
	active          = "active"
	running         = "running"
	upgraded        = "upgraded"
	upgrading       = "upgrading"
	updatingActive  = "updating-active"
	updatingRunning = "updating-running"
)

var _ provider.Provider = (*Provider)(nil)

// Provider holds configurations of the provider.
type Provider struct {
	Constraints               string `description:"Constraints is an expression that Traefik matches against the container's labels to determine whether to create any route for that container." json:"constraints,omitempty" toml:"constraints,omitempty" yaml:"constraints,omitempty" export:"true"`
	Watch                     bool   `description:"Watch provider." json:"watch,omitempty" toml:"watch,omitempty" yaml:"watch,omitempty" export:"true"`
	DefaultRule               string `description:"Default rule." json:"defaultRule,omitempty" toml:"defaultRule,omitempty" yaml:"defaultRule,omitempty"`
	ExposedByDefault          bool   `description:"Expose containers by default." json:"exposedByDefault,omitempty" toml:"exposedByDefault,omitempty" yaml:"exposedByDefault,omitempty" export:"true"`
	EnableServiceHealthFilter bool   `description:"Filter services with unhealthy states and inactive states." json:"enableServiceHealthFilter,omitempty" toml:"enableServiceHealthFilter,omitempty" yaml:"enableServiceHealthFilter,omitempty" export:"true"`
	RefreshSeconds            int    `description:"Defines the polling interval in seconds." json:"refreshSeconds,omitempty" toml:"refreshSeconds,omitempty" yaml:"refreshSeconds,omitempty" export:"true"`
	IntervalPoll              bool   `description:"Poll the Rancher metadata service every 'rancher.refreshseconds' (less accurate)." json:"intervalPoll,omitempty" toml:"intervalPoll,omitempty" yaml:"intervalPoll,omitempty" export:"true"`
	Prefix                    string `description:"Prefix used for accessing the Rancher metadata service." json:"prefix,omitempty" toml:"prefix,omitempty" yaml:"prefix,omitempty"`
	defaultRuleTpl            *template.Template
}

// SetDefaults sets the default values.
func (p *Provider) SetDefaults() {
	p.Watch = true
	p.ExposedByDefault = true
	p.EnableServiceHealthFilter = true
	p.RefreshSeconds = 15
	p.DefaultRule = DefaultTemplateRule
	p.Prefix = "latest"
}

type rancherData struct {
	Name       string
	Labels     map[string]string
	Containers []string
	Health     string
	State      string
	Port       string
	ExtraConf  configuration
}

// Init the provider.
func (p *Provider) Init() error {
	defaultRuleTpl, err := provider.MakeDefaultRuleTemplate(p.DefaultRule, nil)
	if err != nil {
		return fmt.Errorf("error while parsing default rule: %w", err)
	}

	p.defaultRuleTpl = defaultRuleTpl
	return nil
}

func (p *Provider) createClient(ctx context.Context) (rancher.Client, error) {
	metadataServiceURL := fmt.Sprintf("http://rancher-metadata.rancher.internal/%s", p.Prefix)
	client, err := rancher.NewClientAndWait(metadataServiceURL)
	if err != nil {
		log.Ctx(ctx).Error().Err(err).Msg("Failed to create Rancher metadata service client")
		return nil, err
	}

	return client, nil
}

// Provide allows the rancher provider to provide configurations to traefik using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
	pool.GoCtx(func(routineCtx context.Context) {
		logger := log.Ctx(routineCtx).With().Str(logs.ProviderName, "rancher").Logger()
		ctxLog := logger.WithContext(routineCtx)

		operation := func() error {
			client, err := p.createClient(ctxLog)
			if err != nil {
				logger.Error().Err(err).Msg("Failed to create the metadata client metadata service")
				return err
			}

			updateConfiguration := func(_ string) {
				stacks, err := client.GetStacks()
				if err != nil {
					logger.Error().Err(err).Msg("Failed to query Rancher metadata service")
					return
				}

				rancherData := p.parseMetadataSourcedRancherData(ctxLog, stacks)

				logger.Printf("Received Rancher data %+v", rancherData)

				configuration := p.buildConfiguration(ctxLog, rancherData)
				configurationChan <- dynamic.Message{
					ProviderName:  "rancher",
					Configuration: configuration,
				}
			}
			updateConfiguration("init")

			if p.Watch {
				if p.IntervalPoll {
					p.intervalPoll(ctxLog, client, updateConfiguration)
				} else {
					// Long polling should be favored for the most accurate configuration updates.
					// Holds the connection until there is either a change in the metadata repository or `p.RefreshSeconds` has elapsed.
					client.OnChangeCtx(ctxLog, p.RefreshSeconds, updateConfiguration)
				}
			}

			return nil
		}

		notify := func(err error, time time.Duration) {
			logger.Error().Err(err).Msgf("Provider error, retrying in %s", time)
		}
		err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxLog), notify)
		if err != nil {
			logger.Error().Err(err).Msg("Cannot retrieve data")
		}
	})

	return nil
}

func (p *Provider) intervalPoll(ctx context.Context, client rancher.Client, updateConfiguration func(string)) {
	ticker := time.NewTicker(time.Duration(p.RefreshSeconds) * time.Second)
	defer ticker.Stop()

	var version string
	for {
		select {
		case <-ticker.C:
			newVersion, err := client.GetVersion()
			if err != nil {
				log.Ctx(ctx).Error().Err(err).Msg("Failed to create Rancher metadata service client")
			} else if version != newVersion {
				version = newVersion
				updateConfiguration(version)
			}
		case <-ctx.Done():
			return
		}
	}
}

func (p *Provider) parseMetadataSourcedRancherData(ctx context.Context, stacks []rancher.Stack) (rancherDataList []rancherData) {
	for _, stack := range stacks {
		for _, service := range stack.Services {
			logger := log.Ctx(ctx).With().Str("stack", stack.Name).Str("service", service.Name).Logger()
			ctxSvc := logger.WithContext(ctx)

			servicePort := ""
			if len(service.Ports) > 0 {
				servicePort = service.Ports[0]
			}
			for _, port := range service.Ports {
				logger.Debug().Msgf("Set Port %s", port)
			}

			var containerIPAddresses []string
			for _, container := range service.Containers {
				if containerFilter(ctxSvc, container.Name, container.HealthState, container.State) {
					containerIPAddresses = append(containerIPAddresses, container.PrimaryIp)
				}
			}

			service := rancherData{
				Name:       service.Name + "_" + stack.Name,
				State:      service.State,
				Labels:     service.Labels,
				Port:       servicePort,
				Containers: containerIPAddresses,
			}

			extraConf, err := p.getConfiguration(service)
			if err != nil {
				logger.Error().Err(err).Msgf("Skip container %s", service.Name)
				continue
			}

			service.ExtraConf = extraConf

			rancherDataList = append(rancherDataList, service)
		}
	}
	return rancherDataList
}

func containerFilter(ctx context.Context, name, healthState, state string) bool {
	logger := log.Ctx(ctx)

	if healthState != "" && healthState != healthy && healthState != updatingHealthy {
		logger.Debug().Msgf("Filtering container %s with healthState of %s", name, healthState)
		return false
	}

	if state != "" && state != running && state != updatingRunning && state != upgraded {
		logger.Debug().Msgf("Filtering container %s with state of %s", name, state)
		return false
	}

	return true
}
```
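The `DefaultTemplateRule` in that file is a Go `text/template` evaluated against the per-service model (`Name`, `Labels`), with a `normalize` helper that turns a service name into a valid host label. A minimal sketch of how the default rule expands; the body of `normalize` here is an assumption for illustration (Traefik's real helper lives in its provider package):

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

func main() {
	// Assumed stand-in for Traefik's normalize: lower-case the
	// name and replace separators with dashes.
	funcs := template.FuncMap{
		"normalize": func(s string) string {
			return strings.ToLower(strings.NewReplacer("_", "-", ".", "-").Replace(s))
		},
	}

	// Same template string as DefaultTemplateRule above.
	tpl := template.Must(template.New("rule").Funcs(funcs).Parse("Host(`{{ normalize .Name }}`)"))

	// The model mirrors the anonymous struct built in buildConfiguration.
	model := struct {
		Name   string
		Labels map[string]string
	}{Name: "web_mystack"}

	_ = tpl.Execute(os.Stdout, model) // Host(`web-mystack`)
	fmt.Println()
}
```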