Dynamic Configuration Refactoring
This commit is contained in:
parent d3ae88f108
commit a09dfa3ce1
452 changed files with 21023 additions and 9419 deletions
@@ -1,6 +1,7 @@
 package acme
 
 import (
+	"context"
 	"crypto"
 	"crypto/rand"
 	"crypto/rsa"
@@ -24,8 +25,8 @@ const (
 )
 
 // NewAccount creates an account
-func NewAccount(email string, keyTypeValue string) (*Account, error) {
-	keyType := GetKeyType(keyTypeValue)
+func NewAccount(ctx context.Context, email string, keyTypeValue string) (*Account, error) {
+	keyType := GetKeyType(ctx, keyTypeValue)
 
 	// Create a user. New accounts need an email and private key to start
 	privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
@@ -52,16 +53,20 @@ func (a *Account) GetRegistration() *acme.RegistrationResource {
 
 // GetPrivateKey returns private key
 func (a *Account) GetPrivateKey() crypto.PrivateKey {
-	if privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey); err == nil {
-		return privateKey
+	privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey)
+	if err != nil {
+		log.WithoutContext().WithField(log.ProviderName, "acme").
+			Errorf("Cannot unmarshal private key %+v: %v", a.PrivateKey, err)
+		return nil
 	}
 
-	log.Errorf("Cannot unmarshal private key %+v", a.PrivateKey)
-	return nil
+	return privateKey
 }
 
 // GetKeyType used to determine which algo to used
-func GetKeyType(value string) acme.KeyType {
+func GetKeyType(ctx context.Context, value string) acme.KeyType {
+	logger := log.FromContext(ctx)
+
 	switch value {
 	case "EC256":
 		return acme.EC256
@@ -74,10 +79,10 @@ func GetKeyType(value string) acme.KeyType {
 	case "RSA8192":
 		return acme.RSA8192
 	case "":
-		log.Infof("The key type is empty. Use default key type %v.", acme.RSA4096)
+		logger.Infof("The key type is empty. Use default key type %v.", acme.RSA4096)
 		return acme.RSA4096
 	default:
-		log.Infof("Unable to determine key type value %q. Use default key type %v.", value, acme.RSA4096)
+		logger.Infof("Unable to determine the key type value %q: falling back on %v.", value, acme.RSA4096)
 		return acme.RSA4096
 	}
 }
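The change above is the heart of the logging refactor in this commit: helpers stop calling the global logger and instead pull a logger out of a context.Context that carries the provider name. A minimal sketch of the pattern, using only the log helpers that appear in this diff (log.With, log.Str, log.FromContext, log.WithoutContext); treat it as an illustration, not code from the commit:

package main

import (
	"context"

	"github.com/containous/traefik/log"
)

func main() {
	// Tag a context with the provider name; every logger recovered from this
	// context will carry the provider=acme field.
	ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme"))

	// Downstream code retrieves the scoped logger from the context, as the
	// refactored GetKeyType does.
	logger := log.FromContext(ctx)
	logger.Infof("The key type is empty. Use default key type %v.", "RSA4096")

	// Code paths without a context fall back to the global logger but still
	// attach the provider field explicitly.
	log.WithoutContext().WithField(log.ProviderName, "acme").
		Debug("Building ACME client...")
}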
@@ -1,6 +1,7 @@
 package acme
 
 import (
+	"context"
 	"net"
 	"net/http"
 	"time"
@@ -18,23 +19,56 @@ type challengeHTTP struct {
 	Store Store
 }
 
-// Present presents a challenge to obtain new ACME certificate
+// Present presents a challenge to obtain new ACME certificate.
 func (c *challengeHTTP) Present(domain, token, keyAuth string) error {
 	return c.Store.SetHTTPChallengeToken(token, domain, []byte(keyAuth))
 }
 
-// CleanUp cleans the challenges when certificate is obtained
+// CleanUp cleans the challenges when certificate is obtained.
 func (c *challengeHTTP) CleanUp(domain, token, keyAuth string) error {
 	return c.Store.RemoveHTTPChallengeToken(token, domain)
 }
 
-// Timeout calculates the maximum of time allowed to resolved an ACME challenge
+// Timeout calculates the maximum of time allowed to resolved an ACME challenge.
 func (c *challengeHTTP) Timeout() (timeout, interval time.Duration) {
 	return 60 * time.Second, 5 * time.Second
 }
 
-func getTokenValue(token, domain string, store Store) []byte {
-	log.Debugf("Looking for an existing ACME challenge for token %v...", token)
+// Append adds routes on internal router
+func (p *Provider) Append(router *mux.Router) {
+	router.Methods(http.MethodGet).
+		Path(acme.HTTP01ChallengePath("{token}")).
+		Handler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+			vars := mux.Vars(req)
+
+			ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme"))
+			logger := log.FromContext(ctx)
+
+			if token, ok := vars["token"]; ok {
+				domain, _, err := net.SplitHostPort(req.Host)
+				if err != nil {
+					logger.Debugf("Unable to split host and port: %v. Fallback to request host.", err)
+					domain = req.Host
+				}
+
+				tokenValue := getTokenValue(ctx, token, domain, p.Store)
+				if len(tokenValue) > 0 {
+					rw.WriteHeader(http.StatusOK)
+					_, err = rw.Write(tokenValue)
+					if err != nil {
+						logger.Errorf("Unable to write token: %v", err)
+					}
+					return
+				}
+			}
+			rw.WriteHeader(http.StatusNotFound)
+		}))
+}
+
+func getTokenValue(ctx context.Context, token, domain string, store Store) []byte {
+	logger := log.FromContext(ctx)
+	logger.Debugf("Retrieving the ACME challenge for token %v...", token)
+
 	var result []byte
 
 	operation := func() error {
@@ -44,43 +78,16 @@ func getTokenValue(token, domain string, store Store) []byte {
 	}
 
 	notify := func(err error, time time.Duration) {
-		log.Errorf("Error getting challenge for token retrying in %s", time)
+		logger.Errorf("Error getting challenge for token retrying in %s", time)
 	}
 
 	ebo := backoff.NewExponentialBackOff()
 	ebo.MaxElapsedTime = 60 * time.Second
 	err := backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify)
 	if err != nil {
-		log.Errorf("Error getting challenge for token: %v", err)
+		logger.Errorf("Cannot retrieve the ACME challenge for token %v: %v", token, err)
 		return []byte{}
 	}
 
 	return result
 }
-
-// AddRoutes add routes on internal router
-func (p *Provider) AddRoutes(router *mux.Router) {
-	router.Methods(http.MethodGet).
-		Path(acme.HTTP01ChallengePath("{token}")).
-		Handler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-			vars := mux.Vars(req)
-			if token, ok := vars["token"]; ok {
-				domain, _, err := net.SplitHostPort(req.Host)
-				if err != nil {
-					log.Debugf("Unable to split host and port: %v. Fallback to request host.", err)
-					domain = req.Host
-				}
-
-				tokenValue := getTokenValue(token, domain, p.Store)
-				if len(tokenValue) > 0 {
-					rw.WriteHeader(http.StatusOK)
-					_, err = rw.Write(tokenValue)
-					if err != nil {
-						log.Errorf("Unable to write token : %v", err)
-					}
-					return
-				}
-			}
-			rw.WriteHeader(http.StatusNotFound)
-		}))
-}
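getTokenValue wraps the store lookup in an exponential backoff so that a challenge requested before the backing store has converged is retried for up to a minute. The sketch below isolates that retry shape with the cenk/backoff API used in this diff; fetchToken is a hypothetical stand-in for the Store call, and the real code additionally wraps the operation with safe.OperationWithRecover:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenk/backoff"
)

func main() {
	var result []byte

	// The operation is retried until it stops returning an error or the
	// backoff gives up; fetchToken stands in for Store.GetHTTPChallengeToken.
	operation := func() error {
		value, err := fetchToken("example.com", "token-id")
		if err != nil {
			return err
		}
		result = value
		return nil
	}

	notify := func(err error, wait time.Duration) {
		fmt.Printf("error getting challenge for token, retrying in %s: %v\n", wait, err)
	}

	ebo := backoff.NewExponentialBackOff()
	ebo.MaxElapsedTime = 60 * time.Second

	if err := backoff.RetryNotify(operation, ebo, notify); err != nil {
		fmt.Printf("cannot retrieve the ACME challenge: %v\n", err)
		return
	}
	fmt.Printf("challenge value: %s\n", result)
}

// fetchToken is a hypothetical helper used only for this sketch.
func fetchToken(domain, token string) ([]byte, error) {
	if token == "" {
		return nil, errors.New("empty token")
	}
	return []byte("key-auth-for-" + domain), nil
}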
@@ -15,7 +15,8 @@ type challengeTLSALPN struct {
 }
 
 func (c *challengeTLSALPN) Present(domain, token, keyAuth string) error {
-	log.Debugf("TLS Challenge Present temp certificate for %s", domain)
+	log.WithoutContext().WithField(log.ProviderName, "acme").
+		Debugf("TLS Challenge Present temp certificate for %s", domain)
 
 	certPEMBlock, keyPEMBlock, err := acme.TLSALPNChallengeBlocks(domain, keyAuth)
 	if err != nil {
@@ -27,7 +28,8 @@ func (c *challengeTLSALPN) Present(domain, token, keyAuth string) error {
 }
 
 func (c *challengeTLSALPN) CleanUp(domain, token, keyAuth string) error {
-	log.Debugf("TLS Challenge CleanUp temp certificate for %s", domain)
+	log.WithoutContext().WithField(log.ProviderName, "acme").
+		Debugf("TLS Challenge CleanUp temp certificate for %s", domain)
 
 	return c.Store.RemoveTLSChallenge(domain)
 }
@ -42,6 +42,8 @@ func (s *LocalStore) get() (*StoredData, error) {
|
|||
}
|
||||
|
||||
if hasData {
|
||||
logger := log.WithoutContext().WithField(log.ProviderName, "acme")
|
||||
|
||||
f, err := os.Open(s.filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -66,7 +68,7 @@ func (s *LocalStore) get() (*StoredData, error) {
|
|||
return nil, err
|
||||
}
|
||||
if isOldRegistration {
|
||||
log.Debug("Reset ACME account.")
|
||||
logger.Debug("Reseting ACME account.")
|
||||
s.storedData.Account = nil
|
||||
s.SaveDataChan <- s.storedData
|
||||
}
|
||||
|
|
@ -76,7 +78,7 @@ func (s *LocalStore) get() (*StoredData, error) {
|
|||
var certificates []*Certificate
|
||||
for _, certificate := range s.storedData.Certificates {
|
||||
if len(certificate.Certificate) == 0 || len(certificate.Key) == 0 {
|
||||
log.Debugf("Delete certificate %v for domains %v which have no value.", certificate, certificate.Domain.ToStrArray())
|
||||
logger.Debugf("Deleting empty certificate %v for %v", certificate, certificate.Domain.ToStrArray())
|
||||
continue
|
||||
}
|
||||
certificates = append(certificates, certificate)
|
||||
|
|
@ -95,15 +97,16 @@ func (s *LocalStore) get() (*StoredData, error) {
|
|||
// listenSaveAction listens to a chan to store ACME data in json format into LocalStore.filename
|
||||
func (s *LocalStore) listenSaveAction() {
|
||||
safe.Go(func() {
|
||||
logger := log.WithoutContext().WithField(log.ProviderName, "acme")
|
||||
for object := range s.SaveDataChan {
|
||||
data, err := json.MarshalIndent(object, "", " ")
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logger.Error(err)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(s.filename, data, 0600)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logger.Error(err)
|
||||
}
|
||||
}
|
||||
})
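listenSaveAction persists every snapshot pushed on SaveDataChan as indented JSON with 0600 permissions, now through the provider-scoped logger. A self-contained sketch of that save-loop shape, with a hypothetical storedData type standing in for the provider's StoredData:

package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"time"
)

// storedData is a hypothetical stand-in for the provider's StoredData type.
type storedData struct {
	Account string `json:"account"`
}

func main() {
	saveDataChan := make(chan storedData)

	// Same shape as listenSaveAction: one goroutine drains the channel and
	// writes each snapshot as indented JSON with restrictive permissions.
	go func() {
		for object := range saveDataChan {
			data, err := json.MarshalIndent(object, "", "  ")
			if err != nil {
				log.Print(err)
				continue
			}
			if err := ioutil.WriteFile("acme.json", data, 0600); err != nil {
				log.Print(err)
			}
		}
	}()

	saveDataChan <- storedData{Account: "user@example.com"}
	time.Sleep(100 * time.Millisecond) // give the writer time to flush in this sketch
}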
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
|
|
@ -15,6 +16,7 @@ import (
|
|||
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/config"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/rules"
|
||||
"github.com/containous/traefik/safe"
|
||||
|
|
@ -29,8 +31,8 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
// OSCPMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270
|
||||
OSCPMustStaple = false
|
||||
// oscpMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270
|
||||
oscpMustStaple = false
|
||||
)
|
||||
|
||||
// Configuration holds ACME configuration provided by users
|
||||
|
|
@ -49,23 +51,6 @@ type Configuration struct {
|
|||
Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcards domain. Wildcard domains only accepted with DNSChallenge"`
|
||||
}
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
*Configuration
|
||||
Store Store
|
||||
certificates []*Certificate
|
||||
account *Account
|
||||
client *acme.Client
|
||||
certsChan chan *Certificate
|
||||
configurationChan chan<- types.ConfigMessage
|
||||
certificateStore *traefiktls.CertificateStore
|
||||
clientMutex sync.Mutex
|
||||
configFromListenerChan chan types.Configuration
|
||||
pool *safe.Pool
|
||||
resolvingDomains map[string]struct{}
|
||||
resolvingDomainsMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// Certificate is a struct which contains all data needed from an ACME certificate
|
||||
type Certificate struct {
|
||||
Domain types.Domain
|
||||
|
|
@ -79,8 +64,9 @@ type DNSChallenge struct {
|
|||
DelayBeforeCheck parse.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."`
|
||||
Resolvers types.DNSResolvers `description:"Use following DNS servers to resolve the FQDN authority."`
|
||||
DisablePropagationCheck bool `description:"Disable the DNS propagation checks before notifying ACME that the DNS challenge is ready. [not recommended]"`
|
||||
preCheckTimeout time.Duration
|
||||
preCheckInterval time.Duration
|
||||
|
||||
preCheckTimeout time.Duration
|
||||
preCheckInterval time.Duration
|
||||
}
|
||||
|
||||
// HTTPChallenge contains HTTP challenge Configuration
|
||||
|
|
@ -91,8 +77,25 @@ type HTTPChallenge struct {
|
|||
// TLSChallenge contains TLS challenge Configuration
|
||||
type TLSChallenge struct{}
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
*Configuration
|
||||
Store Store
|
||||
certificates []*Certificate
|
||||
account *Account
|
||||
client *acme.Client
|
||||
certsChan chan *Certificate
|
||||
configurationChan chan<- config.Message
|
||||
certificateStore *traefiktls.CertificateStore
|
||||
clientMutex sync.Mutex
|
||||
configFromListenerChan chan config.Configuration
|
||||
pool *safe.Pool
|
||||
resolvingDomains map[string]struct{}
|
||||
resolvingDomainsMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// SetConfigListenerChan initializes the configFromListenerChan
|
||||
func (p *Provider) SetConfigListenerChan(configFromListenerChan chan types.Configuration) {
|
||||
func (p *Provider) SetConfigListenerChan(configFromListenerChan chan config.Configuration) {
|
||||
p.configFromListenerChan = configFromListenerChan
|
||||
}
|
||||
|
||||
|
|
@ -102,13 +105,15 @@ func (p *Provider) SetCertificateStore(certificateStore *traefiktls.CertificateS
|
|||
}
|
||||
|
||||
// ListenConfiguration sets a new Configuration into the configFromListenerChan
|
||||
func (p *Provider) ListenConfiguration(config types.Configuration) {
|
||||
func (p *Provider) ListenConfiguration(config config.Configuration) {
|
||||
p.configFromListenerChan <- config
|
||||
}
|
||||
|
||||
// ListenRequest resolves new certificates for a domain from an incoming request and return a valid Certificate to serve (onDemand option)
|
||||
func (p *Provider) ListenRequest(domain string) (*tls.Certificate, error) {
|
||||
acmeCert, err := p.resolveCertificate(types.Domain{Main: domain}, false)
|
||||
ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme"))
|
||||
|
||||
acmeCert, err := p.resolveCertificate(ctx, types.Domain{Main: domain}, false)
|
||||
if acmeCert == nil || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -120,9 +125,13 @@ func (p *Provider) ListenRequest(domain string) (*tls.Certificate, error) {
|
|||
|
||||
// Init for compatibility reason the BaseProvider implements an empty Init
|
||||
func (p *Provider) Init(_ types.Constraints) error {
|
||||
ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme"))
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
acme.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version)
|
||||
|
||||
if p.ACMELogging {
|
||||
legolog.Logger = fmtlog.New(log.WriterLevel(logrus.InfoLevel), "legolog: ", 0)
|
||||
legolog.Logger = fmtlog.New(logger.WriterLevel(logrus.InfoLevel), "legolog: ", 0)
|
||||
} else {
|
||||
legolog.Logger = fmtlog.New(ioutil.Discard, "", 0)
|
||||
}
|
||||
|
|
@ -138,8 +147,8 @@ func (p *Provider) Init(_ types.Constraints) error {
|
|||
}
|
||||
|
||||
// Reset Account if caServer changed, thus registration URI can be updated
|
||||
if p.account != nil && p.account.Registration != nil && !isAccountMatchingCaServer(p.account.Registration.URI, p.CAServer) {
|
||||
log.Info("Account URI does not match the current CAServer. The account will be reset")
|
||||
if p.account != nil && p.account.Registration != nil && !isAccountMatchingCaServer(ctx, p.account.Registration.URI, p.CAServer) {
|
||||
logger.Info("Account URI does not match the current CAServer. The account will be reset.")
|
||||
p.account = nil
|
||||
}
|
||||
|
||||
|
|
@ -154,49 +163,56 @@ func (p *Provider) Init(_ types.Constraints) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func isAccountMatchingCaServer(accountURI string, serverURI string) bool {
|
||||
func isAccountMatchingCaServer(ctx context.Context, accountURI string, serverURI string) bool {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
aru, err := url.Parse(accountURI)
|
||||
if err != nil {
|
||||
log.Infof("Unable to parse account.Registration URL : %v", err)
|
||||
logger.Infof("Unable to parse account.Registration URL: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
cau, err := url.Parse(serverURI)
|
||||
if err != nil {
|
||||
log.Infof("Unable to parse CAServer URL : %v", err)
|
||||
logger.Infof("Unable to parse CAServer URL: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return cau.Hostname() == aru.Hostname()
|
||||
}
|
||||
|
||||
// Provide allows the file provider to provide configurations to traefik
|
||||
// using the given Configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
func (p *Provider) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error {
|
||||
ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme"))
|
||||
|
||||
p.pool = pool
|
||||
|
||||
p.watchCertificate()
|
||||
p.watchNewDomains()
|
||||
p.watchCertificate(ctx)
|
||||
p.watchNewDomains(ctx)
|
||||
|
||||
p.configurationChan = configurationChan
|
||||
p.refreshCertificates()
|
||||
|
||||
p.deleteUnnecessaryDomains()
|
||||
p.deleteUnnecessaryDomains(ctx)
|
||||
for i := 0; i < len(p.Domains); i++ {
|
||||
domain := p.Domains[i]
|
||||
safe.Go(func() {
|
||||
if _, err := p.resolveCertificate(domain, true); err != nil {
|
||||
log.Errorf("Unable to obtain ACME certificate for domains %q : %v", strings.Join(domain.ToStrArray(), ","), err)
|
||||
if _, err := p.resolveCertificate(ctx, domain, true); err != nil {
|
||||
log.WithoutContext().WithField(log.ProviderName, "acme").
|
||||
Errorf("Unable to obtain ACME certificate for domains %q : %v", strings.Join(domain.ToStrArray(), ","), err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
p.renewCertificates()
|
||||
p.renewCertificates(ctx)
|
||||
|
||||
ticker := time.NewTicker(24 * time.Hour)
|
||||
pool.Go(func(stop chan bool) {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
p.renewCertificates()
|
||||
p.renewCertificates(ctx)
|
||||
case <-stop:
|
||||
ticker.Stop()
|
||||
return
|
||||
|
|
@ -211,22 +227,25 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
p.clientMutex.Lock()
|
||||
defer p.clientMutex.Unlock()
|
||||
|
||||
ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme"))
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
if p.client != nil {
|
||||
return p.client, nil
|
||||
}
|
||||
|
||||
account, err := p.initAccount()
|
||||
account, err := p.initAccount(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debug("Building ACME client...")
|
||||
logger.Debug("Building ACME client...")
|
||||
|
||||
caServer := "https://acme-v02.api.letsencrypt.org/directory"
|
||||
if len(p.CAServer) > 0 {
|
||||
caServer = p.CAServer
|
||||
}
|
||||
log.Debug(caServer)
|
||||
logger.Debug(caServer)
|
||||
|
||||
client, err := acme.NewClient(caServer, account, account.KeyType)
|
||||
if err != nil {
|
||||
|
|
@ -235,11 +254,11 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
|
||||
// New users will need to register; be sure to save it
|
||||
if account.GetRegistration() == nil {
|
||||
log.Info("Register...")
|
||||
logger.Info("Register...")
|
||||
|
||||
reg, err := client.Register(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
reg, errR := client.Register(true)
|
||||
if errR != nil {
|
||||
return nil, errR
|
||||
}
|
||||
|
||||
account.Registration = reg
|
||||
|
|
@ -253,12 +272,12 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
}
|
||||
|
||||
if p.DNSChallenge != nil && len(p.DNSChallenge.Provider) > 0 {
|
||||
log.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider)
|
||||
logger.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider)
|
||||
|
||||
SetRecursiveNameServers(p.DNSChallenge.Resolvers)
|
||||
SetPropagationCheck(p.DNSChallenge.DisablePropagationCheck)
|
||||
|
||||
err = dnsOverrideDelay(p.DNSChallenge.DelayBeforeCheck)
|
||||
err = dnsOverrideDelay(ctx, p.DNSChallenge.DelayBeforeCheck)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -286,7 +305,7 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
}
|
||||
|
||||
} else if p.HTTPChallenge != nil && len(p.HTTPChallenge.EntryPoint) > 0 {
|
||||
log.Debug("Using HTTP Challenge provider.")
|
||||
logger.Debug("Using HTTP Challenge provider.")
|
||||
|
||||
client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSALPN01})
|
||||
|
||||
|
|
@ -295,7 +314,7 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
return nil, err
|
||||
}
|
||||
} else if p.TLSChallenge != nil {
|
||||
log.Debug("Using TLS Challenge provider.")
|
||||
logger.Debug("Using TLS Challenge provider.")
|
||||
|
||||
client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.DNS01})
|
||||
|
||||
|
|
@ -311,10 +330,10 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
return p.client, nil
|
||||
}
|
||||
|
||||
func (p *Provider) initAccount() (*Account, error) {
|
||||
func (p *Provider) initAccount(ctx context.Context) (*Account, error) {
|
||||
if p.account == nil || len(p.account.Email) == 0 {
|
||||
var err error
|
||||
p.account, err = NewAccount(p.Email, p.KeyType)
|
||||
p.account, err = NewAccount(ctx, p.Email, p.KeyType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -322,59 +341,49 @@ func (p *Provider) initAccount() (*Account, error) {
|
|||
|
||||
// Set the KeyType if not already defined in the account
|
||||
if len(p.account.KeyType) == 0 {
|
||||
p.account.KeyType = GetKeyType(p.KeyType)
|
||||
p.account.KeyType = GetKeyType(ctx, p.KeyType)
|
||||
}
|
||||
|
||||
return p.account, nil
|
||||
}
|
||||
|
||||
func contains(entryPoints []string, acmeEntryPoint string) bool {
|
||||
for _, entryPoint := range entryPoints {
|
||||
if entryPoint == acmeEntryPoint {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *Provider) watchNewDomains() {
|
||||
func (p *Provider) watchNewDomains(ctx context.Context) {
|
||||
p.pool.Go(func(stop chan bool) {
|
||||
for {
|
||||
select {
|
||||
case config := <-p.configFromListenerChan:
|
||||
for _, frontend := range config.Frontends {
|
||||
if !contains(frontend.EntryPoints, p.EntryPoint) {
|
||||
for routerName, route := range config.Routers {
|
||||
logger := log.FromContext(ctx).WithField(log.RouterName, routerName)
|
||||
|
||||
// FIXME use new rule system
|
||||
domainRules := rules.Rules{}
|
||||
domains, err := domainRules.ParseDomains(route.Rule)
|
||||
if err != nil {
|
||||
logger.Errorf("Error parsing domains in provider ACME: %v", err)
|
||||
continue
|
||||
}
|
||||
for _, route := range frontend.Routes {
|
||||
domainRules := rules.Rules{}
|
||||
domains, err := domainRules.ParseDomains(route.Rule)
|
||||
if err != nil {
|
||||
log.Errorf("Error parsing domains in provider ACME: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(domains) == 0 {
|
||||
log.Debugf("No domain parsed in rule %q in provider ACME", route.Rule)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Try to challenge certificate for domain %v founded in Host rule", domains)
|
||||
|
||||
var domain types.Domain
|
||||
if len(domains) > 0 {
|
||||
domain = types.Domain{Main: domains[0]}
|
||||
if len(domains) > 1 {
|
||||
domain.SANs = domains[1:]
|
||||
}
|
||||
|
||||
safe.Go(func() {
|
||||
if _, err := p.resolveCertificate(domain, false); err != nil {
|
||||
log.Errorf("Unable to obtain ACME certificate for domains %q detected thanks to rule %q : %v", strings.Join(domains, ","), route.Rule, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
if len(domains) == 0 {
|
||||
logger.Debugf("No domain parsed in rule %q in provider ACME", route.Rule)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Debugf("Try to challenge certificate for domain %v founded in Host rule", domains)
|
||||
|
||||
var domain types.Domain
|
||||
if len(domains) > 0 {
|
||||
domain = types.Domain{Main: domains[0]}
|
||||
if len(domains) > 1 {
|
||||
domain.SANs = domains[1:]
|
||||
}
|
||||
|
||||
safe.Go(func() {
|
||||
if _, err := p.resolveCertificate(ctx, domain, false); err != nil {
|
||||
logger.Errorf("Unable to obtain ACME certificate for domains %q detected thanks to rule %q : %v", strings.Join(domains, ","), route.Rule, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
case <-stop:
|
||||
return
|
||||
|
|
@ -383,14 +392,14 @@ func (p *Provider) watchNewDomains() {
|
|||
})
|
||||
}
|
||||
|
||||
func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurationFile bool) (*acme.CertificateResource, error) {
|
||||
domains, err := p.getValidDomains(domain, domainFromConfigurationFile)
|
||||
func (p *Provider) resolveCertificate(ctx context.Context, domain types.Domain, domainFromConfigurationFile bool) (*acme.CertificateResource, error) {
|
||||
domains, err := p.getValidDomains(ctx, domain, domainFromConfigurationFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check provided certificates
|
||||
uncheckedDomains := p.getUncheckedDomains(domains, !domainFromConfigurationFile)
|
||||
uncheckedDomains := p.getUncheckedDomains(ctx, domains, !domainFromConfigurationFile)
|
||||
if len(uncheckedDomains) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
|
@ -398,7 +407,8 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati
|
|||
p.addResolvingDomains(uncheckedDomains)
|
||||
defer p.removeResolvingDomains(uncheckedDomains)
|
||||
|
||||
log.Debugf("Loading ACME certificates %+v...", uncheckedDomains)
|
||||
logger := log.FromContext(ctx)
|
||||
logger.Debugf("Loading ACME certificates %+v...", uncheckedDomains)
|
||||
|
||||
client, err := p.getClient()
|
||||
if err != nil {
|
||||
|
|
@ -408,9 +418,9 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati
|
|||
var certificate *acme.CertificateResource
|
||||
bundle := true
|
||||
if p.useCertificateWithRetry(uncheckedDomains) {
|
||||
certificate, err = obtainCertificateWithRetry(domains, client, p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval, bundle)
|
||||
certificate, err = obtainCertificateWithRetry(ctx, domains, client, p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval, bundle)
|
||||
} else {
|
||||
certificate, err = client.ObtainCertificate(domains, bundle, nil, OSCPMustStaple)
|
||||
certificate, err = client.ObtainCertificate(domains, bundle, nil, oscpMustStaple)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -423,7 +433,7 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati
|
|||
return nil, fmt.Errorf("domains %v generate certificate with no value: %v", uncheckedDomains, certificate)
|
||||
}
|
||||
|
||||
log.Debugf("Certificates obtained for domains %+v", uncheckedDomains)
|
||||
logger.Debugf("Certificates obtained for domains %+v", uncheckedDomains)
|
||||
|
||||
if len(uncheckedDomains) > 1 {
|
||||
domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]}
|
||||
|
|
@ -479,17 +489,19 @@ func (p *Provider) useCertificateWithRetry(domains []string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func obtainCertificateWithRetry(domains []string, client *acme.Client, timeout, interval time.Duration, bundle bool) (*acme.CertificateResource, error) {
|
||||
func obtainCertificateWithRetry(ctx context.Context, domains []string, client *acme.Client, timeout, interval time.Duration, bundle bool) (*acme.CertificateResource, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
var certificate *acme.CertificateResource
|
||||
var err error
|
||||
|
||||
operation := func() error {
|
||||
certificate, err = client.ObtainCertificate(domains, bundle, nil, OSCPMustStaple)
|
||||
certificate, err = client.ObtainCertificate(domains, bundle, nil, oscpMustStaple)
|
||||
return err
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Error obtaining certificate retrying in %s", time)
|
||||
logger.Errorf("Error obtaining certificate retrying in %s", time)
|
||||
}
|
||||
|
||||
// Define a retry backOff to let LEGO tries twice to obtain a certificate for both wildcard and root domain
|
||||
|
|
@ -500,20 +512,20 @@ func obtainCertificateWithRetry(domains []string, client *acme.Client, timeout,
|
|||
|
||||
err = backoff.RetryNotify(safe.OperationWithRecover(operation), rbo, notify)
|
||||
if err != nil {
|
||||
log.Errorf("Error obtaining certificate: %v", err)
|
||||
logger.Errorf("Error obtaining certificate: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certificate, nil
|
||||
}
|
||||
|
||||
func dnsOverrideDelay(delay parse.Duration) error {
|
||||
func dnsOverrideDelay(ctx context.Context, delay parse.Duration) error {
|
||||
if delay == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if delay > 0 {
|
||||
log.Debugf("Delaying %d rather than validating DNS propagation now.", delay)
|
||||
log.FromContext(ctx).Debugf("Delaying %d rather than validating DNS propagation now.", delay)
|
||||
|
||||
acme.PreCheckDNS = func(_, _ string) (bool, error) {
|
||||
time.Sleep(time.Duration(delay))
|
||||
|
|
@ -532,9 +544,11 @@ func (p *Provider) addCertificateForDomain(domain types.Domain, certificate []by
|
|||
// deleteUnnecessaryDomains deletes from the configuration :
|
||||
// - Duplicated domains
|
||||
// - Domains which are checked by wildcard domain
|
||||
func (p *Provider) deleteUnnecessaryDomains() {
|
||||
func (p *Provider) deleteUnnecessaryDomains(ctx context.Context) {
|
||||
var newDomains []types.Domain
|
||||
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
for idxDomainToCheck, domainToCheck := range p.Domains {
|
||||
keepDomain := true
|
||||
|
||||
|
|
@ -545,7 +559,7 @@ func (p *Provider) deleteUnnecessaryDomains() {
|
|||
|
||||
if reflect.DeepEqual(domain, domainToCheck) {
|
||||
if idxDomainToCheck > idxDomain {
|
||||
log.Warnf("The domain %v is duplicated in the configuration but will be process by ACME provider only once.", domainToCheck)
|
||||
logger.Warnf("The domain %v is duplicated in the configuration but will be process by ACME provider only once.", domainToCheck)
|
||||
keepDomain = false
|
||||
}
|
||||
break
|
||||
|
|
@ -557,11 +571,11 @@ func (p *Provider) deleteUnnecessaryDomains() {
|
|||
for _, domainProcessed := range domainToCheck.ToStrArray() {
|
||||
if idxDomain < idxDomainToCheck && isDomainAlreadyChecked(domainProcessed, domain.ToStrArray()) {
|
||||
// The domain is duplicated in a CN
|
||||
log.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain)
|
||||
logger.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain)
|
||||
continue
|
||||
} else if domain.Main != domainProcessed && strings.HasPrefix(domain.Main, "*") && isDomainAlreadyChecked(domainProcessed, []string{domain.Main}) {
|
||||
// Check if a wildcard can validate the domain
|
||||
log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main)
|
||||
logger.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main)
|
||||
continue
|
||||
}
|
||||
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
|
||||
|
|
@ -584,8 +598,9 @@ func (p *Provider) deleteUnnecessaryDomains() {
|
|||
p.Domains = newDomains
|
||||
}
|
||||
|
||||
func (p *Provider) watchCertificate() {
|
||||
func (p *Provider) watchCertificate(ctx context.Context) {
|
||||
p.certsChan = make(chan *Certificate)
|
||||
|
||||
p.pool.Go(func(stop chan bool) {
|
||||
for {
|
||||
select {
|
||||
|
|
@ -605,9 +620,8 @@ func (p *Provider) watchCertificate() {
|
|||
|
||||
err := p.saveCertificates()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
log.FromContext(ctx).Error(err)
|
||||
}
|
||||
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
|
|
@ -624,50 +638,53 @@ func (p *Provider) saveCertificates() error {
|
|||
}
|
||||
|
||||
func (p *Provider) refreshCertificates() {
|
||||
config := types.ConfigMessage{
|
||||
conf := config.Message{
|
||||
ProviderName: "ACME",
|
||||
Configuration: &types.Configuration{
|
||||
Backends: map[string]*types.Backend{},
|
||||
Frontends: map[string]*types.Frontend{},
|
||||
TLS: []*traefiktls.Configuration{},
|
||||
Configuration: &config.Configuration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
TLS: []*traefiktls.Configuration{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, cert := range p.certificates {
|
||||
certificate := &traefiktls.Certificate{CertFile: traefiktls.FileOrContent(cert.Certificate), KeyFile: traefiktls.FileOrContent(cert.Key)}
|
||||
config.Configuration.TLS = append(config.Configuration.TLS, &traefiktls.Configuration{Certificate: certificate, EntryPoints: []string{p.EntryPoint}})
|
||||
conf.Configuration.TLS = append(conf.Configuration.TLS, &traefiktls.Configuration{Certificate: certificate, EntryPoints: []string{p.EntryPoint}})
|
||||
}
|
||||
p.configurationChan <- config
|
||||
p.configurationChan <- conf
|
||||
}
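refreshCertificates now publishes the new dynamic configuration types: a config.Message carrying Routers, Middlewares, Services and TLS instead of the Backends/Frontends of the old types.ConfigMessage. A sketch of what a consumer on the other end of the channel sees, using only the fields visible in this hunk:

package main

import (
	"fmt"

	"github.com/containous/traefik/config"
)

func main() {
	configurationChan := make(chan config.Message, 1)

	// A provider publishes its dynamic configuration as a config.Message,
	// mirroring what refreshCertificates sends (TLS entries elided here).
	configurationChan <- config.Message{
		ProviderName: "ACME",
		Configuration: &config.Configuration{
			Routers:     map[string]*config.Router{},
			Middlewares: map[string]*config.Middleware{},
			Services:    map[string]*config.Service{},
		},
	}

	// The listener side reads messages keyed by provider name.
	msg := <-configurationChan
	fmt.Printf("received configuration from %q with %d router(s)\n",
		msg.ProviderName, len(msg.Configuration.Routers))
}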
|
||||
|
||||
func (p *Provider) renewCertificates() {
|
||||
log.Info("Testing certificate renew...")
|
||||
func (p *Provider) renewCertificates(ctx context.Context) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
logger.Info("Testing certificate renew...")
|
||||
for _, certificate := range p.certificates {
|
||||
crt, err := getX509Certificate(certificate)
|
||||
crt, err := getX509Certificate(ctx, certificate)
|
||||
// If there's an error, we assume the cert is broken, and needs update
|
||||
// <= 30 days left, renew certificate
|
||||
if err != nil || crt == nil || crt.NotAfter.Before(time.Now().Add(24*30*time.Hour)) {
|
||||
client, err := p.getClient()
|
||||
if err != nil {
|
||||
log.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err)
|
||||
logger.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infof("Renewing certificate from LE : %+v", certificate.Domain)
|
||||
logger.Infof("Renewing certificate from LE : %+v", certificate.Domain)
|
||||
|
||||
renewedCert, err := client.RenewCertificate(acme.CertificateResource{
|
||||
Domain: certificate.Domain.Main,
|
||||
PrivateKey: certificate.Key,
|
||||
Certificate: certificate.Certificate,
|
||||
}, true, OSCPMustStaple)
|
||||
}, true, oscpMustStaple)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err)
|
||||
logger.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(renewedCert.Certificate) == 0 || len(renewedCert.PrivateKey) == 0 {
|
||||
log.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate)
|
||||
logger.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate)
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -678,11 +695,11 @@ func (p *Provider) renewCertificates() {
|
|||
|
||||
// Get provided certificate which check a domains list (Main and SANs)
|
||||
// from static and dynamic provided certificates
|
||||
func (p *Provider) getUncheckedDomains(domainsToCheck []string, checkConfigurationDomains bool) []string {
|
||||
func (p *Provider) getUncheckedDomains(ctx context.Context, domainsToCheck []string, checkConfigurationDomains bool) []string {
|
||||
p.resolvingDomainsMutex.RLock()
|
||||
defer p.resolvingDomainsMutex.RUnlock()
|
||||
|
||||
log.Debugf("Looking for provided certificate(s) to validate %q...", domainsToCheck)
|
||||
log.FromContext(ctx).Debugf("Looking for provided certificate(s) to validate %q...", domainsToCheck)
|
||||
|
||||
allDomains := p.certificateStore.GetAllDomains()
|
||||
|
||||
|
|
@ -703,10 +720,10 @@ func (p *Provider) getUncheckedDomains(domainsToCheck []string, checkConfigurati
|
|||
}
|
||||
}
|
||||
|
||||
return searchUncheckedDomains(domainsToCheck, allDomains)
|
||||
return searchUncheckedDomains(ctx, domainsToCheck, allDomains)
|
||||
}
|
||||
|
||||
func searchUncheckedDomains(domainsToCheck []string, existentDomains []string) []string {
|
||||
func searchUncheckedDomains(ctx context.Context, domainsToCheck []string, existentDomains []string) []string {
|
||||
var uncheckedDomains []string
|
||||
for _, domainToCheck := range domainsToCheck {
|
||||
if !isDomainAlreadyChecked(domainToCheck, existentDomains) {
|
||||
|
|
@ -714,18 +731,21 @@ func searchUncheckedDomains(domainsToCheck []string, existentDomains []string) [
|
|||
}
|
||||
}
|
||||
|
||||
logger := log.FromContext(ctx)
|
||||
if len(uncheckedDomains) == 0 {
|
||||
log.Debugf("No ACME certificate generation required for domains %q.", domainsToCheck)
|
||||
logger.Debugf("No ACME certificate generation required for domains %q.", domainsToCheck)
|
||||
} else {
|
||||
log.Debugf("Domains %q need ACME certificates generation for domains %q.", domainsToCheck, strings.Join(uncheckedDomains, ","))
|
||||
logger.Debugf("Domains %q need ACME certificates generation for domains %q.", domainsToCheck, strings.Join(uncheckedDomains, ","))
|
||||
}
|
||||
return uncheckedDomains
|
||||
}
|
||||
|
||||
func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) {
|
||||
func getX509Certificate(ctx context.Context, certificate *Certificate) (*x509.Certificate, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
tlsCert, err := tls.X509KeyPair(certificate.Certificate, certificate.Key)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to load TLS keypair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err)
|
||||
logger.Errorf("Failed to load TLS key pair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -733,7 +753,7 @@ func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) {
|
|||
if crt == nil {
|
||||
crt, err = x509.ParseCertificate(tlsCert.Certificate[0])
|
||||
if err != nil {
|
||||
log.Errorf("Failed to parse TLS keypair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err)
|
||||
logger.Errorf("Failed to parse TLS key pair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -741,7 +761,7 @@ func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) {
|
|||
}
|
||||
|
||||
// getValidDomains checks if given domain is allowed to generate a ACME certificate and return it
|
||||
func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([]string, error) {
|
||||
func (p *Provider) getValidDomains(ctx context.Context, domain types.Domain, wildcardAllowed bool) ([]string, error) {
|
||||
domains := domain.ToStrArray()
|
||||
if len(domains) == 0 {
|
||||
return nil, errors.New("unable to generate a certificate in ACME provider when no domain is given")
|
||||
|
|
@ -772,7 +792,7 @@ func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([
|
|||
canonicalDomain := types.CanonicalDomain(domain)
|
||||
cleanDomain := acme.UnFqdn(canonicalDomain)
|
||||
if canonicalDomain != cleanDomain {
|
||||
log.Warnf("FQDN detected, please remove the trailing dot: %s", canonicalDomain)
|
||||
log.FromContext(ctx).Warnf("FQDN detected, please remove the trailing dot: %s", canonicalDomain)
|
||||
}
|
||||
cleanDomains = append(cleanDomains, cleanDomain)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"testing"
|
||||
|
||||
|
|
@ -195,7 +196,7 @@ func TestGetUncheckedCertificates(t *testing.T) {
|
|||
resolvingDomains: test.resolvingDomains,
|
||||
}
|
||||
|
||||
domains := acmeProvider.getUncheckedDomains(test.domains, false)
|
||||
domains := acmeProvider.getUncheckedDomains(context.Background(), test.domains, false)
|
||||
assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.")
|
||||
})
|
||||
}
|
||||
|
|
@ -283,7 +284,7 @@ func TestGetValidDomain(t *testing.T) {
|
|||
|
||||
acmeProvider := Provider{Configuration: &Configuration{DNSChallenge: test.dnsChallenge}}
|
||||
|
||||
domains, err := acmeProvider.getValidDomains(test.domains, test.wildcardAllowed)
|
||||
domains, err := acmeProvider.getValidDomains(context.Background(), test.domains, test.wildcardAllowed)
|
||||
|
||||
if len(test.expectedErr) > 0 {
|
||||
assert.EqualError(t, err, test.expectedErr, "Unexpected error.")
|
||||
|
|
@ -465,7 +466,7 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
|
|||
|
||||
acmeProvider := Provider{Configuration: &Configuration{Domains: test.domains}}
|
||||
|
||||
acmeProvider.deleteUnnecessaryDomains()
|
||||
acmeProvider.deleteUnnecessaryDomains(context.Background())
|
||||
assert.Equal(t, test.expectedDomains, acmeProvider.Domains, "unexpected domain")
|
||||
})
|
||||
}
|
||||
|
|
@ -539,7 +540,7 @@ func TestIsAccountMatchingCaServer(t *testing.T) {
|
|||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
result := isAccountMatchingCaServer(test.accountURI, test.serverURI)
|
||||
result := isAccountMatchingCaServer(context.Background(), test.accountURI, test.serverURI)
|
||||
|
||||
assert.Equal(t, test.expected, result)
|
||||
})
|
||||
|
|
@ -675,7 +676,7 @@ func TestInitAccount(t *testing.T) {
|
|||
|
||||
acmeProvider := Provider{account: test.account, Configuration: &Configuration{Email: test.email, KeyType: test.keyType}}
|
||||
|
||||
actualAccount, err := acmeProvider.initAccount()
|
||||
actualAccount, err := acmeProvider.initAccount(context.Background())
|
||||
assert.Nil(t, err, "Init account in error")
|
||||
assert.Equal(t, test.expectedAccount.Email, actualAccount.Email, "unexpected email account")
|
||||
assert.Equal(t, test.expectedAccount.KeyType, actualAccount.KeyType, "unexpected keyType account")
|
||||
|
|
|
|||
72
provider/aggregator/aggregator.go
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
package aggregator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/containous/traefik/config"
|
||||
"github.com/containous/traefik/config/static"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
// ProviderAggregator aggregates providers.
|
||||
type ProviderAggregator struct {
|
||||
providers []provider.Provider
|
||||
constraints types.Constraints
|
||||
}
|
||||
|
||||
// NewProviderAggregator returns an aggregate of all the providers configured in the static configuration.
|
||||
func NewProviderAggregator(conf static.Configuration) ProviderAggregator {
|
||||
p := ProviderAggregator{
|
||||
constraints: conf.Constraints,
|
||||
}
|
||||
|
||||
if conf.File != nil {
|
||||
p.quietAddProvider(conf.File)
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ProviderAggregator) quietAddProvider(provider provider.Provider) {
|
||||
err := p.AddProvider(provider)
|
||||
if err != nil {
|
||||
log.WithoutContext().Errorf("Error while initializing provider %T: %v", provider, err)
|
||||
}
|
||||
}
|
||||
|
||||
// AddProvider adds a provider in the providers map.
|
||||
func (p *ProviderAggregator) AddProvider(provider provider.Provider) error {
|
||||
err := provider.Init(p.constraints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.providers = append(p.providers, provider)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p ProviderAggregator) Init(_ types.Constraints) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provide calls the provide method of every providers
|
||||
func (p ProviderAggregator) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error {
|
||||
for _, prd := range p.providers {
|
||||
jsonConf, err := json.Marshal(prd)
|
||||
if err != nil {
|
||||
log.WithoutContext().Debugf("Cannot marshal the provider configuration %T: %v", prd, err)
|
||||
}
|
||||
log.WithoutContext().Infof("Starting provider %T %s", prd, jsonConf)
|
||||
currentProvider := prd
|
||||
safe.Go(func() {
|
||||
err := currentProvider.Provide(configurationChan, pool)
|
||||
if err != nil {
|
||||
log.WithoutContext().Errorf("Cannot start the provider %T: %v", prd, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
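The aggregator is how the refactored static configuration boots every provider onto a single config.Message channel. A small wiring sketch based only on the functions defined in this new file; the safe.Pool is taken as a parameter because its construction is not part of this diff:

package main

import (
	"fmt"

	"github.com/containous/traefik/config"
	"github.com/containous/traefik/config/static"
	"github.com/containous/traefik/provider/aggregator"
	"github.com/containous/traefik/safe"
)

// startProviders shows how the aggregator is meant to be driven; the pool is
// passed in because building it is outside the scope of this commit.
func startProviders(staticConf static.Configuration, pool *safe.Pool) (<-chan config.Message, error) {
	providerAggregator := aggregator.NewProviderAggregator(staticConf)

	configurationChan := make(chan config.Message, 100)
	if err := providerAggregator.Provide(configurationChan, pool); err != nil {
		return nil, err
	}
	return configurationChan, nil
}

func main() {
	// With an empty static configuration no provider is registered, so the
	// nil pool is never used; a real caller passes a live *safe.Pool.
	ch, err := startProviders(static.Configuration{}, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("configuration channel ready: %T\n", ch)
}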
141
provider/base_provider.go
Normal file
|
|
@ -0,0 +1,141 @@
|
|||
package provider
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"text/template"
|
||||
"unicode"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/Masterminds/sprig"
|
||||
"github.com/containous/traefik/autogen/gentemplates"
|
||||
"github.com/containous/traefik/config"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
// BaseProvider should be inherited by providers.
|
||||
type BaseProvider struct {
|
||||
Watch bool `description:"Watch provider" export:"true"`
|
||||
Filename string `description:"Override default configuration template. For advanced users :)" export:"true"`
|
||||
Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"`
|
||||
Trace bool `description:"Display additional provider logs (if available)." export:"true"`
|
||||
DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"`
|
||||
}
|
||||
|
||||
// Init for compatibility reason the BaseProvider implements an empty Init.
|
||||
func (p *BaseProvider) Init(constraints types.Constraints) error {
|
||||
p.Constraints = append(p.Constraints, constraints...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MatchConstraints must match with EVERY single constraint
|
||||
// returns first constraint that do not match or nil.
|
||||
func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) {
|
||||
// if there is no tags and no constraints, filtering is disabled
|
||||
if len(tags) == 0 && len(p.Constraints) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
for _, constraint := range p.Constraints {
|
||||
// xor: if ok and constraint.MustMatch are equal, then no tag is currently matching with the constraint
|
||||
if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch {
|
||||
return false, constraint
|
||||
}
|
||||
}
|
||||
|
||||
// If no constraint or every constraints matching
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// GetConfiguration returns the provider configuration from default template (file or content) or overrode template file.
|
||||
func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*config.Configuration, error) {
|
||||
tmplContent, err := p.getTemplateContent(defaultTemplate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.CreateConfiguration(tmplContent, funcMap, templateObjects)
|
||||
}
|
||||
|
||||
// CreateConfiguration creates a provider configuration from content using templating.
|
||||
func (p *BaseProvider) CreateConfiguration(tmplContent string, funcMap template.FuncMap, templateObjects interface{}) (*config.Configuration, error) {
|
||||
var defaultFuncMap = sprig.TxtFuncMap()
|
||||
// tolower is deprecated in favor of sprig's lower function
|
||||
defaultFuncMap["tolower"] = strings.ToLower
|
||||
defaultFuncMap["normalize"] = Normalize
|
||||
defaultFuncMap["split"] = split
|
||||
for funcID, funcElement := range funcMap {
|
||||
defaultFuncMap[funcID] = funcElement
|
||||
}
|
||||
|
||||
tmpl := template.New(p.Filename).Funcs(defaultFuncMap)
|
||||
|
||||
_, err := tmpl.Parse(tmplContent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buffer bytes.Buffer
|
||||
err = tmpl.Execute(&buffer, templateObjects)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var renderedTemplate = buffer.String()
|
||||
if p.DebugLogGeneratedTemplate {
|
||||
log.Debugf("Template content: %s", tmplContent)
|
||||
log.Debugf("Rendering results: %s", renderedTemplate)
|
||||
}
|
||||
return p.DecodeConfiguration(renderedTemplate)
|
||||
}
|
||||
|
||||
// DecodeConfiguration Decodes a *types.Configuration from a content.
|
||||
func (p *BaseProvider) DecodeConfiguration(content string) (*config.Configuration, error) {
|
||||
configuration := new(config.Configuration)
|
||||
if _, err := toml.Decode(content, configuration); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return configuration, nil
|
||||
}
|
||||
|
||||
func (p *BaseProvider) getTemplateContent(defaultTemplateFile string) (string, error) {
|
||||
if len(p.Filename) > 0 {
|
||||
buf, err := ioutil.ReadFile(p.Filename)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
if strings.HasSuffix(defaultTemplateFile, ".tmpl") {
|
||||
buf, err := gentemplates.Asset(defaultTemplateFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
return defaultTemplateFile, nil
|
||||
}
|
||||
|
||||
func split(sep, s string) []string {
|
||||
return strings.Split(s, sep)
|
||||
}
|
||||
|
||||
// Normalize transforms a string that work with the rest of traefik.
|
||||
// Replace '.' with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78
|
||||
func Normalize(name string) string {
|
||||
fargs := func(c rune) bool {
|
||||
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
|
||||
}
|
||||
// get function
|
||||
return strings.Join(strings.FieldsFunc(name, fargs), "-")
|
||||
}
|
||||
|
||||
// ReverseStringSlice inverts the order of the given slice of string.
|
||||
func ReverseStringSlice(slice *[]string) {
|
||||
for i, j := 0, len(*slice)-1; i < j; i, j = i+1, j-1 {
|
||||
(*slice)[i], (*slice)[j] = (*slice)[j], (*slice)[i]
|
||||
}
|
||||
}
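base_provider.go also carries the small string helpers shared by all providers. A quick usage sketch of the two exported ones, assuming the provider package path used elsewhere in this commit:

package main

import (
	"fmt"

	"github.com/containous/traefik/provider"
)

func main() {
	// Normalize keeps only letters and digits and joins the remaining fields
	// with dashes, so generated keys stay TOML-safe.
	fmt.Println(provider.Normalize("frontend.docker_web-app"))
	// frontend-docker-web-app

	// ReverseStringSlice inverts a slice in place.
	entryPoints := []string{"web", "websecure", "traefik"}
	provider.ReverseStringSlice(&entryPoints)
	fmt.Println(entryPoints) // [traefik websecure web]
}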
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
package boltdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/abronan/valkeyrie/store"
|
||||
"github.com/abronan/valkeyrie/store/boltdb"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/kv"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
kv.Provider `mapstructure:",squash" export:"true"`
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
err := p.Provider.Init(constraints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := p.CreateStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to Connect to KV store: %v", err)
|
||||
}
|
||||
|
||||
p.SetKVClient(store)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provide allows the boltdb provider to Provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
return p.Provider.Provide(configurationChan, pool)
|
||||
}
|
||||
|
||||
// CreateStore creates the KV store
|
||||
func (p *Provider) CreateStore() (store.Store, error) {
|
||||
p.SetStoreType(store.BOLTDB)
|
||||
boltdb.Register()
|
||||
return p.Provider.CreateStore()
|
||||
}
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/abronan/valkeyrie/store"
|
||||
"github.com/abronan/valkeyrie/store/consul"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/kv"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the p.
|
||||
type Provider struct {
|
||||
kv.Provider `mapstructure:",squash" export:"true"`
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
err := p.Provider.Init(constraints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := p.CreateStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to Connect to KV store: %v", err)
|
||||
}
|
||||
|
||||
p.SetKVClient(store)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provide allows the consul provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
return p.Provider.Provide(configurationChan, pool)
|
||||
}
|
||||
|
||||
// CreateStore creates the KV store
|
||||
func (p *Provider) CreateStore() (store.Store, error) {
|
||||
p.SetStoreType(store.CONSUL)
|
||||
consul.Register()
|
||||
return p.Provider.CreateStore()
|
||||
}
|
||||
|
|
@ -1,227 +0,0 @@
|
|||
package consulcatalog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
||||
func (p *Provider) buildConfiguration(catalog []catalogUpdate) *types.Configuration {
|
||||
var funcMap = template.FuncMap{
|
||||
"getAttribute": p.getAttribute,
|
||||
"getTag": getTag,
|
||||
"hasTag": hasTag,
|
||||
|
||||
// Backend functions
|
||||
"getNodeBackendName": getNodeBackendName,
|
||||
"getServiceBackendName": getServiceBackendName,
|
||||
"getBackendAddress": getBackendAddress,
|
||||
"getServerName": getServerName,
|
||||
"getCircuitBreaker": label.GetCircuitBreaker,
|
||||
"getLoadBalancer": label.GetLoadBalancer,
|
||||
"getMaxConn": label.GetMaxConn,
|
||||
"getHealthCheck": label.GetHealthCheck,
|
||||
"getBuffering": label.GetBuffering,
|
||||
"getResponseForwarding": label.GetResponseForwarding,
|
||||
"getServer": p.getServer,
|
||||
|
||||
// Frontend functions
|
||||
"getFrontendRule": p.getFrontendRule,
|
||||
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
|
||||
"getAuth": label.GetAuth,
|
||||
"getFrontEndEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
|
||||
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
|
||||
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
|
||||
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
|
||||
"getPassTLSClientCert": label.GetTLSClientCert,
|
||||
"getWhiteList": label.GetWhiteList,
|
||||
"getRedirect": label.GetRedirect,
|
||||
"getErrorPages": label.GetErrorPages,
|
||||
"getRateLimit": label.GetRateLimit,
|
||||
"getHeaders": label.GetHeaders,
|
||||
}
|
||||
|
||||
var allNodes []*api.ServiceEntry
|
||||
var services []*serviceUpdate
|
||||
for _, info := range catalog {
|
||||
if len(info.Nodes) > 0 {
|
||||
services = append(services, p.generateFrontends(info.Service)...)
|
||||
allNodes = append(allNodes, info.Nodes...)
|
||||
}
|
||||
}
|
||||
// Ensure a stable ordering of nodes so that identical configurations may be detected
|
||||
sort.Sort(nodeSorter(allNodes))
|
||||
|
||||
templateObjects := struct {
|
||||
Services []*serviceUpdate
|
||||
Nodes []*api.ServiceEntry
|
||||
}{
|
||||
Services: services,
|
||||
Nodes: allNodes,
|
||||
}
|
||||
|
||||
configuration, err := p.GetConfiguration("templates/consul_catalog.tmpl", funcMap, templateObjects)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to create config")
|
||||
}
|
||||
|
||||
return configuration
|
||||
}
|
||||
|
||||
// Specific functions
|
||||
|
||||
func (p *Provider) getFrontendRule(service serviceUpdate) string {
|
||||
customFrontendRule := label.GetStringValue(service.TraefikLabels, label.TraefikFrontendRule, "")
|
||||
if customFrontendRule == "" {
|
||||
customFrontendRule = p.FrontEndRule
|
||||
}
|
||||
|
||||
tmpl := p.frontEndRuleTemplate
|
||||
tmpl, err := tmpl.Parse(customFrontendRule)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to parse Consul Catalog custom frontend rule: %v", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
templateObjects := struct {
|
||||
ServiceName string
|
||||
Domain string
|
||||
Attributes []string
|
||||
}{
|
||||
ServiceName: service.ServiceName,
|
||||
Domain: p.Domain,
|
||||
Attributes: service.Attributes,
|
||||
}
|
||||
|
||||
var buffer bytes.Buffer
|
||||
err = tmpl.Execute(&buffer, templateObjects)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to execute Consul Catalog custom frontend rule template: %v", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(buffer.String(), ".")
|
||||
}
|
||||
|
||||
func (p *Provider) getServer(node *api.ServiceEntry) types.Server {
|
||||
scheme := p.getAttribute(label.SuffixProtocol, node.Service.Tags, label.DefaultProtocol)
|
||||
address := getBackendAddress(node)
|
||||
|
||||
return types.Server{
|
||||
URL: fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(address, strconv.Itoa(node.Service.Port))),
|
||||
Weight: p.getWeight(node.Service.Tags),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) setupFrontEndRuleTemplate() {
|
||||
var FuncMap = template.FuncMap{
|
||||
"getAttribute": p.getAttribute,
|
||||
"getTag": getTag,
|
||||
"hasTag": hasTag,
|
||||
}
|
||||
p.frontEndRuleTemplate = template.New("consul catalog frontend rule").Funcs(FuncMap)
|
||||
}
|
||||
|
||||
// Specific functions
|
||||
|
||||
func getServiceBackendName(service *serviceUpdate) string {
|
||||
if service.ParentServiceName != "" {
|
||||
return strings.ToLower(service.ParentServiceName)
|
||||
}
|
||||
return strings.ToLower(service.ServiceName)
|
||||
}
|
||||
|
||||
func getNodeBackendName(node *api.ServiceEntry) string {
|
||||
return strings.ToLower(node.Service.Service)
|
||||
}
|
||||
|
||||
func getBackendAddress(node *api.ServiceEntry) string {
|
||||
if node.Service.Address != "" {
|
||||
return node.Service.Address
|
||||
}
|
||||
return node.Node.Address
|
||||
}
|
||||
|
||||
func getServerName(node *api.ServiceEntry, index int) string {
|
||||
serviceName := node.Service.Service + node.Service.Address + strconv.Itoa(node.Service.Port)
|
||||
// TODO sort tags ?
|
||||
serviceName += strings.Join(node.Service.Tags, "")
|
||||
|
||||
hash := sha1.New()
|
||||
_, err := hash.Write([]byte(serviceName))
|
||||
if err != nil {
|
||||
// Should never happen: writing to a SHA-1 hash never fails
|
||||
log.Error(err)
|
||||
} else {
|
||||
serviceName = base64.URLEncoding.EncodeToString(hash.Sum(nil))
|
||||
}
|
||||
|
||||
// Add the server index so the generated name stays unique
|
||||
return provider.Normalize(node.Service.Service + "-" + strconv.Itoa(index) + "-" + serviceName)
|
||||
}
|
||||
|
||||
func (p *Provider) getWeight(tags []string) int {
|
||||
labels := tagsToNeutralLabels(tags, p.Prefix)
|
||||
return label.GetIntValue(labels, p.getPrefixedName(label.SuffixWeight), label.DefaultWeight)
|
||||
}
|
||||
|
||||
// Base functions
|
||||
|
||||
func (p *Provider) getAttribute(name string, tags []string, defaultValue string) string {
|
||||
return getTag(p.getPrefixedName(name), tags, defaultValue)
|
||||
}
|
||||
|
||||
func (p *Provider) getPrefixedName(name string) string {
|
||||
if len(p.Prefix) > 0 && len(name) > 0 {
|
||||
return p.Prefix + "." + name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func hasTag(name string, tags []string) bool {
|
||||
lowerName := strings.ToLower(name)
|
||||
|
||||
for _, tag := range tags {
|
||||
lowerTag := strings.ToLower(tag)
|
||||
|
||||
// Consul tags can be either singular markers or key=value pairs
|
||||
if strings.HasPrefix(lowerTag, lowerName+"=") || lowerTag == lowerName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getTag(name string, tags []string, defaultValue string) string {
|
||||
lowerName := strings.ToLower(name)
|
||||
|
||||
for _, tag := range tags {
|
||||
lowerTag := strings.ToLower(tag)
|
||||
|
||||
// Consul tags can be either singular markers or key=value pairs
|
||||
if strings.HasPrefix(lowerTag, lowerName+"=") || lowerTag == lowerName {
|
||||
// If the tag is a key=value pair, split it at the first '='
|
||||
kv := strings.SplitN(tag, "=", 2)
|
||||
|
||||
// If the returned result is a key=value pair, return the 'value' component
|
||||
if len(kv) == 2 {
|
||||
return kv[1]
|
||||
}
|
||||
// If the returned result is a singular marker, return the 'key' component
|
||||
return kv[0]
|
||||
}
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
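// Usage sketch (not part of the original file): how the helpers above resolve
// Consul tags, which are either singular markers or key=value pairs. The sample
// tags and the `p` value are illustrative assumptions.
//
//	p := &Provider{Prefix: "traefik"}
//	tags := []string{"traefik.enable=true", "http"}
//
//	hasTag("http", tags)                               // true (singular marker)
//	getTag(p.getPrefixedName("enable"), tags, "false") // "true" (value of a key=value pair)
//	getTag(p.getPrefixedName("weight"), tags, "1")     // "1" (falls back to the default)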
File diff suppressed because it is too large
|
|
@ -1,617 +0,0 @@
|
|||
package consulcatalog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/ty/fun"
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultWatchWaitTime is the duration to wait when polling Consul
|
||||
DefaultWatchWaitTime = 15 * time.Second
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the Consul catalog provider.
|
||||
type Provider struct {
|
||||
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||
Endpoint string `description:"Consul server endpoint"`
|
||||
Domain string `description:"Default domain used"`
|
||||
Stale bool `description:"Use stale consistency for catalog reads" export:"true"`
|
||||
ExposedByDefault bool `description:"Expose Consul services by default" export:"true"`
|
||||
Prefix string `description:"Prefix used for Consul catalog tags" export:"true"`
|
||||
FrontEndRule string `description:"Frontend rule used for Consul services" export:"true"`
|
||||
TLS *types.ClientTLS `description:"Enable TLS support" export:"true"`
|
||||
client *api.Client
|
||||
frontEndRuleTemplate *template.Template
|
||||
}
|
||||
|
||||
// Service represents a Consul service.
|
||||
type Service struct {
|
||||
Name string
|
||||
Tags []string
|
||||
Nodes []string
|
||||
Addresses []string
|
||||
Ports []int
|
||||
}
|
||||
|
||||
type serviceUpdate struct {
|
||||
ServiceName string
|
||||
ParentServiceName string
|
||||
Attributes []string
|
||||
TraefikLabels map[string]string
|
||||
}
|
||||
|
||||
type frontendSegment struct {
|
||||
Name string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
type catalogUpdate struct {
|
||||
Service *serviceUpdate
|
||||
Nodes []*api.ServiceEntry
|
||||
}
|
||||
|
||||
type nodeSorter []*api.ServiceEntry
|
||||
|
||||
func (a nodeSorter) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
|
||||
func (a nodeSorter) Swap(i int, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
func (a nodeSorter) Less(i int, j int) bool {
|
||||
lEntry := a[i]
|
||||
rEntry := a[j]
|
||||
|
||||
ls := strings.ToLower(lEntry.Service.Service)
|
||||
lr := strings.ToLower(rEntry.Service.Service)
|
||||
|
||||
if ls != lr {
|
||||
return ls < lr
|
||||
}
|
||||
if lEntry.Service.Address != rEntry.Service.Address {
|
||||
return lEntry.Service.Address < rEntry.Service.Address
|
||||
}
|
||||
if lEntry.Node.Address != rEntry.Node.Address {
|
||||
return lEntry.Node.Address < rEntry.Node.Address
|
||||
}
|
||||
return lEntry.Service.Port < rEntry.Service.Port
|
||||
}
|
||||
|
||||
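// Ordering sketch (not part of the original file): nodeSorter sorts by service
// name, then service address, then node address, then port, so repeated catalog
// reads yield the same slice and identical configurations can be detected. The
// sample entries are illustrative.
//
//	entries := []*api.ServiceEntry{
//		{Service: &api.AgentService{Service: "foo", Address: "10.0.0.2", Port: 80}, Node: &api.Node{Address: "10.0.0.2"}},
//		{Service: &api.AgentService{Service: "bar", Address: "10.0.0.1", Port: 80}, Node: &api.Node{Address: "10.0.0.1"}},
//	}
//	sort.Sort(nodeSorter(entries))
//	// entries is now ordered "bar" before "foo"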
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
err := p.BaseProvider.Init(constraints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := p.createClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.client = client
|
||||
p.setupFrontEndRuleTemplate()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provide allows the consul catalog provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
pool.Go(func(stop chan bool) {
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Consul connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
operation := func() error {
|
||||
return p.watch(configurationChan, stop)
|
||||
}
|
||||
errRetry := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if errRetry != nil {
|
||||
log.Errorf("Cannot connect to consul server %+v", errRetry)
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) createClient() (*api.Client, error) {
|
||||
config := api.DefaultConfig()
|
||||
config.Address = p.Endpoint
|
||||
if p.TLS != nil {
|
||||
tlsConfig, err := p.TLS.CreateTLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.Scheme = "https"
|
||||
config.Transport.TLSClientConfig = tlsConfig
|
||||
}
|
||||
|
||||
client, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (p *Provider) watch(configurationChan chan<- types.ConfigMessage, stop chan bool) error {
|
||||
stopCh := make(chan struct{})
|
||||
watchCh := make(chan map[string][]string)
|
||||
errorCh := make(chan error)
|
||||
|
||||
var errorOnce sync.Once
|
||||
notifyError := func(err error) {
|
||||
errorOnce.Do(func() {
|
||||
errorCh <- err
|
||||
})
|
||||
}
|
||||
|
||||
p.watchHealthState(stopCh, watchCh, notifyError)
|
||||
p.watchCatalogServices(stopCh, watchCh, notifyError)
|
||||
|
||||
defer close(stopCh)
|
||||
defer close(watchCh)
|
||||
|
||||
safe.Go(func() {
|
||||
for index := range watchCh {
|
||||
log.Debug("List of services changed")
|
||||
nodes, err := p.getNodes(index)
|
||||
if err != nil {
|
||||
notifyError(err)
|
||||
}
|
||||
configuration := p.buildConfiguration(nodes)
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "consul_catalog",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return nil
|
||||
case err := <-errorCh:
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) watchCatalogServices(stopCh <-chan struct{}, watchCh chan<- map[string][]string, notifyError func(error)) {
|
||||
catalog := p.client.Catalog()
|
||||
|
||||
safe.Go(func() {
|
||||
// variable to hold previous state
|
||||
var flashback map[string]Service
|
||||
|
||||
options := &api.QueryOptions{WaitTime: DefaultWatchWaitTime, AllowStale: p.Stale}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
data, meta, err := catalog.Services(options)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list services: %v", err)
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
if options.WaitIndex == meta.LastIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
options.WaitIndex = meta.LastIndex
|
||||
|
||||
if data != nil {
|
||||
current := make(map[string]Service)
|
||||
for key, value := range data {
|
||||
nodes, _, err := catalog.Service(key, "", &api.QueryOptions{AllowStale: p.Stale})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get detail of service %s: %v", key, err)
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
nodesID := getServiceIds(nodes)
|
||||
ports := getServicePorts(nodes)
|
||||
addresses := getServiceAddresses(nodes)
|
||||
|
||||
if service, ok := current[key]; ok {
|
||||
service.Tags = value
|
||||
service.Nodes = nodesID
|
||||
service.Ports = ports
|
||||
} else {
|
||||
service := Service{
|
||||
Name: key,
|
||||
Tags: value,
|
||||
Nodes: nodesID,
|
||||
Addresses: addresses,
|
||||
Ports: ports,
|
||||
}
|
||||
current[key] = service
|
||||
}
|
||||
}
|
||||
|
||||
// A critical note is that the return of a blocking request is no guarantee of a change.
|
||||
// It is possible that there was an idempotent write that does not affect the result of the query.
|
||||
// Thus an extra check for changes is required.
|
||||
if hasChanged(current, flashback) {
|
||||
watchCh <- data
|
||||
flashback = current
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
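// Pattern sketch (not part of the original file): the loop above relies on Consul
// blocking queries. Passing the last seen index back as WaitIndex makes
// catalog.Services block for up to WaitTime, and comparing it with meta.LastIndex
// filters out spurious wake-ups. Variable names and the handler are illustrative.
//
//	options := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
//	for {
//		data, meta, err := catalog.Services(options)
//		if err != nil {
//			return err
//		}
//		if options.WaitIndex == meta.LastIndex {
//			continue // woke up without a real change
//		}
//		options.WaitIndex = meta.LastIndex
//		process(data) // hypothetical handler
//	}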
func (p *Provider) watchHealthState(stopCh <-chan struct{}, watchCh chan<- map[string][]string, notifyError func(error)) {
|
||||
health := p.client.Health()
|
||||
catalog := p.client.Catalog()
|
||||
|
||||
safe.Go(func() {
|
||||
// variable to hold previous state
|
||||
var flashback map[string][]string
|
||||
var flashbackMaintenance []string
|
||||
|
||||
options := &api.QueryOptions{WaitTime: DefaultWatchWaitTime, AllowStale: p.Stale}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Listen for changes that lead to the `passing` state or degrade from it.
|
||||
healthyState, meta, err := health.State("any", options)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to retrieve health checks")
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
var current = make(map[string][]string)
|
||||
var currentFailing = make(map[string]*api.HealthCheck)
|
||||
var maintenance []string
|
||||
if healthyState != nil {
|
||||
for _, healthy := range healthyState {
|
||||
key := fmt.Sprintf("%s-%s", healthy.Node, healthy.ServiceID)
|
||||
_, failing := currentFailing[key]
|
||||
if healthy.Status == "passing" && !failing {
|
||||
current[key] = append(current[key], healthy.Node)
|
||||
} else if strings.HasPrefix(healthy.CheckID, "_service_maintenance") || strings.HasPrefix(healthy.CheckID, "_node_maintenance") {
|
||||
maintenance = append(maintenance, healthy.CheckID)
|
||||
} else {
|
||||
currentFailing[key] = healthy
|
||||
if _, ok := current[key]; ok {
|
||||
delete(current, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If LastIndex didn't change then it means `Get` returned
|
||||
// because of the WaitTime and the key didn't change.
|
||||
if options.WaitIndex == meta.LastIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
options.WaitIndex = meta.LastIndex
|
||||
|
||||
// The response should be unified with watchCatalogServices
|
||||
data, _, err := catalog.Services(&api.QueryOptions{AllowStale: p.Stale})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list services: %v", err)
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
if data != nil {
|
||||
// A critical note is that the return of a blocking request is no guarantee of a change.
|
||||
// It is possible that there was an idempotent write that does not affect the result of the query.
|
||||
// Thus an extra check for changes is required.
|
||||
addedKeys, removedKeys, changedKeys := getChangedHealth(current, flashback)
|
||||
|
||||
if len(addedKeys) > 0 || len(removedKeys) > 0 || len(changedKeys) > 0 {
|
||||
log.WithField("DiscoveredServices", addedKeys).
|
||||
WithField("MissingServices", removedKeys).
|
||||
WithField("ChangedServices", changedKeys).
|
||||
Debug("Health State change detected.")
|
||||
|
||||
watchCh <- data
|
||||
flashback = current
|
||||
flashbackMaintenance = maintenance
|
||||
} else {
|
||||
addedKeysMaintenance, removedMaintenance := getChangedStringKeys(maintenance, flashbackMaintenance)
|
||||
|
||||
if len(addedKeysMaintenance) > 0 || len(removedMaintenance) > 0 {
|
||||
log.WithField("MaintenanceMode", maintenance).Debug("Maintenance change detected.")
|
||||
watchCh <- data
|
||||
flashback = current
|
||||
flashbackMaintenance = maintenance
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Provider) getNodes(index map[string][]string) ([]catalogUpdate, error) {
|
||||
visited := make(map[string]bool)
|
||||
|
||||
var nodes []catalogUpdate
|
||||
for service := range index {
|
||||
name := strings.ToLower(service)
|
||||
if !strings.Contains(name, " ") && !visited[name] {
|
||||
visited[name] = true
|
||||
log.WithField("service", name).Debug("Fetching service")
|
||||
healthy, err := p.healthyNodes(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// healthy.Nodes can be empty if the constraints do not match, without returning an error
|
||||
if healthy.Service != nil && len(healthy.Nodes) > 0 {
|
||||
nodes = append(nodes, healthy)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func hasChanged(current map[string]Service, previous map[string]Service) bool {
|
||||
if len(current) != len(previous) {
|
||||
return true
|
||||
}
|
||||
addedServiceKeys, removedServiceKeys := getChangedServiceKeys(current, previous)
|
||||
return len(removedServiceKeys) > 0 || len(addedServiceKeys) > 0 || hasServiceChanged(current, previous)
|
||||
}
|
||||
|
||||
func getChangedServiceKeys(current map[string]Service, previous map[string]Service) ([]string, []string) {
|
||||
currKeySet := fun.Set(fun.Keys(current).([]string)).(map[string]bool)
|
||||
prevKeySet := fun.Set(fun.Keys(previous).([]string)).(map[string]bool)
|
||||
|
||||
addedKeys := fun.Difference(currKeySet, prevKeySet).(map[string]bool)
|
||||
removedKeys := fun.Difference(prevKeySet, currKeySet).(map[string]bool)
|
||||
|
||||
return fun.Keys(addedKeys).([]string), fun.Keys(removedKeys).([]string)
|
||||
}
|
||||
|
||||
func hasServiceChanged(current map[string]Service, previous map[string]Service) bool {
|
||||
for key, value := range current {
|
||||
if prevValue, ok := previous[key]; ok {
|
||||
addedNodesKeys, removedNodesKeys := getChangedStringKeys(value.Nodes, prevValue.Nodes)
|
||||
if len(addedNodesKeys) > 0 || len(removedNodesKeys) > 0 {
|
||||
return true
|
||||
}
|
||||
addedTagsKeys, removedTagsKeys := getChangedStringKeys(value.Tags, prevValue.Tags)
|
||||
if len(addedTagsKeys) > 0 || len(removedTagsKeys) > 0 {
|
||||
return true
|
||||
}
|
||||
addedAddressesKeys, removedAddressesKeys := getChangedStringKeys(value.Addresses, prevValue.Addresses)
|
||||
if len(addedAddressesKeys) > 0 || len(removedAddressesKeys) > 0 {
|
||||
return true
|
||||
}
|
||||
addedPortsKeys, removedPortsKeys := getChangedIntKeys(value.Ports, prevValue.Ports)
|
||||
if len(addedPortsKeys) > 0 || len(removedPortsKeys) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getChangedStringKeys(currState []string, prevState []string) ([]string, []string) {
|
||||
currKeySet := fun.Set(currState).(map[string]bool)
|
||||
prevKeySet := fun.Set(prevState).(map[string]bool)
|
||||
|
||||
addedKeys := fun.Difference(currKeySet, prevKeySet).(map[string]bool)
|
||||
removedKeys := fun.Difference(prevKeySet, currKeySet).(map[string]bool)
|
||||
|
||||
return fun.Keys(addedKeys).([]string), fun.Keys(removedKeys).([]string)
|
||||
}
|
||||
|
||||
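// Usage sketch (not part of the original file): the change detectors above compute
// symmetric set differences via github.com/BurntSushi/ty/fun, mirroring the calls
// used in this file. The sample values come from the test cases further below.
//
//	added, removed := getChangedStringKeys([]string{"carotte"}, []string{"chou"})
//	// added   == []string{"carotte"}
//	// removed == []string{"chou"}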
func getChangedHealth(current map[string][]string, previous map[string][]string) ([]string, []string, []string) {
|
||||
currKeySet := fun.Set(fun.Keys(current).([]string)).(map[string]bool)
|
||||
prevKeySet := fun.Set(fun.Keys(previous).([]string)).(map[string]bool)
|
||||
|
||||
addedKeys := fun.Difference(currKeySet, prevKeySet).(map[string]bool)
|
||||
removedKeys := fun.Difference(prevKeySet, currKeySet).(map[string]bool)
|
||||
|
||||
var changedKeys []string
|
||||
|
||||
for key, value := range current {
|
||||
if prevValue, ok := previous[key]; ok {
|
||||
addedNodesKeys, removedNodesKeys := getChangedStringKeys(value, prevValue)
|
||||
if len(addedNodesKeys) > 0 || len(removedNodesKeys) > 0 {
|
||||
changedKeys = append(changedKeys, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fun.Keys(addedKeys).([]string), fun.Keys(removedKeys).([]string), changedKeys
|
||||
}
|
||||
|
||||
func getChangedIntKeys(currState []int, prevState []int) ([]int, []int) {
|
||||
currKeySet := fun.Set(currState).(map[int]bool)
|
||||
prevKeySet := fun.Set(prevState).(map[int]bool)
|
||||
|
||||
addedKeys := fun.Difference(currKeySet, prevKeySet).(map[int]bool)
|
||||
removedKeys := fun.Difference(prevKeySet, currKeySet).(map[int]bool)
|
||||
|
||||
return fun.Keys(addedKeys).([]int), fun.Keys(removedKeys).([]int)
|
||||
}
|
||||
|
||||
func getServiceIds(services []*api.CatalogService) []string {
|
||||
var serviceIds []string
|
||||
for _, service := range services {
|
||||
serviceIds = append(serviceIds, service.ID)
|
||||
}
|
||||
return serviceIds
|
||||
}
|
||||
|
||||
func getServicePorts(services []*api.CatalogService) []int {
|
||||
var servicePorts []int
|
||||
for _, service := range services {
|
||||
servicePorts = append(servicePorts, service.ServicePort)
|
||||
}
|
||||
return servicePorts
|
||||
}
|
||||
|
||||
func getServiceAddresses(services []*api.CatalogService) []string {
|
||||
var serviceAddresses []string
|
||||
for _, service := range services {
|
||||
serviceAddresses = append(serviceAddresses, service.ServiceAddress)
|
||||
}
|
||||
return serviceAddresses
|
||||
}
|
||||
|
||||
func (p *Provider) healthyNodes(service string) (catalogUpdate, error) {
|
||||
health := p.client.Health()
|
||||
data, _, err := health.Service(service, "", true, &api.QueryOptions{AllowStale: p.Stale})
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Failed to fetch details of %s", service)
|
||||
return catalogUpdate{}, err
|
||||
}
|
||||
|
||||
nodes := fun.Filter(func(node *api.ServiceEntry) bool {
|
||||
return p.nodeFilter(service, node)
|
||||
}, data).([]*api.ServiceEntry)
|
||||
|
||||
// Merge tags of nodes matching constraints, in a single slice.
|
||||
tags := fun.Foldl(func(node *api.ServiceEntry, set []string) []string {
|
||||
return fun.Keys(fun.Union(
|
||||
fun.Set(set),
|
||||
fun.Set(node.Service.Tags),
|
||||
).(map[string]bool)).([]string)
|
||||
}, []string{}, nodes).([]string)
|
||||
|
||||
labels := tagsToNeutralLabels(tags, p.Prefix)
|
||||
|
||||
return catalogUpdate{
|
||||
Service: &serviceUpdate{
|
||||
ServiceName: service,
|
||||
Attributes: tags,
|
||||
TraefikLabels: labels,
|
||||
},
|
||||
Nodes: nodes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Provider) nodeFilter(service string, node *api.ServiceEntry) bool {
|
||||
// Filter out disabled applications.
|
||||
if !p.isServiceEnabled(node) {
|
||||
log.Debugf("Filtering disabled Consul service %s", service)
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter by constraints.
|
||||
constraintTags := p.getConstraintTags(node.Service.Tags)
|
||||
ok, failingConstraint := p.MatchConstraints(constraintTags)
|
||||
if !ok && failingConstraint != nil {
|
||||
log.Debugf("Service %v pruned by '%v' constraint", service, failingConstraint.String())
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Provider) isServiceEnabled(node *api.ServiceEntry) bool {
|
||||
rawValue := getTag(p.getPrefixedName(label.SuffixEnable), node.Service.Tags, "")
|
||||
|
||||
if len(rawValue) == 0 {
|
||||
return p.ExposedByDefault
|
||||
}
|
||||
|
||||
value, err := strconv.ParseBool(rawValue)
|
||||
if err != nil {
|
||||
log.Errorf("Invalid value for %s: %s", label.SuffixEnable, rawValue)
|
||||
return p.ExposedByDefault
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (p *Provider) getConstraintTags(tags []string) []string {
|
||||
var values []string
|
||||
|
||||
prefix := p.getPrefixedName("tags=")
|
||||
for _, tag := range tags {
|
||||
// We look for a Consul tag named 'traefik.tags' (unless a different 'prefix' is configured)
|
||||
if strings.HasPrefix(strings.ToLower(tag), prefix) {
|
||||
// If a 'traefik.tags=' tag is found, split its value on ',' and add the parts to the returned list
|
||||
splitedTags := label.SplitAndTrimString(tag[len(prefix):], ",")
|
||||
values = append(values, splitedTags...)
|
||||
}
|
||||
}
|
||||
|
||||
return values
|
||||
}
|
||||
|
||||
func (p *Provider) generateFrontends(service *serviceUpdate) []*serviceUpdate {
|
||||
frontends := make([]*serviceUpdate, 0)
|
||||
// to support <prefix>.frontend.xxx
|
||||
frontends = append(frontends, &serviceUpdate{
|
||||
ServiceName: service.ServiceName,
|
||||
ParentServiceName: service.ServiceName,
|
||||
Attributes: service.Attributes,
|
||||
TraefikLabels: service.TraefikLabels,
|
||||
})
|
||||
|
||||
// loop over children of <prefix>.frontends.*
|
||||
for _, frontend := range getSegments(p.Prefix+".frontends", p.Prefix, service.TraefikLabels) {
|
||||
frontends = append(frontends, &serviceUpdate{
|
||||
ServiceName: service.ServiceName + "-" + frontend.Name,
|
||||
ParentServiceName: service.ServiceName,
|
||||
Attributes: service.Attributes,
|
||||
TraefikLabels: frontend.Labels,
|
||||
})
|
||||
}
|
||||
|
||||
return frontends
|
||||
}
|
||||
func getSegments(path string, prefix string, tree map[string]string) []*frontendSegment {
|
||||
segments := make([]*frontendSegment, 0)
|
||||
// find segment names
|
||||
segmentNames := make(map[string]bool)
|
||||
for key := range tree {
|
||||
if strings.HasPrefix(key, path+".") {
|
||||
segmentNames[strings.SplitN(strings.TrimPrefix(key, path+"."), ".", 2)[0]] = true
|
||||
}
|
||||
}
|
||||
|
||||
// get labels for each segment found
|
||||
for segment := range segmentNames {
|
||||
labels := make(map[string]string)
|
||||
for key, value := range tree {
|
||||
if strings.HasPrefix(key, path+"."+segment) {
|
||||
labels[prefix+".frontend"+strings.TrimPrefix(key, path+"."+segment)] = value
|
||||
}
|
||||
}
|
||||
segments = append(segments, &frontendSegment{
|
||||
Name: segment,
|
||||
Labels: labels,
|
||||
})
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
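// Mapping sketch (not part of the original file): getSegments turns
// `<prefix>.frontends.<segment>.*` labels into per-segment frontend labels, as
// exercised by generateFrontends above. The sample label and rule are illustrative.
//
//	labels := map[string]string{
//		"traefik.frontends.admin.rule": "Host:admin.example.com",
//	}
//	segments := getSegments("traefik.frontends", "traefik", labels)
//	// one segment named "admin" whose labels map
//	// "traefik.frontend.rule" to "Host:admin.example.com"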
|
|
@ -1,862 +0,0 @@
|
|||
package consulcatalog
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/ty/fun"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNodeSorter(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
nodes []*api.ServiceEntry
|
||||
expected []*api.ServiceEntry
|
||||
}{
|
||||
{
|
||||
desc: "Should sort nothing",
|
||||
nodes: []*api.ServiceEntry{},
|
||||
expected: []*api.ServiceEntry{},
|
||||
},
|
||||
{
|
||||
desc: "Should sort by node address",
|
||||
nodes: []*api.ServiceEntry{
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*api.ServiceEntry{
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should sort by service name",
|
||||
nodes: []*api.ServiceEntry{
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "127.0.0.2",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "bar",
|
||||
Address: "127.0.0.2",
|
||||
Port: 81,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "bar",
|
||||
Address: "127.0.0.2",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*api.ServiceEntry{
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "bar",
|
||||
Address: "127.0.0.2",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "bar",
|
||||
Address: "127.0.0.2",
|
||||
Port: 81,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "127.0.0.2",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should sort by node address",
|
||||
nodes: []*api.ServiceEntry{
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*api.ServiceEntry{
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &api.AgentService{
|
||||
Service: "foo",
|
||||
Address: "",
|
||||
Port: 80,
|
||||
},
|
||||
Node: &api.Node{
|
||||
Node: "localhost",
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sort.Sort(nodeSorter(test.nodes))
|
||||
actual := test.nodes
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetChangedKeys(t *testing.T) {
|
||||
type Input struct {
|
||||
currState map[string]Service
|
||||
prevState map[string]Service
|
||||
}
|
||||
|
||||
type Output struct {
|
||||
addedKeys []string
|
||||
removedKeys []string
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
input Input
|
||||
output Output
|
||||
}{
|
||||
{
|
||||
desc: "Should add 0 services and remove 0",
|
||||
input: Input{
|
||||
currState: map[string]Service{
|
||||
"foo-service": {Name: "v1"},
|
||||
"bar-service": {Name: "v1"},
|
||||
"baz-service": {Name: "v1"},
|
||||
"qux-service": {Name: "v1"},
|
||||
"quux-service": {Name: "v1"},
|
||||
"quuz-service": {Name: "v1"},
|
||||
"corge-service": {Name: "v1"},
|
||||
"grault-service": {Name: "v1"},
|
||||
"garply-service": {Name: "v1"},
|
||||
"waldo-service": {Name: "v1"},
|
||||
"fred-service": {Name: "v1"},
|
||||
"plugh-service": {Name: "v1"},
|
||||
"xyzzy-service": {Name: "v1"},
|
||||
"thud-service": {Name: "v1"},
|
||||
},
|
||||
prevState: map[string]Service{
|
||||
"foo-service": {Name: "v1"},
|
||||
"bar-service": {Name: "v1"},
|
||||
"baz-service": {Name: "v1"},
|
||||
"qux-service": {Name: "v1"},
|
||||
"quux-service": {Name: "v1"},
|
||||
"quuz-service": {Name: "v1"},
|
||||
"corge-service": {Name: "v1"},
|
||||
"grault-service": {Name: "v1"},
|
||||
"garply-service": {Name: "v1"},
|
||||
"waldo-service": {Name: "v1"},
|
||||
"fred-service": {Name: "v1"},
|
||||
"plugh-service": {Name: "v1"},
|
||||
"xyzzy-service": {Name: "v1"},
|
||||
"thud-service": {Name: "v1"},
|
||||
},
|
||||
},
|
||||
output: Output{
|
||||
addedKeys: []string{},
|
||||
removedKeys: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should add 3 services and remove 0",
|
||||
input: Input{
|
||||
currState: map[string]Service{
|
||||
"foo-service": {Name: "v1"},
|
||||
"bar-service": {Name: "v1"},
|
||||
"baz-service": {Name: "v1"},
|
||||
"qux-service": {Name: "v1"},
|
||||
"quux-service": {Name: "v1"},
|
||||
"quuz-service": {Name: "v1"},
|
||||
"corge-service": {Name: "v1"},
|
||||
"grault-service": {Name: "v1"},
|
||||
"garply-service": {Name: "v1"},
|
||||
"waldo-service": {Name: "v1"},
|
||||
"fred-service": {Name: "v1"},
|
||||
"plugh-service": {Name: "v1"},
|
||||
"xyzzy-service": {Name: "v1"},
|
||||
"thud-service": {Name: "v1"},
|
||||
},
|
||||
prevState: map[string]Service{
|
||||
"foo-service": {Name: "v1"},
|
||||
"bar-service": {Name: "v1"},
|
||||
"baz-service": {Name: "v1"},
|
||||
"corge-service": {Name: "v1"},
|
||||
"grault-service": {Name: "v1"},
|
||||
"garply-service": {Name: "v1"},
|
||||
"waldo-service": {Name: "v1"},
|
||||
"fred-service": {Name: "v1"},
|
||||
"plugh-service": {Name: "v1"},
|
||||
"xyzzy-service": {Name: "v1"},
|
||||
"thud-service": {Name: "v1"},
|
||||
},
|
||||
},
|
||||
output: Output{
|
||||
addedKeys: []string{"qux-service", "quux-service", "quuz-service"},
|
||||
removedKeys: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should add 2 services and remove 2",
|
||||
input: Input{
|
||||
currState: map[string]Service{
|
||||
"foo-service": {Name: "v1"},
|
||||
"qux-service": {Name: "v1"},
|
||||
"quux-service": {Name: "v1"},
|
||||
"quuz-service": {Name: "v1"},
|
||||
"corge-service": {Name: "v1"},
|
||||
"grault-service": {Name: "v1"},
|
||||
"garply-service": {Name: "v1"},
|
||||
"waldo-service": {Name: "v1"},
|
||||
"fred-service": {Name: "v1"},
|
||||
"plugh-service": {Name: "v1"},
|
||||
"xyzzy-service": {Name: "v1"},
|
||||
"thud-service": {Name: "v1"},
|
||||
},
|
||||
prevState: map[string]Service{
|
||||
"foo-service": {Name: "v1"},
|
||||
"bar-service": {Name: "v1"},
|
||||
"baz-service": {Name: "v1"},
|
||||
"qux-service": {Name: "v1"},
|
||||
"quux-service": {Name: "v1"},
|
||||
"quuz-service": {Name: "v1"},
|
||||
"corge-service": {Name: "v1"},
|
||||
"waldo-service": {Name: "v1"},
|
||||
"fred-service": {Name: "v1"},
|
||||
"plugh-service": {Name: "v1"},
|
||||
"xyzzy-service": {Name: "v1"},
|
||||
"thud-service": {Name: "v1"},
|
||||
},
|
||||
},
|
||||
output: Output{
|
||||
addedKeys: []string{"grault-service", "garply-service"},
|
||||
removedKeys: []string{"bar-service", "baz-service"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
addedKeys, removedKeys := getChangedServiceKeys(test.input.currState, test.input.prevState)
|
||||
assert.Equal(t, fun.Set(test.output.addedKeys), fun.Set(addedKeys), "Added keys comparison results: got %q, want %q", addedKeys, test.output.addedKeys)
|
||||
assert.Equal(t, fun.Set(test.output.removedKeys), fun.Set(removedKeys), "Removed keys comparison results: got %q, want %q", removedKeys, test.output.removedKeys)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterEnabled(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
exposedByDefault bool
|
||||
node *api.ServiceEntry
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
desc: "exposed",
|
||||
exposedByDefault: true,
|
||||
node: &api.ServiceEntry{
|
||||
Service: &api.AgentService{
|
||||
Service: "api",
|
||||
Address: "10.0.0.1",
|
||||
Port: 80,
|
||||
Tags: []string{""},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "exposed and tolerated by valid label value",
|
||||
exposedByDefault: true,
|
||||
node: &api.ServiceEntry{
|
||||
Service: &api.AgentService{
|
||||
Service: "api",
|
||||
Address: "10.0.0.1",
|
||||
Port: 80,
|
||||
Tags: []string{"", "traefik.enable=true"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "exposed and tolerated by invalid label value",
|
||||
exposedByDefault: true,
|
||||
node: &api.ServiceEntry{
|
||||
Service: &api.AgentService{
|
||||
Service: "api",
|
||||
Address: "10.0.0.1",
|
||||
Port: 80,
|
||||
Tags: []string{"", "traefik.enable=bad"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "exposed but overridden by label",
|
||||
exposedByDefault: true,
|
||||
node: &api.ServiceEntry{
|
||||
Service: &api.AgentService{
|
||||
Service: "api",
|
||||
Address: "10.0.0.1",
|
||||
Port: 80,
|
||||
Tags: []string{"", "traefik.enable=false"},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "non-exposed",
|
||||
exposedByDefault: false,
|
||||
node: &api.ServiceEntry{
|
||||
Service: &api.AgentService{
|
||||
Service: "api",
|
||||
Address: "10.0.0.1",
|
||||
Port: 80,
|
||||
Tags: []string{""},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "non-exposed but overridden by label",
|
||||
exposedByDefault: false,
|
||||
node: &api.ServiceEntry{
|
||||
Service: &api.AgentService{
|
||||
Service: "api",
|
||||
Address: "10.0.0.1",
|
||||
Port: 80,
|
||||
Tags: []string{"", "traefik.enable=true"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
provider := &Provider{
|
||||
Domain: "localhost",
|
||||
Prefix: "traefik",
|
||||
ExposedByDefault: test.exposedByDefault,
|
||||
}
|
||||
actual := provider.nodeFilter("test", test.node)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetChangedStringKeys(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
current []string
|
||||
previous []string
|
||||
expectedAdded []string
|
||||
expectedRemoved []string
|
||||
}{
|
||||
{
|
||||
desc: "1 element added, 0 removed",
|
||||
current: []string{"chou"},
|
||||
previous: []string{},
|
||||
expectedAdded: []string{"chou"},
|
||||
expectedRemoved: []string{},
|
||||
}, {
|
||||
desc: "0 elements added, 0 removed",
|
||||
current: []string{"chou"},
|
||||
previous: []string{"chou"},
|
||||
expectedAdded: []string{},
|
||||
expectedRemoved: []string{},
|
||||
},
|
||||
{
|
||||
desc: "0 elements added, 1 removed",
|
||||
current: []string{},
|
||||
previous: []string{"chou"},
|
||||
expectedAdded: []string{},
|
||||
expectedRemoved: []string{"chou"},
|
||||
},
|
||||
{
|
||||
desc: "1 element added, 1 removed",
|
||||
current: []string{"carotte"},
|
||||
previous: []string{"chou"},
|
||||
expectedAdded: []string{"carotte"},
|
||||
expectedRemoved: []string{"chou"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actualAdded, actualRemoved := getChangedStringKeys(test.current, test.previous)
|
||||
assert.Equal(t, test.expectedAdded, actualAdded)
|
||||
assert.Equal(t, test.expectedRemoved, actualRemoved)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasServiceChanged(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
current map[string]Service
|
||||
previous map[string]Service
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
desc: "Change detected due to change of nodes",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node2"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "No change when the current service is missing",
|
||||
current: make(map[string]Service),
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "No change on nodes",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "No change on nodes and tags",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "Change detected on tags",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Change detected on ports",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
Ports: []int{80},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo"},
|
||||
Ports: []int{81},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Change detected on number of ports",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
Ports: []int{80},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo"},
|
||||
Ports: []int{81, 82},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Change detected on addresses",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Addresses: []string{"127.0.0.1"},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Addresses: []string{"127.0.0.2"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "No Change detected",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo"},
|
||||
Ports: []int{80},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo"},
|
||||
Ports: []int{80},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := hasServiceChanged(test.current, test.previous)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasChanged(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
current map[string]Service
|
||||
previous map[string]Service
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
desc: "Change detected due to a new service",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
previous: make(map[string]Service),
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Change detected due to a removed service",
|
||||
current: make(map[string]Service),
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Change detected due to change of nodes",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node2"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "No change on nodes",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "No change on nodes and tags",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: "Change detected on tags",
|
||||
current: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo=bar"},
|
||||
},
|
||||
},
|
||||
previous: map[string]Service{
|
||||
"foo-service": {
|
||||
Name: "foo",
|
||||
Nodes: []string{"node1"},
|
||||
Tags: []string{"foo"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := hasChanged(test.current, test.previous)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetConstraintTags(t *testing.T) {
|
||||
provider := &Provider{
|
||||
Domain: "localhost",
|
||||
Prefix: "traefik",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
tags []string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
desc: "nil tags",
|
||||
},
|
||||
{
|
||||
desc: "invalid tag",
|
||||
tags: []string{"tags=foobar"},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "wrong tag",
|
||||
tags: []string{"traefik_tags=foobar"},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "empty value",
|
||||
tags: []string{"traefik.tags="},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "simple tag",
|
||||
tags: []string{"traefik.tags=foobar "},
|
||||
expected: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
desc: "multiple values tag",
|
||||
tags: []string{"traefik.tags=foobar, fiibir"},
|
||||
expected: []string{"foobar", "fiibir"},
|
||||
},
|
||||
{
|
||||
desc: "multiple tags",
|
||||
tags: []string{"traefik.tags=foobar", "traefik.tags=foobor"},
|
||||
expected: []string{"foobar", "foobor"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
constraints := provider.getConstraintTags(test.tags)
|
||||
assert.EqualValues(t, test.expected, constraints)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
package consulcatalog
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/containous/traefik/provider/label"
|
||||
)
|
||||
|
||||
func tagsToNeutralLabels(tags []string, prefix string) map[string]string {
|
||||
var labels map[string]string
|
||||
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, prefix) {
|
||||
|
||||
parts := strings.SplitN(tag, "=", 2)
|
||||
if len(parts) == 2 {
|
||||
if labels == nil {
|
||||
labels = make(map[string]string)
|
||||
}
|
||||
|
||||
// replace the custom prefix with the generic prefix
|
||||
key := label.Prefix + strings.TrimPrefix(parts[0], prefix+".")
|
||||
labels[key] = parts[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return labels
|
||||
}
|
||||
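// Usage sketch (not part of the original file): with a custom prefix, tags are
// rewritten under the generic "traefik." label prefix; tags without the prefix or
// without a value are skipped (see the test file that follows for more cases).
// The sample tags are illustrative.
//
//	tagsToNeutralLabels([]string{"test.weight=42", "plain"}, "test")
//	// -> map[string]string{"traefik.weight": "42"}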
|
|
@ -1,64 +0,0 @@
|
|||
package consulcatalog
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTagsToNeutralLabels(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
tags []string
|
||||
prefix string
|
||||
expected map[string]string
|
||||
}{
|
||||
{
|
||||
desc: "without tags",
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "with a prefix",
|
||||
prefix: "test",
|
||||
tags: []string{
|
||||
"test.aaa=01",
|
||||
"test.bbb=02",
|
||||
"ccc=03",
|
||||
"test.ddd=04=to",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"traefik.aaa": "01",
|
||||
"traefik.bbb": "02",
|
||||
"traefik.ddd": "04=to",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
desc: "with an empty prefix",
|
||||
prefix: "",
|
||||
tags: []string{
|
||||
"test.aaa=01",
|
||||
"test.bbb=02",
|
||||
"ccc=03",
|
||||
"test.ddd=04=to",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"traefik.test.aaa": "01",
|
||||
"traefik.test.bbb": "02",
|
||||
"traefik.ccc": "03",
|
||||
"traefik.test.ddd": "04=to",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
labels := tagsToNeutralLabels(test.tags, test.prefix)
|
||||
|
||||
assert.Equal(t, test.expected, labels)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,214 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
docker "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
func containerJSON(ops ...func(*docker.ContainerJSON)) docker.ContainerJSON {
|
||||
c := &docker.ContainerJSON{
|
||||
ContainerJSONBase: &docker.ContainerJSONBase{
|
||||
Name: "fake",
|
||||
HostConfig: &container.HostConfig{},
|
||||
},
|
||||
Config: &container.Config{},
|
||||
NetworkSettings: &docker.NetworkSettings{
|
||||
NetworkSettingsBase: docker.NetworkSettingsBase{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(c)
|
||||
}
|
||||
|
||||
return *c
|
||||
}
|
||||
|
||||
func name(name string) func(*docker.ContainerJSON) {
|
||||
return func(c *docker.ContainerJSON) {
|
||||
c.ContainerJSONBase.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
func networkMode(mode string) func(*docker.ContainerJSON) {
|
||||
return func(c *docker.ContainerJSON) {
|
||||
c.ContainerJSONBase.HostConfig.NetworkMode = container.NetworkMode(mode)
|
||||
}
|
||||
}
|
||||
|
||||
func nodeIP(ip string) func(*docker.ContainerJSON) {
|
||||
return func(c *docker.ContainerJSON) {
|
||||
c.ContainerJSONBase.Node = &docker.ContainerNode{
|
||||
IPAddress: ip,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func labels(labels map[string]string) func(*docker.ContainerJSON) {
|
||||
return func(c *docker.ContainerJSON) {
|
||||
c.Config.Labels = labels
|
||||
}
|
||||
}
|
||||
|
||||
func ports(portMap nat.PortMap) func(*docker.ContainerJSON) {
|
||||
return func(c *docker.ContainerJSON) {
|
||||
c.NetworkSettings.NetworkSettingsBase.Ports = portMap
|
||||
}
|
||||
}
|
||||
|
||||
func withNetwork(name string, ops ...func(*network.EndpointSettings)) func(*docker.ContainerJSON) {
|
||||
return func(c *docker.ContainerJSON) {
|
||||
if c.NetworkSettings.Networks == nil {
|
||||
c.NetworkSettings.Networks = map[string]*network.EndpointSettings{}
|
||||
}
|
||||
c.NetworkSettings.Networks[name] = &network.EndpointSettings{}
|
||||
for _, op := range ops {
|
||||
op(c.NetworkSettings.Networks[name])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ipv4(ip string) func(*network.EndpointSettings) {
|
||||
return func(s *network.EndpointSettings) {
|
||||
s.IPAddress = ip
|
||||
}
|
||||
}
|
||||
|
||||
func swarmTask(id string, ops ...func(*swarm.Task)) swarm.Task {
|
||||
task := &swarm.Task{
|
||||
ID: id,
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(task)
|
||||
}
|
||||
|
||||
return *task
|
||||
}
|
||||
|
||||
func taskSlot(slot int) func(*swarm.Task) {
|
||||
return func(task *swarm.Task) {
|
||||
task.Slot = slot
|
||||
}
|
||||
}
|
||||
|
||||
func taskNetworkAttachment(id string, name string, driver string, addresses []string) func(*swarm.Task) {
|
||||
return func(task *swarm.Task) {
|
||||
task.NetworksAttachments = append(task.NetworksAttachments, swarm.NetworkAttachment{
|
||||
Network: swarm.Network{
|
||||
ID: id,
|
||||
Spec: swarm.NetworkSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: name,
|
||||
},
|
||||
DriverConfiguration: &swarm.Driver{
|
||||
Name: driver,
|
||||
},
|
||||
},
|
||||
},
|
||||
Addresses: addresses,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func taskStatus(ops ...func(*swarm.TaskStatus)) func(*swarm.Task) {
|
||||
return func(task *swarm.Task) {
|
||||
status := &swarm.TaskStatus{}
|
||||
|
||||
for _, op := range ops {
|
||||
op(status)
|
||||
}
|
||||
|
||||
task.Status = *status
|
||||
}
|
||||
}
|
||||
|
||||
func taskState(state swarm.TaskState) func(*swarm.TaskStatus) {
|
||||
return func(status *swarm.TaskStatus) {
|
||||
status.State = state
|
||||
}
|
||||
}
|
||||
|
||||
func taskContainerStatus(id string) func(*swarm.TaskStatus) {
|
||||
return func(status *swarm.TaskStatus) {
|
||||
status.ContainerStatus = swarm.ContainerStatus{
|
||||
ContainerID: id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func swarmService(ops ...func(*swarm.Service)) swarm.Service {
|
||||
service := &swarm.Service{
|
||||
ID: "serviceID",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: "defaultServiceName",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(service)
|
||||
}
|
||||
|
||||
return *service
|
||||
}
|
||||
|
||||
func serviceName(name string) func(service *swarm.Service) {
|
||||
return func(service *swarm.Service) {
|
||||
service.Spec.Annotations.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
func serviceLabels(labels map[string]string) func(service *swarm.Service) {
|
||||
return func(service *swarm.Service) {
|
||||
service.Spec.Annotations.Labels = labels
|
||||
}
|
||||
}
|
||||
|
||||
func withEndpoint(ops ...func(*swarm.Endpoint)) func(*swarm.Service) {
|
||||
return func(service *swarm.Service) {
|
||||
endpoint := &swarm.Endpoint{}
|
||||
|
||||
for _, op := range ops {
|
||||
op(endpoint)
|
||||
}
|
||||
|
||||
service.Endpoint = *endpoint
|
||||
}
|
||||
}
|
||||
|
||||
func virtualIP(networkID, addr string) func(*swarm.Endpoint) {
|
||||
return func(endpoint *swarm.Endpoint) {
|
||||
if endpoint.VirtualIPs == nil {
|
||||
endpoint.VirtualIPs = []swarm.EndpointVirtualIP{}
|
||||
}
|
||||
endpoint.VirtualIPs = append(endpoint.VirtualIPs, swarm.EndpointVirtualIP{
|
||||
NetworkID: networkID,
|
||||
Addr: addr,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func withEndpointSpec(ops ...func(*swarm.EndpointSpec)) func(*swarm.Service) {
|
||||
return func(service *swarm.Service) {
|
||||
endpointSpec := &swarm.EndpointSpec{}
|
||||
|
||||
for _, op := range ops {
|
||||
op(endpointSpec)
|
||||
}
|
||||
|
||||
service.Spec.EndpointSpec = endpointSpec
|
||||
}
|
||||
}
|
||||
|
||||
func modeDNSSR(spec *swarm.EndpointSpec) {
|
||||
spec.Mode = swarm.ResolutionModeDNSRR
|
||||
}
|
||||
|
||||
func modeVIP(spec *swarm.EndpointSpec) {
|
||||
spec.Mode = swarm.ResolutionModeVIP
|
||||
}
|
||||
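// Usage sketch (not part of the original file): these helpers form a small
// functional-options builder for Docker and Swarm test fixtures; they compose as
// shown below, with illustrative values.
//
//	c := containerJSON(
//		name("web"),
//		labels(map[string]string{"traefik.enable": "true"}),
//		withNetwork("bridge", ipv4("172.17.0.2")),
//	)
//	s := swarmService(serviceName("web"), withEndpointSpec(modeVIP))
//	_, _ = c, s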
|
|
@ -1,413 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/BurntSushi/ty/fun"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
const (
|
||||
labelDockerNetwork = "traefik.docker.network"
|
||||
labelBackendLoadBalancerSwarm = "traefik.backend.loadbalancer.swarm"
|
||||
labelDockerComposeProject = "com.docker.compose.project"
|
||||
labelDockerComposeService = "com.docker.compose.service"
|
||||
)
|
||||
|
||||
func (p *Provider) buildConfiguration(containersInspected []dockerData) *types.Configuration {
|
||||
dockerFuncMap := template.FuncMap{
|
||||
"getLabelValue": label.GetStringValue,
|
||||
"getSubDomain": getSubDomain,
|
||||
"isBackendLBSwarm": isBackendLBSwarm,
|
||||
"getDomain": label.GetFuncString(label.TraefikDomain, p.Domain),
|
||||
|
||||
// Backend functions
|
||||
"getIPAddress": p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead?
|
||||
"getServers": p.getServers,
|
||||
"getMaxConn": label.GetMaxConn,
|
||||
"getHealthCheck": label.GetHealthCheck,
|
||||
"getBuffering": label.GetBuffering,
|
||||
"getResponseForwarding": label.GetResponseForwarding,
|
||||
"getCircuitBreaker": label.GetCircuitBreaker,
|
||||
"getLoadBalancer": label.GetLoadBalancer,
|
||||
|
||||
// Frontend functions
|
||||
"getBackendName": getBackendName,
|
||||
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
|
||||
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
|
||||
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
|
||||
"getPassTLSClientCert": label.GetTLSClientCert,
|
||||
"getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
|
||||
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
|
||||
"getAuth": label.GetAuth,
|
||||
"getFrontendRule": p.getFrontendRule,
|
||||
"getRedirect": label.GetRedirect,
|
||||
"getErrorPages": label.GetErrorPages,
|
||||
"getRateLimit": label.GetRateLimit,
|
||||
"getHeaders": label.GetHeaders,
|
||||
"getWhiteList": label.GetWhiteList,
|
||||
}
|
||||
|
||||
// filter containers
|
||||
filteredContainers := fun.Filter(p.containerFilter, containersInspected).([]dockerData)
|
||||
|
||||
frontends := map[string][]dockerData{}
|
||||
servers := map[string][]dockerData{}
|
||||
|
||||
serviceNames := make(map[string]struct{})
|
||||
|
||||
for idx, container := range filteredContainers {
|
||||
segmentProperties := label.ExtractTraefikLabels(container.Labels)
|
||||
for segmentName, labels := range segmentProperties {
|
||||
container.SegmentLabels = labels
|
||||
container.SegmentName = segmentName
|
||||
|
||||
serviceNamesKey := getServiceNameKey(container, p.SwarmMode, segmentName)
|
||||
|
||||
if _, exists := serviceNames[serviceNamesKey]; !exists {
|
||||
frontendName := p.getFrontendName(container, idx)
|
||||
frontends[frontendName] = append(frontends[frontendName], container)
|
||||
if len(serviceNamesKey) > 0 {
|
||||
serviceNames[serviceNamesKey] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Backends
|
||||
backendName := getBackendName(container)
|
||||
|
||||
// Servers
|
||||
servers[backendName] = append(servers[backendName], container)
|
||||
}
|
||||
}
|
||||
|
||||
templateObjects := struct {
|
||||
Containers []dockerData
|
||||
Frontends map[string][]dockerData
|
||||
Servers map[string][]dockerData
|
||||
Domain string
|
||||
}{
|
||||
Containers: filteredContainers,
|
||||
Frontends: frontends,
|
||||
Servers: servers,
|
||||
Domain: p.Domain,
|
||||
}
|
||||
|
||||
configuration, err := p.GetConfiguration("templates/docker.tmpl", dockerFuncMap, templateObjects)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
return configuration
|
||||
}
|
||||
|
||||
func getServiceNameKey(container dockerData, swarmMode bool, segmentName string) string {
|
||||
if swarmMode {
|
||||
return container.ServiceName + segmentName
|
||||
}
|
||||
|
||||
return getServiceName(container) + segmentName
|
||||
}
|
||||
|
||||
func (p *Provider) containerFilter(container dockerData) bool {
|
||||
if !label.IsEnabled(container.Labels, p.ExposedByDefault) {
|
||||
log.Debugf("Filtering disabled container %s", container.Name)
|
||||
return false
|
||||
}
|
||||
|
||||
segmentProperties := label.ExtractTraefikLabels(container.Labels)
|
||||
|
||||
var errPort error
|
||||
for segmentName, labels := range segmentProperties {
|
||||
errPort = checkSegmentPort(labels, segmentName)
|
||||
|
||||
if len(p.getFrontendRule(container, labels)) == 0 {
|
||||
log.Debugf("Filtering container with empty frontend rule %s %s", container.Name, segmentName)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if len(container.NetworkSettings.Ports) == 0 && errPort != nil {
|
||||
log.Debugf("Filtering container without port, %s: %v", container.Name, errPort)
|
||||
return false
|
||||
}
|
||||
|
||||
constraintTags := label.SplitAndTrimString(container.Labels[label.TraefikTags], ",")
|
||||
if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok {
|
||||
if failingConstraint != nil {
|
||||
log.Debugf("Container %s pruned by %q constraint", container.Name, failingConstraint.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if container.Health != "" && container.Health != "healthy" {
|
||||
log.Debugf("Filtering unhealthy or starting container %s", container.Name)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func checkSegmentPort(labels map[string]string, segmentName string) error {
|
||||
if port, ok := labels[label.TraefikPort]; ok {
|
||||
_, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid port value %q for the segment %q: %v", port, segmentName, err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("port label is missing, please use %s as default value or define port label for all segments ('traefik.<segment_name>.port')", label.TraefikPort)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) getFrontendName(container dockerData, idx int) string {
|
||||
var name string
|
||||
if len(container.SegmentName) > 0 {
|
||||
name = container.SegmentName + "-" + getBackendName(container)
|
||||
} else {
|
||||
name = p.getFrontendRule(container, container.SegmentLabels) + "-" + strconv.Itoa(idx)
|
||||
}
|
||||
|
||||
return provider.Normalize(name)
|
||||
}
|
||||
|
||||
func (p *Provider) getFrontendRule(container dockerData, segmentLabels map[string]string) string {
|
||||
if value := label.GetStringValue(segmentLabels, label.TraefikFrontendRule, ""); len(value) != 0 {
|
||||
return value
|
||||
}
|
||||
|
||||
domain := label.GetStringValue(segmentLabels, label.TraefikDomain, p.Domain)
|
||||
if len(domain) > 0 {
|
||||
domain = "." + domain
|
||||
}
|
||||
|
||||
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
|
||||
return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + domain
|
||||
}
|
||||
|
||||
if len(domain) > 0 {
|
||||
return "Host:" + getSubDomain(container.ServiceName) + domain
|
||||
}
|
||||
|
||||
return ""
|
||||
}
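// Illustrative example (assumed values): for a docker-compose container labelled
// com.docker.compose.service=web and com.docker.compose.project=myproj, with a
// provider domain of "docker.localhost", getFrontendRule returns
// "Host:web.myproj.docker.localhost".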
|
||||
|
||||
func (p Provider) getIPAddress(container dockerData) string {
|
||||
if value := label.GetStringValue(container.Labels, labelDockerNetwork, p.Network); value != "" {
|
||||
networkSettings := container.NetworkSettings
|
||||
if networkSettings.Networks != nil {
|
||||
network := networkSettings.Networks[value]
|
||||
if network != nil {
|
||||
return network.Addr
|
||||
}
|
||||
|
||||
log.Warnf("Could not find network named '%s' for container '%s'! Maybe you're missing the project's prefix in the label? Defaulting to first available network.", value, container.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if container.NetworkSettings.NetworkMode.IsHost() {
|
||||
if container.Node != nil {
|
||||
if container.Node.IPAddress != "" {
|
||||
return container.Node.IPAddress
|
||||
}
|
||||
}
|
||||
return "127.0.0.1"
|
||||
}
|
||||
|
||||
if container.NetworkSettings.NetworkMode.IsContainer() {
|
||||
dockerClient, err := p.createClient()
|
||||
if err != nil {
|
||||
log.Warnf("Unable to get IP address for container %s, error: %s", container.Name, err)
|
||||
return ""
|
||||
}
|
||||
|
||||
connectedContainer := container.NetworkSettings.NetworkMode.ConnectedContainer()
|
||||
containerInspected, err := dockerClient.ContainerInspect(context.Background(), connectedContainer)
|
||||
if err != nil {
|
||||
log.Warnf("Unable to get IP address for container %s : Failed to inspect container ID %s, error: %s", container.Name, connectedContainer, err)
|
||||
return ""
|
||||
}
|
||||
return p.getIPAddress(parseContainer(containerInspected))
|
||||
}
|
||||
|
||||
for _, network := range container.NetworkSettings.Networks {
|
||||
return network.Addr
|
||||
}
|
||||
|
||||
log.Warnf("Unable to find the IP address for the container %q.", container.Name)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Deprecated: Please use getIPPort instead
|
||||
func (p *Provider) getDeprecatedIPAddress(container dockerData) string {
|
||||
ip, _, err := p.getIPPort(container)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return ""
|
||||
}
|
||||
return ip
|
||||
}
|
||||
|
||||
// Trim the leading slash "/", convert remaining slashes "/" to dashes "-", and convert underscores "_" to dashes "-"
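// For example (illustrative): getSubDomain("/my_project/web") returns "my-project-web".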
|
||||
func getSubDomain(name string) string {
|
||||
return strings.Replace(strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1), "_", "-", -1)
|
||||
}
|
||||
|
||||
func isBackendLBSwarm(container dockerData) bool {
|
||||
return label.GetBoolValue(container.Labels, labelBackendLoadBalancerSwarm, false)
|
||||
}
|
||||
|
||||
func getBackendName(container dockerData) string {
|
||||
if len(container.SegmentName) > 0 {
|
||||
return getSegmentBackendName(container)
|
||||
}
|
||||
|
||||
return getDefaultBackendName(container)
|
||||
}
|
||||
|
||||
func getSegmentBackendName(container dockerData) string {
|
||||
serviceName := getServiceName(container)
|
||||
if value := label.GetStringValue(container.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 {
|
||||
return provider.Normalize(serviceName + "-" + value)
|
||||
}
|
||||
|
||||
return provider.Normalize(serviceName + "-" + container.SegmentName)
|
||||
}
|
||||
|
||||
func getDefaultBackendName(container dockerData) string {
|
||||
if value := label.GetStringValue(container.SegmentLabels, label.TraefikBackend, ""); len(value) != 0 {
|
||||
return provider.Normalize(value)
|
||||
}
|
||||
|
||||
return provider.Normalize(getServiceName(container))
|
||||
}
|
||||
|
||||
func getServiceName(container dockerData) string {
|
||||
serviceName := container.ServiceName
|
||||
|
||||
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
|
||||
serviceName = values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]
|
||||
}
|
||||
|
||||
return serviceName
|
||||
}
|
||||
|
||||
func getPort(container dockerData) string {
|
||||
if value := label.GetStringValue(container.SegmentLabels, label.TraefikPort, ""); len(value) != 0 {
|
||||
return value
|
||||
}
|
||||
|
||||
// See iteration order in https://blog.golang.org/go-maps-in-action
|
||||
var ports []nat.Port
|
||||
for port := range container.NetworkSettings.Ports {
|
||||
ports = append(ports, port)
|
||||
}
|
||||
|
||||
less := func(i, j nat.Port) bool {
|
||||
return i.Int() < j.Int()
|
||||
}
|
||||
nat.Sort(ports, less)
|
||||
|
||||
if len(ports) > 0 {
|
||||
min := ports[0]
|
||||
return min.Port()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
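// Illustrative behaviour (assumed example): with no "traefik.port" label and
// exposed ports "8080/tcp" and "443/tcp", getPort returns "443", the lowest
// exposed port.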
|
||||
|
||||
func (p *Provider) getPortBinding(container dockerData) (*nat.PortBinding, error) {
|
||||
port := getPort(container)
|
||||
for netPort, portBindings := range container.NetworkSettings.Ports {
|
||||
if strings.EqualFold(string(netPort), port+"/TCP") || strings.EqualFold(string(netPort), port+"/UDP") {
|
||||
for _, p := range portBindings {
|
||||
return &p, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unable to find the external IP:Port for the container %q", container.Name)
|
||||
}
|
||||
|
||||
func (p *Provider) getIPPort(container dockerData) (string, string, error) {
|
||||
var ip, port string
|
||||
usedBound := false
|
||||
|
||||
if p.UseBindPortIP {
|
||||
portBinding, err := p.getPortBinding(container)
|
||||
if err != nil {
|
||||
log.Infof("Unable to find a binding for container %q, falling back on its internal IP/Port.", container.Name)
|
||||
} else if (portBinding.HostIP == "0.0.0.0") || (len(portBinding.HostIP) == 0) {
|
||||
log.Infof("Cannot determine the IP address (got %q) for %q's binding, falling back on its internal IP/Port.", portBinding.HostIP, container.Name)
|
||||
} else {
|
||||
ip = portBinding.HostIP
|
||||
port = portBinding.HostPort
|
||||
usedBound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !usedBound {
|
||||
ip = p.getIPAddress(container)
|
||||
port = getPort(container)
|
||||
}
|
||||
|
||||
if len(ip) == 0 {
|
||||
return "", "", fmt.Errorf("unable to find the IP address for the container %q: the server is ignored", container.Name)
|
||||
}
|
||||
|
||||
return ip, port, nil
|
||||
}
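// Illustrative behaviour (assumed example): with UseBindPortIP enabled and a
// port published on "0.0.0.0:32768", the host IP cannot be determined, so
// getIPPort falls back on the container's internal IP and port.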
|
||||
|
||||
func (p *Provider) getServers(containers []dockerData) map[string]types.Server {
|
||||
var servers map[string]types.Server
|
||||
|
||||
for _, container := range containers {
|
||||
ip, port, err := p.getIPPort(container)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if servers == nil {
|
||||
servers = make(map[string]types.Server)
|
||||
}
|
||||
|
||||
protocol := label.GetStringValue(container.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
|
||||
|
||||
serverURL := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port))
|
||||
|
||||
serverName := getServerName(container.Name, serverURL)
|
||||
if _, exist := servers[serverName]; exist {
|
||||
log.Debugf("Skipping server %q with the same URL.", serverName)
|
||||
continue
|
||||
}
|
||||
|
||||
servers[serverName] = types.Server{
|
||||
URL: serverURL,
|
||||
Weight: label.GetIntValue(container.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
func getServerName(containerName, url string) string {
|
||||
hash := md5.New()
|
||||
_, err := hash.Write([]byte(url))
|
||||
if err != nil {
|
||||
// Impossible case
|
||||
log.Errorf("Fail to hash server URL %q", url)
|
||||
}
|
||||
|
||||
return provider.Normalize("server-" + containerName + "-" + hex.EncodeToString(hash.Sum(nil)))
|
||||
}
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@@ -1,833 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
docker "github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSegmentBuildConfiguration(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
containers []docker.ContainerJSON
|
||||
expectedFrontends map[string]*types.Frontend
|
||||
expectedBackends map[string]*types.Backend
|
||||
}{
|
||||
{
|
||||
desc: "when no container",
|
||||
containers: []docker.ContainerJSON{},
|
||||
expectedFrontends: map[string]*types.Frontend{},
|
||||
expectedBackends: map[string]*types.Backend{},
|
||||
},
|
||||
{
|
||||
desc: "simple configuration",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "pass tls client cert",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertPem: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotAfter: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotBefore: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSans: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCommonName: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCountry: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectLocality: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectOrganization: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectProvince: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber: "true",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
NotAfter: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth basic",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRealm: "myRealm",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Basic: &types.Basic{
|
||||
RemoveHeader: true,
|
||||
Realm: "myRealm",
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth basic backward compatibility",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
Basic: &types.Basic{
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth digest",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Digest: &types.Digest{
|
||||
RemoveHeader: true,
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth forward",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTrustForwardHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAuthResponseHeaders: "X-Auth-User,X-Auth-Token",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Forward: &types.Forward{
|
||||
Address: "auth.server",
|
||||
TLS: &types.ClientTLS{
|
||||
CA: "ca.crt",
|
||||
CAOptional: true,
|
||||
Cert: "server.crt",
|
||||
Key: "server.key",
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
TrustForwardHeader: true,
|
||||
AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "when all labels are set",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
label.Prefix + "sauternes." + label.SuffixPort: "666",
|
||||
label.Prefix + "sauternes." + label.SuffixProtocol: "https",
|
||||
label.Prefix + "sauternes." + label.SuffixWeight: "12",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertPem: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotAfter: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotBefore: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSans: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCommonName: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCountry: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectLocality: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectOrganization: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectProvince: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber: "true",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRealm: "myRealm",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTrustForwardHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendEntryPoints: "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassHostHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSCert: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPriority: "666",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectEntryPoint: "https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectRegex: "nope",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectReplacement: "nope",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectPermanent: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendWhiteListIPStrategyExcludedIPS: "10.10.10.10,10.10.10.11",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendWhiteListIPStrategyDepth: "5",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersAllowedHosts: "foo,bar,bor",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersHostsProxyHeaders: "foo,bar,bor",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLHost: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomFrameOptionsValue: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentSecurityPolicy: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersPublicKey: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersReferrerPolicy: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomBrowserXSSValue: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSSeconds: "666",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLForceHost: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLRedirect: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLTemporaryRedirect: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSIncludeSubdomains: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSPreload: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersForceSTSHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersFrameDeny: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentTypeNosniff: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersBrowserXSSFilter: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersIsDevelopment: "true",
|
||||
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: "404",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: "foobar",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: "foo_query",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: "500,600",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: "foobar",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: "bar_query",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRateLimitExtractorFunc: "client.ip",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: "6",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: "12",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: "18",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: "3",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: "6",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitBurst: "9",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
EntryPoints: []string{
|
||||
"http",
|
||||
"https",
|
||||
},
|
||||
PassHostHeader: true,
|
||||
PassTLSCert: true,
|
||||
Priority: 666,
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
NotAfter: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Basic: &types.Basic{
|
||||
RemoveHeader: true,
|
||||
Realm: "myRealm",
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
WhiteList: &types.WhiteList{
|
||||
SourceRange: []string{"10.10.10.10"},
|
||||
IPStrategy: &types.IPStrategy{
|
||||
Depth: 5,
|
||||
ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"},
|
||||
},
|
||||
},
|
||||
Headers: &types.Headers{
|
||||
CustomRequestHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
CustomResponseHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
AllowedHosts: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
HostsProxyHeaders: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
SSLRedirect: true,
|
||||
SSLTemporaryRedirect: true,
|
||||
SSLForceHost: true,
|
||||
SSLHost: "foo",
|
||||
SSLProxyHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
STSSeconds: 666,
|
||||
STSIncludeSubdomains: true,
|
||||
STSPreload: true,
|
||||
ForceSTSHeader: true,
|
||||
FrameDeny: true,
|
||||
CustomFrameOptionsValue: "foo",
|
||||
ContentTypeNosniff: true,
|
||||
BrowserXSSFilter: true,
|
||||
CustomBrowserXSSValue: "foo",
|
||||
ContentSecurityPolicy: "foo",
|
||||
PublicKey: "foo",
|
||||
ReferrerPolicy: "foo",
|
||||
IsDevelopment: true,
|
||||
},
|
||||
Errors: map[string]*types.ErrorPage{
|
||||
"foo": {
|
||||
Status: []string{"404"},
|
||||
Query: "foo_query",
|
||||
Backend: "backend-foobar",
|
||||
},
|
||||
"bar": {
|
||||
Status: []string{"500", "600"},
|
||||
Query: "bar_query",
|
||||
Backend: "backend-foobar",
|
||||
},
|
||||
},
|
||||
RateLimit: &types.RateLimit{
|
||||
ExtractorFunc: "client.ip",
|
||||
RateSet: map[string]*types.Rate{
|
||||
"foo": {
|
||||
Period: parse.Duration(6 * time.Second),
|
||||
Average: 12,
|
||||
Burst: 18,
|
||||
},
|
||||
"bar": {
|
||||
Period: parse.Duration(3 * time.Second),
|
||||
Average: 6,
|
||||
Burst: 9,
|
||||
},
|
||||
},
|
||||
},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
Regex: "",
|
||||
Replacement: "",
|
||||
Permanent: true,
|
||||
},
|
||||
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.docker.localhost",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-7f6444e0dff3330c8b0ad2bbbd383b0f": {
|
||||
URL: "https://127.0.0.1:666",
|
||||
Weight: 12,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several containers",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.protocol": "https",
|
||||
"traefik.sauternes.weight": "80",
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.passHostHeader": "false",
|
||||
"traefik.sauternes.frontend.rule": "Path:/mypath",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https,ws",
|
||||
"traefik.sauternes.frontend.auth.basic": "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
"traefik.sauternes.frontend.redirect.entryPoint": "https",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
containerJSON(
|
||||
name("test2"),
|
||||
labels(map[string]string{
|
||||
"traefik.anothersauternes.port": "8079",
|
||||
"traefik.anothersauternes.weight": "33",
|
||||
"traefik.anothersauternes.frontend.rule": "Path:/anotherpath",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: false,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https", "ws"},
|
||||
Auth: &types.Auth{
|
||||
Basic: &types.Basic{
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
},
|
||||
},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/mypath",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-anothersauternes-test2-anothersauternes": {
|
||||
Backend: "backend-test2-anothersauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-anothersauternes-test2-anothersauternes": {
|
||||
Rule: "Path:/anotherpath",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
"backend-test2-anothersauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test2-e9c1b66f9af919aa46053fbc2391bb4a": {
|
||||
URL: "http://127.0.0.1:8079",
|
||||
Weight: 33,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several segments with the same backend name and same port",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.port": "2503",
|
||||
"traefik.protocol": "https",
|
||||
"traefik.weight": "80",
|
||||
"traefik.frontend.entryPoints": "http,https",
|
||||
"traefik.frontend.redirect.entryPoint": "https",
|
||||
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.rule": "Path:/sauternes",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
|
||||
"traefik.arbois.backend": "foobar",
|
||||
"traefik.arbois.frontend.rule": "Path:/arbois",
|
||||
"traefik.arbois.frontend.priority": "3000",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/sauternes",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-arbois-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 3000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-arbois-test1-foobar": {
|
||||
Rule: "Path:/arbois",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several segments with the same backend name and different port (wrong behavior)",
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.protocol": "https",
|
||||
"traefik.frontend.entryPoints": "http,https",
|
||||
"traefik.frontend.redirect.entryPoint": "https",
|
||||
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.weight": "80",
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.rule": "Path:/sauternes",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
|
||||
"traefik.arbois.port": "2504",
|
||||
"traefik.arbois.weight": "90",
|
||||
"traefik.arbois.backend": "foobar",
|
||||
"traefik.arbois.frontend.rule": "Path:/arbois",
|
||||
"traefik.arbois.frontend.priority": "3000",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/sauternes",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-arbois-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 3000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-arbois-test1-foobar": {
|
||||
Rule: "Path:/arbois",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
"server-test1-315a41140f1bd825b066e39686c18482": {
|
||||
URL: "https://127.0.0.1:2504",
|
||||
Weight: 90,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
provider := &Provider{
|
||||
Domain: "docker.localhost",
|
||||
ExposedByDefault: true,
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var dockerDataList []dockerData
|
||||
for _, container := range test.containers {
|
||||
dData := parseContainer(container)
|
||||
dockerDataList = append(dockerDataList, dData)
|
||||
}
|
||||
|
||||
actualConfig := provider.buildConfiguration(dockerDataList)
|
||||
require.NotNil(t, actualConfig, "actualConfig")
|
||||
|
||||
assert.EqualValues(t, test.expectedBackends, actualConfig.Backends)
|
||||
assert.EqualValues(t, test.expectedFrontends, actualConfig.Frontends)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,480 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/containous/traefik/version"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
dockercontainertypes "github.com/docker/docker/api/types/container"
|
||||
eventtypes "github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
)
|
||||
|
||||
const (
|
||||
// SwarmAPIVersion is a constant holding the version of the Provider API traefik will use
|
||||
SwarmAPIVersion = "1.24"
|
||||
// SwarmDefaultWatchTime is the duration of the interval when polling docker
|
||||
SwarmDefaultWatchTime = 15 * time.Second
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||
Endpoint string `description:"Docker server endpoint. Can be a tcp or a unix socket endpoint"`
|
||||
Domain string `description:"Default domain used"`
|
||||
TLS *types.ClientTLS `description:"Enable Docker TLS support" export:"true"`
|
||||
ExposedByDefault bool `description:"Expose containers by default" export:"true"`
|
||||
UseBindPortIP bool `description:"Use the ip address from the bound port, rather than from the inner network" export:"true"`
|
||||
SwarmMode bool `description:"Use Docker on Swarm Mode" export:"true"`
|
||||
Network string `description:"Default Docker network used" export:"true"`
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
return p.BaseProvider.Init(constraints)
|
||||
}
|
||||
|
||||
// dockerData holds the data needed by the Provider.
|
||||
type dockerData struct {
|
||||
ServiceName string
|
||||
Name string
|
||||
Labels map[string]string // List of labels set to container or service
|
||||
NetworkSettings networkSettings
|
||||
Health string
|
||||
Node *dockertypes.ContainerNode
|
||||
SegmentLabels map[string]string
|
||||
SegmentName string
|
||||
}
|
||||
|
||||
// networkSettings holds the network data needed by the Provider.
|
||||
type networkSettings struct {
|
||||
NetworkMode dockercontainertypes.NetworkMode
|
||||
Ports nat.PortMap
|
||||
Networks map[string]*networkData
|
||||
}
|
||||
|
||||
// networkData holds the data of a single network used by the Provider.
|
||||
type networkData struct {
|
||||
Name string
|
||||
Addr string
|
||||
Port int
|
||||
Protocol string
|
||||
ID string
|
||||
}
|
||||
|
||||
func (p *Provider) createClient() (client.APIClient, error) {
|
||||
var httpClient *http.Client
|
||||
|
||||
if p.TLS != nil {
|
||||
config, err := p.TLS.CreateTLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: config,
|
||||
}
|
||||
|
||||
hostURL, err := client.ParseHostURL(p.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := sockets.ConfigureTransport(tr, hostURL.Scheme, hostURL.Host); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
httpClient = &http.Client{
|
||||
Transport: tr,
|
||||
}
|
||||
}
|
||||
|
||||
httpHeaders := map[string]string{
|
||||
"User-Agent": "Traefik " + version.Version,
|
||||
}
|
||||
|
||||
var apiVersion string
|
||||
if p.SwarmMode {
|
||||
apiVersion = SwarmAPIVersion
|
||||
} else {
|
||||
apiVersion = DockerAPIVersion
|
||||
}
|
||||
|
||||
return client.NewClient(p.Endpoint, apiVersion, httpClient, httpHeaders)
|
||||
}
|
||||
|
||||
// Provide allows the docker provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
pool.GoCtx(func(routineCtx context.Context) {
|
||||
operation := func() error {
|
||||
var err error
|
||||
ctx, cancel := context.WithCancel(routineCtx)
|
||||
defer cancel()
|
||||
dockerClient, err := p.createClient()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create a client for docker, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
serverVersion, err := dockerClient.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to retrieve information of the docker client and server host: %s", err)
|
||||
return err
|
||||
}
|
||||
log.Debugf("Provider connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion)
|
||||
var dockerDataList []dockerData
|
||||
if p.SwarmMode {
|
||||
dockerDataList, err = listServices(ctx, dockerClient)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list services for docker swarm mode, error %s", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
dockerDataList, err = listContainers(ctx, dockerClient)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list containers for docker, error %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
configuration := p.buildConfiguration(dockerDataList)
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "docker",
|
||||
Configuration: configuration,
|
||||
}
|
||||
if p.Watch {
|
||||
if p.SwarmMode {
|
||||
errChan := make(chan error)
|
||||
// TODO: This needs to be changed. Linked to Swarm events docker/docker#23827
|
||||
ticker := time.NewTicker(SwarmDefaultWatchTime)
|
||||
pool.GoCtx(func(ctx context.Context) {
|
||||
defer close(errChan)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
services, err := listServices(ctx, dockerClient)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list services for docker, error %s", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
configuration := p.buildConfiguration(services)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "docker",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
if err, ok := <-errChan; ok {
|
||||
return err
|
||||
}
|
||||
// channel closed
|
||||
|
||||
} else {
|
||||
f := filters.NewArgs()
|
||||
f.Add("type", "container")
|
||||
options := dockertypes.EventsOptions{
|
||||
Filters: f,
|
||||
}
|
||||
|
||||
startStopHandle := func(m eventtypes.Message) {
|
||||
log.Debugf("Provider event received %+v", m)
|
||||
containers, err := listContainers(ctx, dockerClient)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list containers for docker, error %s", err)
|
||||
// Call cancel to get out of the monitor
|
||||
return
|
||||
}
|
||||
configuration := p.buildConfiguration(containers)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "docker",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eventsc, errc := dockerClient.Events(ctx, options)
|
||||
for {
|
||||
select {
|
||||
case event := <-eventsc:
|
||||
if event.Action == "start" ||
|
||||
event.Action == "die" ||
|
||||
strings.HasPrefix(event.Action, "health_status") {
|
||||
startStopHandle(event)
|
||||
}
|
||||
case err := <-errc:
|
||||
if err == io.EOF {
|
||||
log.Debug("Provider event stream closed")
|
||||
}
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Provider connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), routineCtx), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot connect to docker server %+v", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func listContainers(ctx context.Context, dockerClient client.ContainerAPIClient) ([]dockerData, error) {
|
||||
containerList, err := dockerClient.ContainerList(ctx, dockertypes.ContainerListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var containersInspected []dockerData
|
||||
// inspect each container
|
||||
for _, container := range containerList {
|
||||
dData := inspectContainers(ctx, dockerClient, container.ID)
|
||||
if len(dData.Name) > 0 {
|
||||
containersInspected = append(containersInspected, dData)
|
||||
}
|
||||
}
|
||||
return containersInspected, nil
|
||||
}
|
||||
|
||||
func inspectContainers(ctx context.Context, dockerClient client.ContainerAPIClient, containerID string) dockerData {
|
||||
dData := dockerData{}
|
||||
containerInspected, err := dockerClient.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to inspect container %s, error: %s", containerID, err)
|
||||
} else {
|
||||
// This condition avoids an empty IP (see https://github.com/containous/traefik/issues/2459).
|
||||
// We only register containers which are running.
|
||||
if containerInspected.ContainerJSONBase != nil && containerInspected.ContainerJSONBase.State != nil && containerInspected.ContainerJSONBase.State.Running {
|
||||
dData = parseContainer(containerInspected)
|
||||
}
|
||||
}
|
||||
return dData
|
||||
}
|
||||
|
||||
func parseContainer(container dockertypes.ContainerJSON) dockerData {
|
||||
dData := dockerData{
|
||||
NetworkSettings: networkSettings{},
|
||||
}
|
||||
|
||||
if container.ContainerJSONBase != nil {
|
||||
dData.Name = container.ContainerJSONBase.Name
|
||||
dData.ServiceName = dData.Name // Default ServiceName to be the container's Name.
|
||||
dData.Node = container.ContainerJSONBase.Node
|
||||
|
||||
if container.ContainerJSONBase.HostConfig != nil {
|
||||
dData.NetworkSettings.NetworkMode = container.ContainerJSONBase.HostConfig.NetworkMode
|
||||
}
|
||||
|
||||
if container.State != nil && container.State.Health != nil {
|
||||
dData.Health = container.State.Health.Status
|
||||
}
|
||||
}
|
||||
|
||||
if container.Config != nil && container.Config.Labels != nil {
|
||||
dData.Labels = container.Config.Labels
|
||||
}
|
||||
|
||||
if container.NetworkSettings != nil {
|
||||
if container.NetworkSettings.Ports != nil {
|
||||
dData.NetworkSettings.Ports = container.NetworkSettings.Ports
|
||||
}
|
||||
if container.NetworkSettings.Networks != nil {
|
||||
dData.NetworkSettings.Networks = make(map[string]*networkData)
|
||||
for name, containerNetwork := range container.NetworkSettings.Networks {
|
||||
dData.NetworkSettings.Networks[name] = &networkData{
|
||||
ID: containerNetwork.NetworkID,
|
||||
Name: name,
|
||||
Addr: containerNetwork.IPAddress,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return dData
|
||||
}
|
||||
|
||||
func listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerData, error) {
|
||||
serviceList, err := dockerClient.ServiceList(ctx, dockertypes.ServiceListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serverVersion, err := dockerClient.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
networkListArgs := filters.NewArgs()
|
||||
// https://docs.docker.com/engine/api/v1.29/#tag/Network (Docker 17.06)
|
||||
if versions.GreaterThanOrEqualTo(serverVersion.APIVersion, "1.29") {
|
||||
networkListArgs.Add("scope", "swarm")
|
||||
} else {
|
||||
networkListArgs.Add("driver", "overlay")
|
||||
}
|
||||
|
||||
networkList, err := dockerClient.NetworkList(ctx, dockertypes.NetworkListOptions{Filters: networkListArgs})
|
||||
if err != nil {
|
||||
log.Debugf("Failed to network inspect on client for docker, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
networkMap := make(map[string]*dockertypes.NetworkResource)
|
||||
for _, network := range networkList {
|
||||
networkToAdd := network
|
||||
networkMap[network.ID] = &networkToAdd
|
||||
}
|
||||
|
||||
var dockerDataList []dockerData
|
||||
var dockerDataListTasks []dockerData
|
||||
|
||||
for _, service := range serviceList {
|
||||
dData := parseService(service, networkMap)
|
||||
|
||||
if isBackendLBSwarm(dData) {
|
||||
if len(dData.NetworkSettings.Networks) > 0 {
|
||||
dockerDataList = append(dockerDataList, dData)
|
||||
}
|
||||
} else {
|
||||
isGlobalSvc := service.Spec.Mode.Global != nil
|
||||
dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dData, networkMap, isGlobalSvc)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
} else {
|
||||
dockerDataList = append(dockerDataList, dockerDataListTasks...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return dockerDataList, err
|
||||
}
|
||||
|
||||
func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes.NetworkResource) dockerData {
|
||||
dData := dockerData{
|
||||
ServiceName: service.Spec.Annotations.Name,
|
||||
Name: service.Spec.Annotations.Name,
|
||||
Labels: service.Spec.Annotations.Labels,
|
||||
NetworkSettings: networkSettings{},
|
||||
}
|
||||
|
||||
if service.Spec.EndpointSpec != nil {
|
||||
if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
|
||||
if isBackendLBSwarm(dData) {
|
||||
log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Traefik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
|
||||
}
|
||||
} else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
|
||||
dData.NetworkSettings.Networks = make(map[string]*networkData)
|
||||
for _, virtualIP := range service.Endpoint.VirtualIPs {
|
||||
networkService := networkMap[virtualIP.NetworkID]
|
||||
if networkService != nil {
|
||||
if len(virtualIP.Addr) > 0 {
|
||||
ip, _, _ := net.ParseCIDR(virtualIP.Addr)
|
||||
network := &networkData{
|
||||
Name: networkService.Name,
|
||||
ID: virtualIP.NetworkID,
|
||||
Addr: ip.String(),
|
||||
}
|
||||
dData.NetworkSettings.Networks[network.Name] = network
|
||||
} else {
|
||||
log.Debugf("No virtual IPs found in network %s", virtualIP.NetworkID)
|
||||
}
|
||||
} else {
|
||||
log.Debugf("Network not found, id: %s", virtualIP.NetworkID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return dData
|
||||
}
|
||||
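Swarm reports virtual IPs in CIDR notation, which is why parseService runs them through net.ParseCIDR and keeps only the address part. A short standard-library illustration:

package main

import (
    "fmt"
    "net"
)

func main() {
    // A swarm virtual IP as returned by the API, in CIDR notation.
    addr := "10.11.12.13/24"

    // parseService keeps only the IP part and discards the network.
    ip, _, err := net.ParseCIDR(addr)
    if err != nil {
        fmt.Println("invalid address:", err)
        return
    }
    fmt.Println(ip.String()) // 10.11.12.13
}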
|
||||
func listTasks(ctx context.Context, dockerClient client.APIClient, serviceID string,
|
||||
serviceDockerData dockerData, networkMap map[string]*dockertypes.NetworkResource, isGlobalSvc bool) ([]dockerData, error) {
|
||||
serviceIDFilter := filters.NewArgs()
|
||||
serviceIDFilter.Add("service", serviceID)
|
||||
serviceIDFilter.Add("desired-state", "running")
|
||||
|
||||
taskList, err := dockerClient.TaskList(ctx, dockertypes.TaskListOptions{Filters: serviceIDFilter})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dockerDataList []dockerData
|
||||
for _, task := range taskList {
|
||||
if task.Status.State != swarmtypes.TaskStateRunning {
|
||||
continue
|
||||
}
|
||||
dData := parseTasks(task, serviceDockerData, networkMap, isGlobalSvc)
|
||||
if len(dData.NetworkSettings.Networks) > 0 {
|
||||
dockerDataList = append(dockerDataList, dData)
|
||||
}
|
||||
}
|
||||
return dockerDataList, err
|
||||
}
|
||||
|
||||
func parseTasks(task swarmtypes.Task, serviceDockerData dockerData,
|
||||
networkMap map[string]*dockertypes.NetworkResource, isGlobalSvc bool) dockerData {
|
||||
dData := dockerData{
|
||||
ServiceName: serviceDockerData.Name,
|
||||
Name: serviceDockerData.Name + "." + strconv.Itoa(task.Slot),
|
||||
Labels: serviceDockerData.Labels,
|
||||
NetworkSettings: networkSettings{},
|
||||
}
|
||||
|
||||
if isGlobalSvc {
|
||||
dData.Name = serviceDockerData.Name + "." + task.ID
|
||||
}
|
||||
|
||||
if task.NetworksAttachments != nil {
|
||||
dData.NetworkSettings.Networks = make(map[string]*networkData)
|
||||
for _, virtualIP := range task.NetworksAttachments {
|
||||
if networkService, present := networkMap[virtualIP.Network.ID]; present {
|
||||
if len(virtualIP.Addresses) > 0 {
|
||||
// Not sure about this next loop: when would a task have multiple IPs for the same network?
|
||||
for _, addr := range virtualIP.Addresses {
|
||||
ip, _, _ := net.ParseCIDR(addr)
|
||||
network := &networkData{
|
||||
ID: virtualIP.Network.ID,
|
||||
Name: networkService.Name,
|
||||
Addr: ip.String(),
|
||||
}
|
||||
dData.NetworkSettings.Networks[network.Name] = network
|
||||
}
|
||||
} else {
|
||||
log.Debugf("No IP addresses found for network %s", virtualIP.Network.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return dData
|
||||
}
|
||||
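The naming rule in parseTasks, restated as a small runnable sketch (taskInfo is a simplified stand-in for swarmtypes.Task): replicated tasks are suffixed with their slot, global tasks with their task ID.

package main

import (
    "fmt"
    "strconv"
)

// taskInfo is a simplified stand-in for swarmtypes.Task.
type taskInfo struct {
    ID   string
    Slot int
}

// taskName mirrors the rule applied in parseTasks.
func taskName(serviceName string, t taskInfo, isGlobalSvc bool) string {
    if isGlobalSvc {
        return serviceName + "." + t.ID
    }
    return serviceName + "." + strconv.Itoa(t.Slot)
}

func main() {
    fmt.Println(taskName("container", taskInfo{ID: "id1", Slot: 1}, false)) // container.1
    fmt.Println(taskName("container", taskInfo{ID: "id1", Slot: 1}, true))  // container.id1
}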
|
|
@ -1,8 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package docker
|
||||
|
||||
const (
|
||||
// DockerAPIVersion is a constant holding the version of the Provider API traefik will use
|
||||
DockerAPIVersion = "1.21"
|
||||
)
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
package docker
|
||||
|
||||
const (
|
||||
// DockerAPIVersion is a constant holding the version of the Provider API traefik will use
|
||||
DockerAPIVersion string = "1.24"
|
||||
)
|
||||
|
|
@ -1,397 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
docker "github.com/docker/docker/api/types"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type fakeTasksClient struct {
|
||||
dockerclient.APIClient
|
||||
tasks []swarm.Task
|
||||
container dockertypes.ContainerJSON
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *fakeTasksClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
|
||||
return c.tasks, c.err
|
||||
}
|
||||
|
||||
func (c *fakeTasksClient) ContainerInspect(ctx context.Context, container string) (dockertypes.ContainerJSON, error) {
|
||||
return c.container, c.err
|
||||
}
|
||||
|
||||
func TestListTasks(t *testing.T) {
|
||||
testCases := []struct {
|
||||
service swarm.Service
|
||||
tasks []swarm.Task
|
||||
isGlobalSVC bool
|
||||
expectedTasks []string
|
||||
networks map[string]*docker.NetworkResource
|
||||
}{
|
||||
{
|
||||
service: swarmService(serviceName("container")),
|
||||
tasks: []swarm.Task{
|
||||
swarmTask("id1",
|
||||
taskSlot(1),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.1"}),
|
||||
taskStatus(taskState(swarm.TaskStateRunning)),
|
||||
),
|
||||
swarmTask("id2",
|
||||
taskSlot(2),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.2"}),
|
||||
taskStatus(taskState(swarm.TaskStatePending)),
|
||||
),
|
||||
swarmTask("id3",
|
||||
taskSlot(3),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.3"}),
|
||||
),
|
||||
swarmTask("id4",
|
||||
taskSlot(4),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.4"}),
|
||||
taskStatus(taskState(swarm.TaskStateRunning)),
|
||||
),
|
||||
swarmTask("id5",
|
||||
taskSlot(5),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.5"}),
|
||||
taskStatus(taskState(swarm.TaskStateFailed)),
|
||||
),
|
||||
},
|
||||
isGlobalSVC: false,
|
||||
expectedTasks: []string{
|
||||
"container.1",
|
||||
"container.4",
|
||||
},
|
||||
networks: map[string]*docker.NetworkResource{
|
||||
"1": {
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for caseID, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(caseID), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
dockerData := parseService(test.service, test.networks)
|
||||
dockerClient := &fakeTasksClient{tasks: test.tasks}
|
||||
taskDockerData, _ := listTasks(context.Background(), dockerClient, test.service.ID, dockerData, test.networks, test.isGlobalSVC)
|
||||
|
||||
if len(test.expectedTasks) != len(taskDockerData) {
|
||||
t.Errorf("expected tasks %v, got %v", spew.Sdump(test.expectedTasks), spew.Sdump(taskDockerData))
|
||||
}
|
||||
|
||||
for i, taskID := range test.expectedTasks {
|
||||
if taskDockerData[i].Name != taskID {
|
||||
t.Errorf("expect task id %v, got %v", taskID, taskDockerData[i].Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type fakeServicesClient struct {
|
||||
dockerclient.APIClient
|
||||
dockerVersion string
|
||||
networks []dockertypes.NetworkResource
|
||||
services []swarm.Service
|
||||
tasks []swarm.Task
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *fakeServicesClient) ServiceList(ctx context.Context, options dockertypes.ServiceListOptions) ([]swarm.Service, error) {
|
||||
return c.services, c.err
|
||||
}
|
||||
|
||||
func (c *fakeServicesClient) ServerVersion(ctx context.Context) (dockertypes.Version, error) {
|
||||
return dockertypes.Version{APIVersion: c.dockerVersion}, c.err
|
||||
}
|
||||
|
||||
func (c *fakeServicesClient) NetworkList(ctx context.Context, options dockertypes.NetworkListOptions) ([]dockertypes.NetworkResource, error) {
|
||||
return c.networks, c.err
|
||||
}
|
||||
|
||||
func (c *fakeServicesClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
|
||||
return c.tasks, c.err
|
||||
}
|
||||
|
||||
func TestListServices(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
services []swarm.Service
|
||||
tasks []swarm.Task
|
||||
dockerVersion string
|
||||
networks []dockertypes.NetworkResource
|
||||
expectedServices []string
|
||||
}{
|
||||
{
|
||||
desc: "Should return no service due to no networks defined",
|
||||
services: []swarm.Service{
|
||||
swarmService(
|
||||
serviceName("service1"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
labelBackendLoadBalancerSwarm: "true",
|
||||
}),
|
||||
withEndpointSpec(modeVIP),
|
||||
withEndpoint(
|
||||
virtualIP("1", "10.11.12.13/24"),
|
||||
virtualIP("2", "10.11.12.99/24"),
|
||||
)),
|
||||
swarmService(
|
||||
serviceName("service2"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
labelBackendLoadBalancerSwarm: "true",
|
||||
}),
|
||||
withEndpointSpec(modeDNSSR)),
|
||||
},
|
||||
dockerVersion: "1.30",
|
||||
networks: []dockertypes.NetworkResource{},
|
||||
expectedServices: []string{},
|
||||
},
|
||||
{
|
||||
desc: "Should return only service1",
|
||||
services: []swarm.Service{
|
||||
swarmService(
|
||||
serviceName("service1"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
labelBackendLoadBalancerSwarm: "true",
|
||||
}),
|
||||
withEndpointSpec(modeVIP),
|
||||
withEndpoint(
|
||||
virtualIP("yk6l57rfwizjzxxzftn4amaot", "10.11.12.13/24"),
|
||||
virtualIP("2", "10.11.12.99/24"),
|
||||
)),
|
||||
swarmService(
|
||||
serviceName("service2"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
labelBackendLoadBalancerSwarm: "true",
|
||||
}),
|
||||
withEndpointSpec(modeDNSSR)),
|
||||
},
|
||||
dockerVersion: "1.30",
|
||||
networks: []dockertypes.NetworkResource{
|
||||
{
|
||||
Name: "network_name",
|
||||
ID: "yk6l57rfwizjzxxzftn4amaot",
|
||||
Created: time.Now(),
|
||||
Scope: "swarm",
|
||||
Driver: "overlay",
|
||||
EnableIPv6: false,
|
||||
Internal: true,
|
||||
Ingress: false,
|
||||
ConfigOnly: false,
|
||||
Options: map[string]string{
|
||||
"com.docker.network.driver.overlay.vxlanid_list": "4098",
|
||||
"com.docker.network.enable_ipv6": "false",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"com.docker.stack.namespace": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedServices: []string{
|
||||
"service1",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should return service1 and service2",
|
||||
services: []swarm.Service{
|
||||
swarmService(
|
||||
serviceName("service1"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
}),
|
||||
withEndpointSpec(modeVIP),
|
||||
withEndpoint(
|
||||
virtualIP("yk6l57rfwizjzxxzftn4amaot", "10.11.12.13/24"),
|
||||
virtualIP("2", "10.11.12.99/24"),
|
||||
)),
|
||||
swarmService(
|
||||
serviceName("service2"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
}),
|
||||
withEndpointSpec(modeDNSSR)),
|
||||
},
|
||||
tasks: []swarm.Task{
|
||||
swarmTask("id1",
|
||||
taskNetworkAttachment("yk6l57rfwizjzxxzftn4amaot", "network_name", "overlay", []string{"127.0.0.1"}),
|
||||
taskStatus(taskState(swarm.TaskStateRunning)),
|
||||
),
|
||||
swarmTask("id2",
|
||||
taskNetworkAttachment("yk6l57rfwizjzxxzftn4amaot", "network_name", "overlay", []string{"127.0.0.1"}),
|
||||
taskStatus(taskState(swarm.TaskStateRunning)),
|
||||
),
|
||||
},
|
||||
dockerVersion: "1.30",
|
||||
networks: []dockertypes.NetworkResource{
|
||||
{
|
||||
Name: "network_name",
|
||||
ID: "yk6l57rfwizjzxxzftn4amaot",
|
||||
Created: time.Now(),
|
||||
Scope: "swarm",
|
||||
Driver: "overlay",
|
||||
EnableIPv6: false,
|
||||
Internal: true,
|
||||
Ingress: false,
|
||||
ConfigOnly: false,
|
||||
Options: map[string]string{
|
||||
"com.docker.network.driver.overlay.vxlanid_list": "4098",
|
||||
"com.docker.network.enable_ipv6": "false",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"com.docker.stack.namespace": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedServices: []string{
|
||||
"service1.0",
|
||||
"service1.0",
|
||||
"service2.0",
|
||||
"service2.0",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for caseID, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(caseID), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
dockerClient := &fakeServicesClient{services: test.services, tasks: test.tasks, dockerVersion: test.dockerVersion, networks: test.networks}
|
||||
|
||||
serviceDockerData, err := listServices(context.Background(), dockerClient)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, len(test.expectedServices), len(serviceDockerData))
|
||||
for i, serviceName := range test.expectedServices {
|
||||
assert.Equal(t, serviceName, serviceDockerData[i].Name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSwarmTaskParsing(t *testing.T) {
|
||||
testCases := []struct {
|
||||
service swarm.Service
|
||||
tasks []swarm.Task
|
||||
isGlobalSVC bool
|
||||
expected map[string]dockerData
|
||||
networks map[string]*docker.NetworkResource
|
||||
}{
|
||||
{
|
||||
service: swarmService(serviceName("container")),
|
||||
tasks: []swarm.Task{
|
||||
swarmTask("id1", taskSlot(1)),
|
||||
swarmTask("id2", taskSlot(2)),
|
||||
swarmTask("id3", taskSlot(3)),
|
||||
},
|
||||
isGlobalSVC: false,
|
||||
expected: map[string]dockerData{
|
||||
"id1": {
|
||||
Name: "container.1",
|
||||
},
|
||||
"id2": {
|
||||
Name: "container.2",
|
||||
},
|
||||
"id3": {
|
||||
Name: "container.3",
|
||||
},
|
||||
},
|
||||
networks: map[string]*docker.NetworkResource{
|
||||
"1": {
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
service: swarmService(serviceName("container")),
|
||||
tasks: []swarm.Task{
|
||||
swarmTask("id1"),
|
||||
swarmTask("id2"),
|
||||
swarmTask("id3"),
|
||||
},
|
||||
isGlobalSVC: true,
|
||||
expected: map[string]dockerData{
|
||||
"id1": {
|
||||
Name: "container.id1",
|
||||
},
|
||||
"id2": {
|
||||
Name: "container.id2",
|
||||
},
|
||||
"id3": {
|
||||
Name: "container.id3",
|
||||
},
|
||||
},
|
||||
networks: map[string]*docker.NetworkResource{
|
||||
"1": {
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
service: swarmService(
|
||||
serviceName("container"),
|
||||
withEndpointSpec(modeVIP),
|
||||
withEndpoint(
|
||||
virtualIP("1", ""),
|
||||
),
|
||||
),
|
||||
tasks: []swarm.Task{
|
||||
swarmTask(
|
||||
"id1",
|
||||
taskNetworkAttachment("1", "vlan", "macvlan", []string{"127.0.0.1"}),
|
||||
taskStatus(
|
||||
taskState(swarm.TaskStateRunning),
|
||||
taskContainerStatus("c1"),
|
||||
),
|
||||
),
|
||||
},
|
||||
isGlobalSVC: true,
|
||||
expected: map[string]dockerData{
|
||||
"id1": {
|
||||
Name: "container.id1",
|
||||
NetworkSettings: networkSettings{
|
||||
Networks: map[string]*networkData{
|
||||
"vlan": {
|
||||
Name: "vlan",
|
||||
Addr: "10.11.12.13",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
networks: map[string]*docker.NetworkResource{
|
||||
"1": {
|
||||
Name: "vlan",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for caseID, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(caseID), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
dData := parseService(test.service, test.networks)
|
||||
|
||||
for _, task := range test.tasks {
|
||||
taskDockerData := parseTasks(task, dData, test.networks, test.isGlobalSVC)
|
||||
expected := test.expected[task.ID]
|
||||
assert.Equal(t, expected.Name, taskDockerData.Name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,217 +0,0 @@
|
|||
package dynamodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/defaults"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configuration for provider.
|
||||
type Provider struct {
|
||||
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||
AccessKeyID string `description:"The AWS credentials access key to use for making requests"`
|
||||
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||
Region string `description:"The AWS region to use for requests" export:"true"`
|
||||
SecretAccessKey string `description:"The AWS credentials secret key to use for making requests"`
|
||||
TableName string `description:"The AWS dynamodb table that stores configuration for traefik" export:"true"`
|
||||
Endpoint string `description:"The endpoint of a dynamodb. Used for testing with a local dynamodb"`
|
||||
}
|
||||
|
||||
type dynamoClient struct {
|
||||
db dynamodbiface.DynamoDBAPI
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
return p.BaseProvider.Init(constraints)
|
||||
}
|
||||
|
||||
// createClient configures AWS credentials and creates a dynamoClient
|
||||
func (p *Provider) createClient() (*dynamoClient, error) {
|
||||
log.Info("Creating Provider client...")
|
||||
sess, err := session.NewSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.Region == "" {
|
||||
return nil, errors.New("no Region provided for Provider")
|
||||
}
|
||||
cfg := &aws.Config{
|
||||
Region: &p.Region,
|
||||
Credentials: credentials.NewChainCredentials(
|
||||
[]credentials.Provider{
|
||||
&credentials.StaticProvider{
|
||||
Value: credentials.Value{
|
||||
AccessKeyID: p.AccessKeyID,
|
||||
SecretAccessKey: p.SecretAccessKey,
|
||||
},
|
||||
},
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{},
|
||||
defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()),
|
||||
}),
|
||||
}
|
||||
|
||||
if p.Trace {
|
||||
cfg.WithLogger(aws.LoggerFunc(func(args ...interface{}) {
|
||||
log.Debug(args...)
|
||||
}))
|
||||
}
|
||||
|
||||
if p.Endpoint != "" {
|
||||
cfg.Endpoint = aws.String(p.Endpoint)
|
||||
}
|
||||
|
||||
return &dynamoClient{
|
||||
db: dynamodb.New(sess, cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scanTable scans the given table and returns a slice of all items in the table
|
||||
func (p *Provider) scanTable(client *dynamoClient) ([]map[string]*dynamodb.AttributeValue, error) {
|
||||
log.Debugf("Scanning Provider table: %s ...", p.TableName)
|
||||
params := &dynamodb.ScanInput{
|
||||
TableName: aws.String(p.TableName),
|
||||
}
|
||||
items := make([]map[string]*dynamodb.AttributeValue, 0)
|
||||
err := client.db.ScanPages(params,
|
||||
func(page *dynamodb.ScanOutput, lastPage bool) bool {
|
||||
items = append(items, page.Items...)
|
||||
return !lastPage
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to scan Provider table %s", p.TableName)
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Successfully scanned Provider table %s", p.TableName)
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// buildConfiguration retrieves items from dynamodb and converts them into Backends and Frontends in a Configuration
|
||||
func (p *Provider) buildConfiguration(client *dynamoClient) (*types.Configuration, error) {
|
||||
items, err := p.scanTable(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Number of Items retrieved from Provider: %d", len(items))
|
||||
backends := make(map[string]*types.Backend)
|
||||
frontends := make(map[string]*types.Frontend)
|
||||
// unmarshal dynamoAttributes into Backends and Frontends
|
||||
for i, item := range items {
|
||||
log.Debugf("Provider Item: %d\n%v", i, item)
|
||||
// determine the type of each item by checking whether it has
|
||||
// a backend or a frontend map
|
||||
if backend, exists := item["backend"]; exists {
|
||||
log.Debug("Unmarshaling backend from Provider...")
|
||||
tmpBackend := &types.Backend{}
|
||||
err = dynamodbattribute.Unmarshal(backend, tmpBackend)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
} else {
|
||||
backends[*item["name"].S] = tmpBackend
|
||||
log.Debug("Backend from Provider unmarshalled successfully")
|
||||
}
|
||||
} else if frontend, exists := item["frontend"]; exists {
|
||||
log.Debugf("Unmarshaling frontend from Provider...")
|
||||
tmpFrontend := &types.Frontend{}
|
||||
err = dynamodbattribute.Unmarshal(frontend, tmpFrontend)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
} else {
|
||||
frontends[*item["name"].S] = tmpFrontend
|
||||
log.Debug("Frontend from Provider unmarshalled successfully")
|
||||
}
|
||||
} else {
|
||||
log.Warnf("Error in format of Provider Item: %v", item)
|
||||
}
|
||||
}
|
||||
|
||||
return &types.Configuration{
|
||||
Backends: backends,
|
||||
Frontends: frontends,
|
||||
}, nil
|
||||
}
|
||||
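For context, buildConfiguration expects each table item to carry a "name" string plus either a "backend" or a "frontend" map attribute. A rough sketch of how such an item could be assembled, mirroring the mock in the test file of this commit (the backendDoc struct and the values are illustrative, not the real types.Backend):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/dynamodb"
    "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)

// backendDoc is a simplified stand-in for types.Backend.
type backendDoc struct {
    Servers map[string]string
}

func main() {
    attr, err := dynamodbattribute.Marshal(backendDoc{
        Servers: map[string]string{"server1": "http://10.0.0.1:80"},
    })
    if err != nil {
        panic(err)
    }

    // The item layout the provider looks for: "name" plus a "backend" (or "frontend") map.
    item := map[string]*dynamodb.AttributeValue{
        "id":      {S: aws.String("test.example.com_backend")},
        "name":    {S: aws.String("test.example.com")},
        "backend": attr,
    }
    fmt.Println(item)
}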
|
||||
// Provide provides the configuration to Traefik via the configuration channel.
|
||||
// If watch is enabled, it polls DynamoDB.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
handleCanceled := func(ctx context.Context, err error) error {
|
||||
if ctx.Err() == context.Canceled || err == context.Canceled {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
pool.Go(func(stop chan bool) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
safe.Go(func() {
|
||||
<-stop
|
||||
cancel()
|
||||
})
|
||||
|
||||
operation := func() error {
|
||||
awsClient, err := p.createClient()
|
||||
if err != nil {
|
||||
return handleCanceled(ctx, err)
|
||||
}
|
||||
|
||||
configuration, err := p.buildConfiguration(awsClient)
|
||||
if err != nil {
|
||||
return handleCanceled(ctx, err)
|
||||
}
|
||||
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "dynamodb",
|
||||
Configuration: configuration,
|
||||
}
|
||||
|
||||
if p.Watch {
|
||||
reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
|
||||
defer reload.Stop()
|
||||
for {
|
||||
log.Debug("Watching Provider...")
|
||||
select {
|
||||
case <-reload.C:
|
||||
configuration, err := p.buildConfiguration(awsClient)
|
||||
if err != nil {
|
||||
return handleCanceled(ctx, err)
|
||||
}
|
||||
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "dynamodb",
|
||||
Configuration: configuration,
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return handleCanceled(ctx, ctx.Err())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Provider error: %s time: %v", err.Error(), time)
|
||||
}
|
||||
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to connect to Provider. %s", err.Error())
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,140 +0,0 @@
|
|||
package dynamodb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
type mockDynamoDBClient struct {
|
||||
dynamodbiface.DynamoDBAPI
|
||||
testWithError bool
|
||||
}
|
||||
|
||||
var backend = &types.Backend{
|
||||
HealthCheck: &types.HealthCheck{
|
||||
Path: "/build",
|
||||
},
|
||||
Servers: map[string]types.Server{
|
||||
"server1": {
|
||||
URL: "http://test.traefik.io",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var frontend = &types.Frontend{
|
||||
EntryPoints: []string{"http"},
|
||||
Backend: "test.traefik.io",
|
||||
Routes: map[string]types.Route{
|
||||
"route1": {
|
||||
Rule: "Host:test.traefik.io",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// ScanPages simulates a call to ScanPages (see https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#DynamoDB.ScanPages)
|
||||
// by running the fn function twice and returning an item each time.
|
||||
func (m *mockDynamoDBClient) ScanPages(input *dynamodb.ScanInput, fn func(*dynamodb.ScanOutput, bool) bool) error {
|
||||
if m.testWithError {
|
||||
return errors.New("fake error")
|
||||
}
|
||||
attributeBackend, err := dynamodbattribute.Marshal(backend)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attributeFrontend, err := dynamodbattribute.Marshal(frontend)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fn(&dynamodb.ScanOutput{
|
||||
Items: []map[string]*dynamodb.AttributeValue{
|
||||
{
|
||||
"id": &dynamodb.AttributeValue{
|
||||
S: aws.String("test.traefik.io_backend"),
|
||||
},
|
||||
"name": &dynamodb.AttributeValue{
|
||||
S: aws.String("test.traefik.io"),
|
||||
},
|
||||
"backend": attributeBackend,
|
||||
},
|
||||
},
|
||||
}, false)
|
||||
|
||||
fn(&dynamodb.ScanOutput{
|
||||
Items: []map[string]*dynamodb.AttributeValue{
|
||||
{
|
||||
"id": &dynamodb.AttributeValue{
|
||||
S: aws.String("test.traefik.io_frontend"),
|
||||
},
|
||||
"name": &dynamodb.AttributeValue{
|
||||
S: aws.String("test.traefik.io"),
|
||||
},
|
||||
"frontend": attributeFrontend,
|
||||
},
|
||||
},
|
||||
}, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBuildConfigurationSuccessful(t *testing.T) {
|
||||
dbiface := &dynamoClient{
|
||||
db: &mockDynamoDBClient{
|
||||
testWithError: false,
|
||||
},
|
||||
}
|
||||
provider := Provider{}
|
||||
loadedConfig, err := provider.buildConfiguration(dbiface)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedConfig := &types.Configuration{
|
||||
Backends: map[string]*types.Backend{
|
||||
"test.traefik.io": backend,
|
||||
},
|
||||
Frontends: map[string]*types.Frontend{
|
||||
"test.traefik.io": frontend,
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(loadedConfig, expectedConfig) {
|
||||
t.Fatalf("Configurations did not match: %v %v", loadedConfig, expectedConfig)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildConfigurationFailure(t *testing.T) {
|
||||
dbiface := &dynamoClient{
|
||||
db: &mockDynamoDBClient{
|
||||
testWithError: true,
|
||||
},
|
||||
}
|
||||
provider := Provider{}
|
||||
_, err := provider.buildConfiguration(dbiface)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateClientSuccessful(t *testing.T) {
|
||||
provider := Provider{
|
||||
Region: "us-east-1",
|
||||
}
|
||||
_, err := provider.createClient()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateClientFailure(t *testing.T) {
|
||||
provider := Provider{}
|
||||
_, err := provider.createClient()
|
||||
if err == nil {
|
||||
t.Fatal("Expected error")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,80 +0,0 @@
|
|||
package ecs
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/service/ecs"
|
||||
)
|
||||
|
||||
func instance(ops ...func(*ecsInstance)) ecsInstance {
|
||||
e := &ecsInstance{
|
||||
containerDefinition: &ecs.ContainerDefinition{},
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(e)
|
||||
}
|
||||
|
||||
return *e
|
||||
}
|
||||
|
||||
func name(name string) func(*ecsInstance) {
|
||||
return func(e *ecsInstance) {
|
||||
e.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
func ID(ID string) func(*ecsInstance) {
|
||||
return func(e *ecsInstance) {
|
||||
e.ID = ID
|
||||
}
|
||||
}
|
||||
|
||||
func iMachine(opts ...func(*machine)) func(*ecsInstance) {
|
||||
return func(e *ecsInstance) {
|
||||
e.machine = &machine{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(e.machine)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mState(state string) func(*machine) {
|
||||
return func(m *machine) {
|
||||
m.state = state
|
||||
}
|
||||
}
|
||||
|
||||
func mPrivateIP(ip string) func(*machine) {
|
||||
return func(m *machine) {
|
||||
m.privateIP = ip
|
||||
}
|
||||
}
|
||||
|
||||
func mPorts(opts ...func(*portMapping)) func(*machine) {
|
||||
return func(m *machine) {
|
||||
for _, opt := range opts {
|
||||
p := &portMapping{}
|
||||
opt(p)
|
||||
m.ports = append(m.ports, *p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mPort(containerPort int32, hostPort int32) func(*portMapping) {
|
||||
return func(pm *portMapping) {
|
||||
pm.containerPort = int64(containerPort)
|
||||
pm.hostPort = int64(hostPort)
|
||||
}
|
||||
}
|
||||
|
||||
func labels(labels map[string]string) func(*ecsInstance) {
|
||||
return func(c *ecsInstance) {
|
||||
c.TraefikLabels = labels
|
||||
}
|
||||
}
|
||||
|
||||
func dockerLabels(labels map[string]*string) func(*ecsInstance) {
|
||||
return func(c *ecsInstance) {
|
||||
c.containerDefinition.DockerLabels = labels
|
||||
}
|
||||
}
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
package ecs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clusters holds ECS cluster names
|
||||
type Clusters []string
|
||||
|
||||
// Set adds string elements into the parser.
|
||||
// It splits str on "," and ";".
|
||||
func (c *Clusters) Set(str string) error {
|
||||
fargs := func(c rune) bool {
|
||||
return c == ',' || c == ';'
|
||||
}
|
||||
// split str using the separator function
|
||||
slice := strings.FieldsFunc(str, fargs)
|
||||
*c = append(*c, slice...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the Clusters slice
|
||||
func (c *Clusters) Get() interface{} { return *c }
|
||||
|
||||
// String returns the slice as a string
|
||||
func (c *Clusters) String() string { return fmt.Sprintf("%v", *c) }
|
||||
|
||||
// SetValue sets Clusters into the parser
|
||||
func (c *Clusters) SetValue(val interface{}) {
|
||||
*c = val.(Clusters)
|
||||
}
|
||||
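The separator rule used by Set above, shown with the standard library only:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // The same rule as Clusters.Set: split on both ',' and ';'.
    isSep := func(r rune) bool { return r == ',' || r == ';' }

    clusters := strings.FieldsFunc("cluster1,cluster2;cluster3", isSep)
    fmt.Println(clusters) // [cluster1 cluster2 cluster3]
}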
|
|
@ -1,145 +0,0 @@
|
|||
package ecs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestClustersSet(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
value string
|
||||
expected Clusters
|
||||
}{
|
||||
{
|
||||
desc: "One value should return Clusters of size 1",
|
||||
value: "cluster",
|
||||
expected: Clusters{"cluster"},
|
||||
},
|
||||
{
|
||||
desc: "Two values separated by comma should return Clusters of size 2",
|
||||
value: "cluster1,cluster2",
|
||||
expected: Clusters{"cluster1", "cluster2"},
|
||||
},
|
||||
{
|
||||
desc: "Two values separated by semicolon should return Clusters of size 2",
|
||||
value: "cluster1;cluster2",
|
||||
expected: Clusters{"cluster1", "cluster2"},
|
||||
},
|
||||
{
|
||||
desc: "Three values separated by comma and semicolon should return Clusters of size 3",
|
||||
value: "cluster1,cluster2;cluster3",
|
||||
expected: Clusters{"cluster1", "cluster2", "cluster3"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var clusters Clusters
|
||||
err := clusters.Set(test.value)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.expected, clusters)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClustersGet(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
clusters Clusters
|
||||
expected Clusters
|
||||
}{
|
||||
{
|
||||
desc: "Should return 1 cluster",
|
||||
clusters: Clusters{"cluster"},
|
||||
expected: Clusters{"cluster"},
|
||||
},
|
||||
{
|
||||
desc: "Should return 2 clusters",
|
||||
clusters: Clusters{"cluster1", "cluster2"},
|
||||
expected: Clusters{"cluster1", "cluster2"},
|
||||
},
|
||||
{
|
||||
desc: "Should return 3 clusters",
|
||||
clusters: Clusters{"cluster1", "cluster2", "cluster3"},
|
||||
expected: Clusters{"cluster1", "cluster2", "cluster3"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual := test.clusters.Get()
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClustersString(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
clusters Clusters
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
desc: "Should return 1 cluster",
|
||||
clusters: Clusters{"cluster"},
|
||||
expected: "[cluster]",
|
||||
},
|
||||
{
|
||||
desc: "Should return 2 clusters",
|
||||
clusters: Clusters{"cluster1", "cluster2"},
|
||||
expected: "[cluster1 cluster2]",
|
||||
},
|
||||
{
|
||||
desc: "Should return 3 clusters",
|
||||
clusters: Clusters{"cluster1", "cluster2", "cluster3"},
|
||||
expected: "[cluster1 cluster2 cluster3]",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual := test.clusters.String()
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClustersSetValue(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
clusters Clusters
|
||||
expected Clusters
|
||||
}{
|
||||
{
|
||||
desc: "Should return Clusters of size 1",
|
||||
clusters: Clusters{"cluster"},
|
||||
expected: Clusters{"cluster"},
|
||||
},
|
||||
{
|
||||
desc: "Should return Clusters of size 2",
|
||||
clusters: Clusters{"cluster1", "cluster2"},
|
||||
expected: Clusters{"cluster1", "cluster2"},
|
||||
},
|
||||
{
|
||||
desc: "Should return Clusters of size 3",
|
||||
clusters: Clusters{"cluster1", "cluster2", "cluster3"},
|
||||
expected: Clusters{"cluster1", "cluster2", "cluster3"},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var slice Clusters
|
||||
slice.SetValue(test.clusters)
|
||||
assert.Equal(t, test.expected, slice)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,252 +0,0 @@
|
|||
package ecs
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/BurntSushi/ty/fun"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
// buildConfiguration fills the config template with the given instances
|
||||
func (p *Provider) buildConfiguration(instances []ecsInstance) (*types.Configuration, error) {
|
||||
var ecsFuncMap = template.FuncMap{
|
||||
// Backend functions
|
||||
"getHost": getHost,
|
||||
"getPort": getPort,
|
||||
"getCircuitBreaker": label.GetCircuitBreaker,
|
||||
"getLoadBalancer": label.GetLoadBalancer,
|
||||
"getMaxConn": label.GetMaxConn,
|
||||
"getHealthCheck": label.GetHealthCheck,
|
||||
"getBuffering": label.GetBuffering,
|
||||
"getResponseForwarding": label.GetResponseForwarding,
|
||||
|
||||
"getServers": getServers,
|
||||
|
||||
// Frontend functions
|
||||
"filterFrontends": filterFrontends,
|
||||
"getFrontendRule": p.getFrontendRule,
|
||||
"getFrontendName": p.getFrontendName,
|
||||
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
|
||||
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
|
||||
"getPassTLSClientCert": label.GetTLSClientCert,
|
||||
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
|
||||
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
|
||||
"getAuth": label.GetAuth,
|
||||
"getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
|
||||
"getRedirect": label.GetRedirect,
|
||||
"getErrorPages": label.GetErrorPages,
|
||||
"getRateLimit": label.GetRateLimit,
|
||||
"getHeaders": label.GetHeaders,
|
||||
"getWhiteList": label.GetWhiteList,
|
||||
}
|
||||
|
||||
services := make(map[string][]ecsInstance)
|
||||
for _, instance := range instances {
|
||||
segmentProperties := label.ExtractTraefikLabels(instance.TraefikLabels)
|
||||
|
||||
for segmentName, labels := range segmentProperties {
|
||||
instance.SegmentLabels = labels
|
||||
instance.SegmentName = segmentName
|
||||
|
||||
backendName := getBackendName(instance)
|
||||
if p.filterInstance(instance) {
|
||||
if serviceInstances, ok := services[backendName]; ok {
|
||||
services[backendName] = append(serviceInstances, instance)
|
||||
} else {
|
||||
services[backendName] = []ecsInstance{instance}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return p.GetConfiguration("templates/ecs.tmpl", ecsFuncMap, struct {
|
||||
Services map[string][]ecsInstance
|
||||
}{
|
||||
Services: services,
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Provider) filterInstance(i ecsInstance) bool {
|
||||
if i.machine == nil {
|
||||
log.Debug("Filtering ecs instance with nil machine")
|
||||
return false
|
||||
}
|
||||
|
||||
if labelPort := label.GetStringValue(i.TraefikLabels, label.TraefikPort, ""); len(i.machine.ports) == 0 && labelPort == "" {
|
||||
log.Debugf("Filtering ecs instance without port %s (%s)", i.Name, i.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
if strings.ToLower(i.machine.state) != ec2.InstanceStateNameRunning {
|
||||
log.Debugf("Filtering ecs instance with an incorrect state %s (%s) (state = %s)", i.Name, i.ID, i.machine.state)
|
||||
return false
|
||||
}
|
||||
|
||||
if len(i.machine.privateIP) == 0 {
|
||||
log.Debugf("Filtering ecs instance without an ip address %s (%s)", i.Name, i.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
if !isEnabled(i, p.ExposedByDefault) {
|
||||
log.Debugf("Filtering disabled ecs instance %s (%s)", i.Name, i.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
constraintTags := label.GetSliceStringValue(i.TraefikLabels, label.TraefikTags)
|
||||
if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok {
|
||||
if failingConstraint != nil {
|
||||
log.Debugf("Filtering ecs instance pruned by constraint %s (%s) (constraint = %q)", i.Name, i.ID, failingConstraint.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func getBackendName(i ecsInstance) string {
|
||||
if len(i.SegmentName) > 0 {
|
||||
return getSegmentBackendName(i)
|
||||
}
|
||||
|
||||
return getDefaultBackendName(i)
|
||||
}
|
||||
|
||||
func getSegmentBackendName(i ecsInstance) string {
|
||||
if value := label.GetStringValue(i.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 {
|
||||
return provider.Normalize(i.Name + "-" + value)
|
||||
}
|
||||
|
||||
return provider.Normalize(i.Name + "-" + i.SegmentName)
|
||||
}
|
||||
|
||||
func getDefaultBackendName(i ecsInstance) string {
|
||||
if value := label.GetStringValue(i.SegmentLabels, label.TraefikBackend, ""); len(value) != 0 {
|
||||
return provider.Normalize(value)
|
||||
}
|
||||
|
||||
return provider.Normalize(i.Name)
|
||||
}
|
||||
|
||||
func (p *Provider) getFrontendRule(i ecsInstance) string {
|
||||
if value := label.GetStringValue(i.SegmentLabels, label.TraefikFrontendRule, ""); len(value) != 0 {
|
||||
return value
|
||||
}
|
||||
|
||||
domain := label.GetStringValue(i.SegmentLabels, label.TraefikDomain, p.Domain)
|
||||
if len(domain) > 0 {
|
||||
domain = "." + domain
|
||||
}
|
||||
|
||||
defaultRule := "Host:" + strings.ToLower(strings.Replace(i.Name, "_", "-", -1)) + domain
|
||||
|
||||
return label.GetStringValue(i.TraefikLabels, label.TraefikFrontendRule, defaultRule)
|
||||
}
|
||||
|
||||
func (p *Provider) getFrontendName(instance ecsInstance) string {
|
||||
name := getBackendName(instance)
|
||||
if len(instance.SegmentName) > 0 {
|
||||
name = instance.SegmentName + "-" + name
|
||||
}
|
||||
|
||||
return provider.Normalize(name)
|
||||
}
|
||||
|
||||
func getHost(i ecsInstance) string {
|
||||
return i.machine.privateIP
|
||||
}
|
||||
|
||||
func getPort(i ecsInstance) string {
|
||||
value := label.GetStringValue(i.SegmentLabels, label.TraefikPort, "")
|
||||
|
||||
if len(value) == 0 {
|
||||
value = label.GetStringValue(i.TraefikLabels, label.TraefikPort, "")
|
||||
}
|
||||
|
||||
if len(value) > 0 {
|
||||
port, err := strconv.ParseInt(value, 10, 64)
|
||||
if err == nil {
|
||||
for _, mapping := range i.machine.ports {
|
||||
if port == mapping.hostPort || port == mapping.containerPort {
|
||||
return strconv.FormatInt(mapping.hostPort, 10)
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
}
|
||||
return strconv.FormatInt(i.machine.ports[0].hostPort, 10)
|
||||
}
|
||||
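getPort above resolves the published port as follows: a traefik.port label that matches either side of a port mapping yields that mapping's host port; an unmatched numeric label is returned as-is; with no usable label, the first mapped host port wins. A compact, self-contained sketch of that rule (mapping is a simplified stand-in for portMapping, and the segment/label lookup is collapsed into a single string):

package main

import (
    "fmt"
    "strconv"
)

// mapping is a simplified stand-in for the provider's portMapping type.
type mapping struct {
    containerPort int64
    hostPort      int64
}

// resolvePort mirrors the logic of getPort for a single label value.
func resolvePort(label string, mappings []mapping) string {
    if label != "" {
        if port, err := strconv.ParseInt(label, 10, 64); err == nil {
            for _, m := range mappings {
                if port == m.hostPort || port == m.containerPort {
                    return strconv.FormatInt(m.hostPort, 10)
                }
            }
            return label
        }
    }
    return strconv.FormatInt(mappings[0].hostPort, 10)
}

func main() {
    mappings := []mapping{{containerPort: 80, hostPort: 2503}}
    fmt.Println(resolvePort("80", mappings)) // 2503 (label matched the container port)
    fmt.Println(resolvePort("", mappings))   // 2503 (first mapped host port)
}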
|
||||
func filterFrontends(instances []ecsInstance) []ecsInstance {
|
||||
byName := make(map[string]struct{})
|
||||
|
||||
return fun.Filter(func(i ecsInstance) bool {
|
||||
backendName := getBackendName(i)
|
||||
if len(i.SegmentName) > 0 {
|
||||
backendName = backendName + "-" + i.SegmentName
|
||||
}
|
||||
|
||||
_, found := byName[backendName]
|
||||
if !found {
|
||||
byName[backendName] = struct{}{}
|
||||
}
|
||||
return !found
|
||||
}, instances).([]ecsInstance)
|
||||
}
|
||||
|
||||
func getServers(instances []ecsInstance) map[string]types.Server {
|
||||
var servers map[string]types.Server
|
||||
|
||||
for _, instance := range instances {
|
||||
if servers == nil {
|
||||
servers = make(map[string]types.Server)
|
||||
}
|
||||
|
||||
protocol := label.GetStringValue(instance.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
|
||||
host := getHost(instance)
|
||||
port := getPort(instance)
|
||||
|
||||
serverURL := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(host, port))
|
||||
serverName := getServerName(instance, serverURL)
|
||||
|
||||
if _, exist := servers[serverName]; exist {
|
||||
log.Debugf("Skipping server %q with the same URL.", serverName)
|
||||
continue
|
||||
}
|
||||
|
||||
servers[serverName] = types.Server{
|
||||
URL: serverURL,
|
||||
Weight: label.GetIntValue(instance.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
func isEnabled(i ecsInstance, exposedByDefault bool) bool {
|
||||
return label.GetBoolValue(i.TraefikLabels, label.TraefikEnable, exposedByDefault)
|
||||
}
|
||||
|
||||
func getServerName(instance ecsInstance, url string) string {
|
||||
hash := md5.New()
|
||||
_, err := hash.Write([]byte(url))
|
||||
if err != nil {
|
||||
// Impossible case
|
||||
log.Errorf("Fail to hash server URL %q", url)
|
||||
}
|
||||
|
||||
if len(instance.SegmentName) > 0 {
|
||||
return provider.Normalize(fmt.Sprintf("server-%s-%s-%s", instance.Name, instance.ID, hex.EncodeToString(hash.Sum(nil))))
|
||||
}
|
||||
|
||||
return provider.Normalize(fmt.Sprintf("server-%s-%s", instance.Name, instance.ID))
|
||||
}
|
||||
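getServerName above appends an md5 hex digest of the server URL when a segment name is set; that digest is the suffix seen in the server names of the test fixtures below. A small sketch of the derivation (the instance name and ID here are illustrative values):

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
)

func main() {
    serverURL := "http://127.0.0.1:2503"

    // Hash the full server URL and append the hex digest, as getServerName does
    // for segment-scoped instances.
    sum := md5.Sum([]byte(serverURL))
    fmt.Printf("server-foo-123456789abc-%s\n", hex.EncodeToString(sum[:]))
}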
|
|
@ -1,881 +0,0 @@
|
|||
package ecs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSegmentBuildConfiguration(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
instanceInfo []ecsInstance
|
||||
expectedFrontends map[string]*types.Frontend
|
||||
expectedBackends map[string]*types.Backend
|
||||
}{
|
||||
{
|
||||
desc: "when no container",
|
||||
instanceInfo: []ecsInstance{},
|
||||
expectedFrontends: map[string]*types.Frontend{},
|
||||
expectedBackends: map[string]*types.Backend{},
|
||||
},
|
||||
{
|
||||
desc: "simple configuration",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.ecs.localhost",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-123456789abc-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth basic",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.ecs.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Basic: &types.Basic{
|
||||
RemoveHeader: true,
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-123456789abc-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth basic backward compatibility",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.ecs.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
Basic: &types.Basic{
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-123456789abc-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth digest",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.ecs.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Digest: &types.Digest{
|
||||
RemoveHeader: true,
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-123456789abc-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "auth forward",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTrustForwardHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAuthResponseHeaders: "X-Auth-User,X-Auth-Token",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.ecs.localhost",
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Forward: &types.Forward{
|
||||
Address: "auth.server",
|
||||
TLS: &types.ClientTLS{
|
||||
CA: "ca.crt",
|
||||
CAOptional: true,
|
||||
Cert: "server.crt",
|
||||
Key: "server.key",
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
TrustForwardHeader: true,
|
||||
AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-123456789abc-863563a2e23c95502862016417ee95ea": {
|
||||
URL: "http://127.0.0.1:2503",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "when all labels are set",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("foo"),
|
||||
labels(map[string]string{
|
||||
label.Prefix + "sauternes." + label.SuffixPort: "666",
|
||||
label.Prefix + "sauternes." + label.SuffixProtocol: "https",
|
||||
label.Prefix + "sauternes." + label.SuffixWeight: "12",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTrustForwardHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertPem: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotBefore: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotAfter: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSans: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCommonName: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCountry: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectLocality: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectOrganization: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectProvince: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber: "true",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendEntryPoints: "http,https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassHostHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPassTLSCert: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendPriority: "666",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectEntryPoint: "https",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectRegex: "nope",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectReplacement: "nope",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRedirectPermanent: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendWhiteListIPStrategyExcludedIPS: "10.10.10.10,10.10.10.11",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendWhiteListIPStrategyDepth: "5",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersAllowedHosts: "foo,bar,bor",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersHostsProxyHeaders: "foo,bar,bor",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLHost: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomFrameOptionsValue: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentSecurityPolicy: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersPublicKey: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersReferrerPolicy: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomBrowserXSSValue: "foo",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSSeconds: "666",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLForceHost: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLRedirect: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLTemporaryRedirect: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSIncludeSubdomains: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSPreload: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersForceSTSHeader: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersFrameDeny: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentTypeNosniff: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersBrowserXSSFilter: "true",
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendHeadersIsDevelopment: "true",
|
||||
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: "404",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: "foobar",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: "foo_query",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: "500,600",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: "foobar",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: "bar_query",
|
||||
|
||||
label.Prefix + "sauternes." + label.SuffixFrontendRateLimitExtractorFunc: "client.ip",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: "6",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: "12",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: "18",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: "3",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: "6",
|
||||
label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitBurst: "9",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 666),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-foo-sauternes": {
|
||||
Backend: "backend-foo-sauternes",
|
||||
EntryPoints: []string{
|
||||
"http",
|
||||
"https",
|
||||
},
|
||||
PassHostHeader: true,
|
||||
PassTLSCert: true,
|
||||
Priority: 666,
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
NotAfter: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Basic: &types.Basic{
|
||||
RemoveHeader: true,
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
WhiteList: &types.WhiteList{
|
||||
SourceRange: []string{"10.10.10.10"},
|
||||
IPStrategy: &types.IPStrategy{
|
||||
Depth: 5,
|
||||
ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"},
|
||||
},
|
||||
},
|
||||
Headers: &types.Headers{
|
||||
CustomRequestHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
CustomResponseHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
AllowedHosts: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
HostsProxyHeaders: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
SSLRedirect: true,
|
||||
SSLTemporaryRedirect: true,
|
||||
SSLForceHost: true,
|
||||
SSLHost: "foo",
|
||||
SSLProxyHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
STSSeconds: 666,
|
||||
STSIncludeSubdomains: true,
|
||||
STSPreload: true,
|
||||
ForceSTSHeader: true,
|
||||
FrameDeny: true,
|
||||
CustomFrameOptionsValue: "foo",
|
||||
ContentTypeNosniff: true,
|
||||
BrowserXSSFilter: true,
|
||||
CustomBrowserXSSValue: "foo",
|
||||
ContentSecurityPolicy: "foo",
|
||||
PublicKey: "foo",
|
||||
ReferrerPolicy: "foo",
|
||||
IsDevelopment: true,
|
||||
},
|
||||
Errors: map[string]*types.ErrorPage{
|
||||
"foo": {
|
||||
Status: []string{"404"},
|
||||
Query: "foo_query",
|
||||
Backend: "backend-foobar",
|
||||
},
|
||||
"bar": {
|
||||
Status: []string{"500", "600"},
|
||||
Query: "bar_query",
|
||||
Backend: "backend-foobar",
|
||||
},
|
||||
},
|
||||
RateLimit: &types.RateLimit{
|
||||
ExtractorFunc: "client.ip",
|
||||
RateSet: map[string]*types.Rate{
|
||||
"foo": {
|
||||
Period: parse.Duration(6 * time.Second),
|
||||
Average: 12,
|
||||
Burst: 18,
|
||||
},
|
||||
"bar": {
|
||||
Period: parse.Duration(3 * time.Second),
|
||||
Average: 6,
|
||||
Burst: 9,
|
||||
},
|
||||
},
|
||||
},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
Regex: "",
|
||||
Replacement: "",
|
||||
Permanent: true,
|
||||
},
|
||||
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-foo-sauternes": {
|
||||
Rule: "Host:foo.ecs.localhost",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-foo-sauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-foo-123456789abc-7f6444e0dff3330c8b0ad2bbbd383b0f": {
|
||||
URL: "https://127.0.0.1:666",
|
||||
Weight: 12,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several containers",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.protocol": "https",
|
||||
"traefik.sauternes.weight": "80",
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.passHostHeader": "false",
|
||||
"traefik.sauternes.frontend.rule": "Path:/mypath",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
"traefik.sauternes.frontend.entryPoints": "http,https,ws",
|
||||
"traefik.sauternes.frontend.auth.basic": "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
"traefik.sauternes.frontend.redirect.entryPoint": "https",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
instance(
|
||||
ID("abc987654321"),
|
||||
name("test2"),
|
||||
labels(map[string]string{
|
||||
"traefik.anothersauternes.port": "8079",
|
||||
"traefik.anothersauternes.weight": "33",
|
||||
"traefik.anothersauternes.frontend.rule": "Path:/anotherpath",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.2"),
|
||||
mPorts(
|
||||
mPort(80, 8079),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: false,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https", "ws"},
|
||||
Auth: &types.Auth{
|
||||
Basic: &types.Basic{
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
},
|
||||
},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/mypath",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-anothersauternes-test2-anothersauternes": {
|
||||
Backend: "backend-test2-anothersauternes",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-anothersauternes-test2-anothersauternes": {
|
||||
Rule: "Path:/anotherpath",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
"backend-test2-anothersauternes": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test2-abc987654321-045e3e4aa5a744a325c099b803700a93": {
|
||||
URL: "http://127.0.0.2:8079",
|
||||
Weight: 33,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several segments with the same backend name and same port",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.port": "2503",
|
||||
"traefik.protocol": "https",
|
||||
"traefik.weight": "80",
|
||||
"traefik.frontend.entryPoints": "http,https",
|
||||
"traefik.frontend.redirect.entryPoint": "https",
|
||||
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.rule": "Path:/sauternes",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
|
||||
"traefik.arbois.backend": "foobar",
|
||||
"traefik.arbois.frontend.rule": "Path:/arbois",
|
||||
"traefik.arbois.frontend.priority": "3000",
|
||||
}),
|
||||
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/sauternes",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-arbois-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 3000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-arbois-test1-foobar": {
|
||||
Rule: "Path:/arbois",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several segments with the same backend name and different port (wrong behavior)",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.protocol": "https",
|
||||
"traefik.frontend.entryPoints": "http,https",
|
||||
"traefik.frontend.redirect.entryPoint": "https",
|
||||
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.weight": "80",
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.rule": "Path:/sauternes",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
|
||||
"traefik.arbois.port": "2504",
|
||||
"traefik.arbois.weight": "90",
|
||||
"traefik.arbois.backend": "foobar",
|
||||
"traefik.arbois.frontend.rule": "Path:/arbois",
|
||||
"traefik.arbois.frontend.priority": "3000",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
mPort(80, 2504),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/sauternes",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-arbois-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 3000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-arbois-test1-foobar": {
|
||||
Rule: "Path:/arbois",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
"server-test1-123456789abc-315a41140f1bd825b066e39686c18482": {
|
||||
URL: "https://127.0.0.1:2504",
|
||||
Weight: 90,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "several segments with the same backend name and different port binding",
|
||||
instanceInfo: []ecsInstance{
|
||||
instance(
|
||||
ID("123456789abc"),
|
||||
name("test1"),
|
||||
labels(map[string]string{
|
||||
"traefik.protocol": "https",
|
||||
"traefik.frontend.entryPoints": "http,https",
|
||||
"traefik.frontend.redirect.entryPoint": "https",
|
||||
|
||||
"traefik.sauternes.port": "2503",
|
||||
"traefik.sauternes.weight": "80",
|
||||
"traefik.sauternes.backend": "foobar",
|
||||
"traefik.sauternes.frontend.rule": "Path:/sauternes",
|
||||
"traefik.sauternes.frontend.priority": "5000",
|
||||
|
||||
"traefik.arbois.port": "8080",
|
||||
"traefik.arbois.weight": "90",
|
||||
"traefik.arbois.backend": "foobar",
|
||||
"traefik.arbois.frontend.rule": "Path:/arbois",
|
||||
"traefik.arbois.frontend.priority": "3000",
|
||||
}),
|
||||
iMachine(
|
||||
mState(ec2.InstanceStateNameRunning),
|
||||
mPrivateIP("127.0.0.1"),
|
||||
mPorts(
|
||||
mPort(80, 2503),
|
||||
mPort(8080, 2504),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-sauternes-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 5000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-sauternes-test1-foobar": {
|
||||
Rule: "Path:/sauternes",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-arbois-test1-foobar": {
|
||||
Backend: "backend-test1-foobar",
|
||||
PassHostHeader: true,
|
||||
Priority: 3000,
|
||||
EntryPoints: []string{"http", "https"},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-arbois-test1-foobar": {
|
||||
Rule: "Path:/arbois",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-test1-foobar": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": {
|
||||
URL: "https://127.0.0.1:2503",
|
||||
Weight: 80,
|
||||
},
|
||||
"server-test1-123456789abc-315a41140f1bd825b066e39686c18482": {
|
||||
URL: "https://127.0.0.1:2504",
|
||||
Weight: 90,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
	}

	provider := &Provider{
		Domain:           "ecs.localhost",
		ExposedByDefault: true,
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			actualConfig, err := provider.buildConfiguration(test.instanceInfo)

			assert.NoError(t, err)
			require.NotNil(t, actualConfig, "actualConfig")

			assert.EqualValues(t, test.expectedBackends, actualConfig.Backends)
			assert.EqualValues(t, test.expectedFrontends, actualConfig.Frontends)
		})
	}
}
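For readers skimming the table above, a fragment-style sketch of how one fixture is assembled with the test builders used throughout it; the builder signatures are assumed from their call sites, and the ID, name and label values below are invented:

	inst := instance(
		ID("deadbeef1234"),
		name("web"),
		labels(map[string]string{
			"traefik.port": "8080",
		}),
		iMachine(
			mState(ec2.InstanceStateNameRunning),
			mPrivateIP("10.0.0.5"),
			mPorts(
				mPort(80, 8080),
			),
		),
	)
	_ = inst
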
File diff suppressed because it is too large
@@ -1,432 +0,0 @@
package ecs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/defaults"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ecs"
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
type Provider struct {
	provider.BaseProvider `mapstructure:",squash" export:"true"`

	Domain           string `description:"Default domain used"`
	ExposedByDefault bool   `description:"Expose containers by default" export:"true"`
	RefreshSeconds   int    `description:"Polling interval (in seconds)" export:"true"`

	// Provider lookup parameters
	Clusters             Clusters `description:"ECS Clusters name"`
	AutoDiscoverClusters bool     `description:"Auto discover cluster" export:"true"`
	Region               string   `description:"The AWS region to use for requests" export:"true"`
	AccessKeyID          string   `description:"The AWS credentials access key to use for making requests"`
	SecretAccessKey      string   `description:"The AWS credentials access key to use for making requests"`
}
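A fragment-style sketch of populating this struct programmatically; every field value below is invented, and Clusters is assumed to be a string slice based on its use in listInstances later in this file:

	// Hypothetical example values; only fields declared in the struct above are used.
	p := &Provider{
		Domain:           "ecs.localhost",
		ExposedByDefault: true,
		RefreshSeconds:   15,
		Clusters:         Clusters{"default"},
		Region:           "us-east-1",
	}
	_ = p
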
type ecsInstance struct {
|
||||
Name string
|
||||
ID string
|
||||
containerDefinition *ecs.ContainerDefinition
|
||||
machine *machine
|
||||
TraefikLabels map[string]string
|
||||
SegmentLabels map[string]string
|
||||
SegmentName string
|
||||
}
|
||||
|
||||
type portMapping struct {
|
||||
containerPort int64
|
||||
hostPort int64
|
||||
}
|
||||
|
||||
type machine struct {
|
||||
state string
|
||||
privateIP string
|
||||
ports []portMapping
|
||||
}
|
||||
|
||||
type awsClient struct {
|
||||
ecs *ecs.ECS
|
||||
ec2 *ec2.EC2
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
return p.BaseProvider.Init(constraints)
|
||||
}
|
||||
|
||||
func (p *Provider) createClient() (*awsClient, error) {
|
||||
sess, err := session.NewSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ec2meta := ec2metadata.New(sess)
|
||||
if p.Region == "" {
|
||||
log.Infoln("No EC2 region provided, querying instance metadata endpoint...")
|
||||
identity, err := ec2meta.GetInstanceIdentityDocument()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Region = identity.Region
|
||||
}
|
||||
|
||||
cfg := &aws.Config{
|
||||
Region: &p.Region,
|
||||
Credentials: credentials.NewChainCredentials(
|
||||
[]credentials.Provider{
|
||||
&credentials.StaticProvider{
|
||||
Value: credentials.Value{
|
||||
AccessKeyID: p.AccessKeyID,
|
||||
SecretAccessKey: p.SecretAccessKey,
|
||||
},
|
||||
},
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{},
|
||||
defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()),
|
||||
}),
|
||||
}
|
||||
|
||||
if p.Trace {
|
||||
cfg.WithLogger(aws.LoggerFunc(func(args ...interface{}) {
|
||||
log.Debug(args...)
|
||||
}))
|
||||
}
|
||||
|
||||
return &awsClient{
|
||||
ecs: ecs.New(sess, cfg),
|
||||
ec2: ec2.New(sess, cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Provide allows the ecs provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
handleCanceled := func(ctx context.Context, err error) error {
|
||||
if ctx.Err() == context.Canceled || err == context.Canceled {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
pool.Go(func(stop chan bool) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
safe.Go(func() {
|
||||
<-stop
|
||||
cancel()
|
||||
})
|
||||
|
||||
operation := func() error {
|
||||
awsClient, err := p.createClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configuration, err := p.loadECSConfig(ctx, awsClient)
|
||||
if err != nil {
|
||||
return handleCanceled(ctx, err)
|
||||
}
|
||||
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "ecs",
|
||||
Configuration: configuration,
|
||||
}
|
||||
|
||||
if p.Watch {
|
||||
reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
|
||||
defer reload.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-reload.C:
|
||||
configuration, err := p.loadECSConfig(ctx, awsClient)
|
||||
if err != nil {
|
||||
return handleCanceled(ctx, err)
|
||||
}
|
||||
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "ecs",
|
||||
Configuration: configuration,
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return handleCanceled(ctx, ctx.Err())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Provider connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot connect to Provider api %+v", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels)
|
||||
// and the EC2 instance data
|
||||
func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) {
|
||||
var clustersArn []*string
|
||||
var clusters Clusters
|
||||
|
||||
if p.AutoDiscoverClusters {
|
||||
input := &ecs.ListClustersInput{}
|
||||
for {
|
||||
result, err := client.ecs.ListClusters(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result != nil {
|
||||
clustersArn = append(clustersArn, result.ClusterArns...)
|
||||
input.NextToken = result.NextToken
|
||||
if result.NextToken == nil {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, cArn := range clustersArn {
|
||||
clusters = append(clusters, *cArn)
|
||||
}
|
||||
} else {
|
||||
clusters = p.Clusters
|
||||
}
|
||||
|
||||
var instances []ecsInstance
|
||||
|
||||
log.Debugf("ECS Clusters: %s", clusters)
|
||||
|
||||
for _, c := range clusters {
|
||||
|
||||
input := &ecs.ListTasksInput{
|
||||
Cluster: &c,
|
||||
DesiredStatus: aws.String(ecs.DesiredStatusRunning),
|
||||
}
|
||||
tasks := make(map[string]*ecs.Task)
|
||||
err := client.ecs.ListTasksPagesWithContext(ctx, input, func(page *ecs.ListTasksOutput, lastPage bool) bool {
|
||||
if len(page.TaskArns) > 0 {
|
||||
resp, err := client.ecs.DescribeTasksWithContext(ctx, &ecs.DescribeTasksInput{
|
||||
Tasks: page.TaskArns,
|
||||
Cluster: &c,
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("Unable to describe tasks for %v", page.TaskArns)
|
||||
} else {
|
||||
for _, t := range resp.Tasks {
|
||||
tasks[aws.StringValue(t.TaskArn)] = t
|
||||
}
|
||||
}
|
||||
}
|
||||
return !lastPage
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("Unable to list tasks")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Skip to the next cluster if there are no tasks found on
|
||||
// this cluster.
|
||||
if len(tasks) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
ec2Instances, err := p.lookupEc2Instances(ctx, client, &c, tasks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
taskDefinitions, err := p.lookupTaskDefinitions(ctx, client, tasks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for key, task := range tasks {
|
||||
|
||||
containerInstance := ec2Instances[aws.StringValue(task.ContainerInstanceArn)]
|
||||
taskDef := taskDefinitions[key]
|
||||
|
||||
for _, container := range task.Containers {
|
||||
|
||||
var containerDefinition *ecs.ContainerDefinition
|
||||
for _, def := range taskDef.ContainerDefinitions {
|
||||
if aws.StringValue(container.Name) == aws.StringValue(def.Name) {
|
||||
containerDefinition = def
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if containerDefinition == nil {
|
||||
log.Debugf("Unable to find container definition for %s", aws.StringValue(container.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
var mach *machine
|
||||
if aws.StringValue(task.LaunchType) == ecs.LaunchTypeFargate {
|
||||
var ports []portMapping
|
||||
for _, mapping := range containerDefinition.PortMappings {
|
||||
if mapping != nil {
|
||||
ports = append(ports, portMapping{
|
||||
hostPort: aws.Int64Value(mapping.HostPort),
|
||||
containerPort: aws.Int64Value(mapping.ContainerPort),
|
||||
})
|
||||
}
|
||||
}
|
||||
mach = &machine{
|
||||
privateIP: aws.StringValue(container.NetworkInterfaces[0].PrivateIpv4Address),
|
||||
ports: ports,
|
||||
state: aws.StringValue(task.LastStatus),
|
||||
}
|
||||
} else {
|
||||
var ports []portMapping
|
||||
for _, mapping := range container.NetworkBindings {
|
||||
if mapping != nil {
|
||||
ports = append(ports, portMapping{
|
||||
hostPort: aws.Int64Value(mapping.HostPort),
|
||||
containerPort: aws.Int64Value(mapping.ContainerPort),
|
||||
})
|
||||
}
|
||||
}
|
||||
mach = &machine{
|
||||
privateIP: aws.StringValue(containerInstance.PrivateIpAddress),
|
||||
ports: ports,
|
||||
state: aws.StringValue(containerInstance.State.Name),
|
||||
}
|
||||
}
|
||||
|
||||
instances = append(instances, ecsInstance{
|
||||
Name: fmt.Sprintf("%s-%s", strings.Replace(aws.StringValue(task.Group), ":", "-", 1), *container.Name),
|
||||
ID: key[len(key)-12:],
|
||||
containerDefinition: containerDefinition,
|
||||
machine: mach,
|
||||
TraefikLabels: aws.StringValueMap(containerDefinition.DockerLabels),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, clusterName *string, ecsDatas map[string]*ecs.Task) (map[string]*ec2.Instance, error) {
|
||||
|
||||
instanceIds := make(map[string]string)
|
||||
ec2Instances := make(map[string]*ec2.Instance)
|
||||
|
||||
var containerInstancesArns []*string
|
||||
var instanceArns []*string
|
||||
|
||||
for _, task := range ecsDatas {
|
||||
if task.ContainerInstanceArn != nil {
|
||||
containerInstancesArns = append(containerInstancesArns, task.ContainerInstanceArn)
|
||||
}
|
||||
}
|
||||
|
||||
for _, arns := range p.chunkIDs(containerInstancesArns) {
|
||||
resp, err := client.ecs.DescribeContainerInstancesWithContext(ctx, &ecs.DescribeContainerInstancesInput{
|
||||
ContainerInstances: arns,
|
||||
Cluster: clusterName,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Unable to describe container instances: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, container := range resp.ContainerInstances {
|
||||
instanceIds[aws.StringValue(container.Ec2InstanceId)] = aws.StringValue(container.ContainerInstanceArn)
|
||||
instanceArns = append(instanceArns, container.Ec2InstanceId)
|
||||
}
|
||||
}
|
||||
|
||||
if len(instanceArns) > 0 {
|
||||
for _, ids := range p.chunkIDs(instanceArns) {
|
||||
input := &ec2.DescribeInstancesInput{
|
||||
InstanceIds: ids,
|
||||
}
|
||||
|
||||
err := client.ec2.DescribeInstancesPagesWithContext(ctx, input, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
|
||||
if len(page.Reservations) > 0 {
|
||||
for _, r := range page.Reservations {
|
||||
for _, i := range r.Instances {
|
||||
if i.InstanceId != nil {
|
||||
ec2Instances[instanceIds[aws.StringValue(i.InstanceId)]] = i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return !lastPage
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Unable to describe instances: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ec2Instances, nil
|
||||
}
|
||||
|
||||
func (p *Provider) lookupTaskDefinitions(ctx context.Context, client *awsClient, taskDefArns map[string]*ecs.Task) (map[string]*ecs.TaskDefinition, error) {
|
||||
taskDef := make(map[string]*ecs.TaskDefinition)
|
||||
for arn, task := range taskDefArns {
|
||||
resp, err := client.ecs.DescribeTaskDefinitionWithContext(ctx, &ecs.DescribeTaskDefinitionInput{
|
||||
TaskDefinition: task.TaskDefinitionArn,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Unable to describe task definition: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
taskDef[arn] = resp.TaskDefinition
|
||||
}
|
||||
return taskDef, nil
|
||||
}
|
||||
|
||||
func (p *Provider) loadECSConfig(ctx context.Context, client *awsClient) (*types.Configuration, error) {
|
||||
instances, err := p.listInstances(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.buildConfiguration(instances)
|
||||
}
|
||||
|
||||
// chunkIDs: ECS expects no more than 100 parameters to be passed to an API call;
// thus, pack the strings into slices capped at 100 elements.
func (p *Provider) chunkIDs(ids []*string) [][]*string {
	var chunked [][]*string
	for i := 0; i < len(ids); i += 100 {
		var sliceEnd int
		if i+100 < len(ids) {
			sliceEnd = i + 100
		} else {
			sliceEnd = len(ids)
		}
		chunked = append(chunked, ids[i:sliceEnd])
	}
	return chunked
}
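As a standalone illustration (not the provider's code), the same 100-element capping applied to a plain string slice; the package, names and the 255-element count here are invented:

	package main

	import "fmt"

	// chunk mirrors the capping logic of chunkIDs on a plain string slice.
	func chunk(ids []string, size int) [][]string {
		var out [][]string
		for i := 0; i < len(ids); i += size {
			end := i + size
			if end > len(ids) {
				end = len(ids)
			}
			out = append(out, ids[i:end])
		}
		return out
	}

	func main() {
		ids := make([]string, 255)
		for _, c := range chunk(ids, 100) {
			fmt.Println(len(c)) // prints 100, 100, 55
		}
	}
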
|
|
@@ -1,88 +0,0 @@
package ecs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestChunkIDs(t *testing.T) {
|
||||
provider := &Provider{}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
count int
|
||||
expected []int
|
||||
}{
|
||||
{
|
||||
desc: "0 element",
|
||||
count: 0,
|
||||
expected: []int(nil),
|
||||
},
|
||||
{
|
||||
desc: "1 element",
|
||||
count: 1,
|
||||
expected: []int{1},
|
||||
},
|
||||
{
|
||||
desc: "99 elements, 1 chunk",
|
||||
count: 99,
|
||||
expected: []int{99},
|
||||
},
|
||||
{
|
||||
desc: "100 elements, 1 chunk",
|
||||
count: 100,
|
||||
expected: []int{100},
|
||||
},
|
||||
{
|
||||
desc: "101 elements, 2 chunks",
|
||||
count: 101,
|
||||
expected: []int{100, 1},
|
||||
},
|
||||
{
|
||||
desc: "199 elements, 2 chunks",
|
||||
count: 199,
|
||||
expected: []int{100, 99},
|
||||
},
|
||||
{
|
||||
desc: "200 elements, 2 chunks",
|
||||
count: 200,
|
||||
expected: []int{100, 100},
|
||||
},
|
||||
{
|
||||
desc: "201 elements, 3 chunks",
|
||||
count: 201,
|
||||
expected: []int{100, 100, 1},
|
||||
},
|
||||
{
|
||||
desc: "555 elements, 5 chunks",
|
||||
count: 555,
|
||||
expected: []int{100, 100, 100, 100, 100, 55},
|
||||
},
|
||||
{
|
||||
desc: "1001 elements, 11 chunks",
|
||||
count: 1001,
|
||||
expected: []int{100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 1},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var IDs []*string
|
||||
for v := 0; v < test.count; v++ {
|
||||
IDs = append(IDs, aws.String("a"))
|
||||
}
|
||||
|
||||
var outCount []int
|
||||
for _, el := range provider.chunkIDs(IDs) {
|
||||
outCount = append(outCount, len(el))
|
||||
}
|
||||
|
||||
assert.Equal(t, test.expected, outCount)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,48 +0,0 @@
package etcd

import (
	"fmt"

	"github.com/abronan/valkeyrie/store"
	"github.com/abronan/valkeyrie/store/etcd/v3"
	"github.com/containous/traefik/provider"
	"github.com/containous/traefik/provider/kv"
	"github.com/containous/traefik/safe"
	"github.com/containous/traefik/types"
)

var _ provider.Provider = (*Provider)(nil)

// Provider holds configurations of the provider.
type Provider struct {
	kv.Provider `mapstructure:",squash" export:"true"`
}

// Init the provider
func (p *Provider) Init(constraints types.Constraints) error {
	err := p.Provider.Init(constraints)
	if err != nil {
		return err
	}

	store, err := p.CreateStore()
	if err != nil {
		return fmt.Errorf("failed to connect to KV store: %v", err)
	}

	p.SetKVClient(store)
	return nil
}

// Provide allows the etcd provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	return p.Provider.Provide(configurationChan, pool)
}

// CreateStore creates the KV store
func (p *Provider) CreateStore() (store.Store, error) {
	etcdv3.Register()
	p.SetStoreType(store.ETCDV3)
	return p.Provider.CreateStore()
}

@@ -1,57 +0,0 @@
package eureka

import (
	"strconv"
	"text/template"

	"github.com/ArthurHlt/go-eureka-client/eureka"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/provider"
	"github.com/containous/traefik/provider/label"
	"github.com/containous/traefik/types"
)

// Build the configuration from the Eureka server
func (p *Provider) buildConfiguration(apps *eureka.Applications) (*types.Configuration, error) {
	var eurekaFuncMap = template.FuncMap{
		"getPort":       getPort,
		"getProtocol":   getProtocol,
		"getWeight":     getWeight,
		"getInstanceID": getInstanceID,
	}

	templateObjects := struct {
		Applications []eureka.Application
	}{
		Applications: apps.Applications,
	}

	configuration, err := p.GetConfiguration("templates/eureka.tmpl", eurekaFuncMap, templateObjects)
	if err != nil {
		log.Error(err)
	}
	return configuration, nil
}

func getInstanceID(instance eureka.InstanceInfo) string {
	defaultID := provider.Normalize(instance.IpAddr) + "-" + getPort(instance)
	return label.GetStringValue(instance.Metadata.Map, label.TraefikBackendID, defaultID)
}

func getPort(instance eureka.InstanceInfo) string {
	if instance.SecurePort.Enabled {
		return strconv.Itoa(instance.SecurePort.Port)
	}
	return strconv.Itoa(instance.Port.Port)
}

func getProtocol(instance eureka.InstanceInfo) string {
	if instance.SecurePort.Enabled {
		return "https"
	}
	return label.DefaultProtocol
}

func getWeight(instance eureka.InstanceInfo) int {
	return label.GetIntValue(instance.Metadata.Map, label.TraefikWeight, label.DefaultWeight)
}

@@ -1,182 +0,0 @@
package eureka
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/ArthurHlt/go-eureka-client/eureka"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetPort(t *testing.T) {
|
||||
testCases := []struct {
|
||||
expectedPort string
|
||||
instanceInfo eureka.InstanceInfo
|
||||
}{
|
||||
{
|
||||
expectedPort: "80",
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
SecurePort: &eureka.Port{
|
||||
Port: 443, Enabled: false,
|
||||
},
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectedPort: "443",
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
SecurePort: &eureka.Port{
|
||||
Port: 443, Enabled: true,
|
||||
},
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
port := getPort(test.instanceInfo)
|
||||
assert.Equal(t, test.expectedPort, port)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetProtocol(t *testing.T) {
|
||||
testCases := []struct {
|
||||
expectedProtocol string
|
||||
instanceInfo eureka.InstanceInfo
|
||||
}{
|
||||
{
|
||||
expectedProtocol: "http",
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
SecurePort: &eureka.Port{
|
||||
Port: 443, Enabled: false,
|
||||
},
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectedProtocol: "https",
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
SecurePort: &eureka.Port{
|
||||
Port: 443, Enabled: true,
|
||||
},
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
protocol := getProtocol(test.instanceInfo)
|
||||
assert.Equal(t, test.expectedProtocol, protocol)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetWeight(t *testing.T) {
|
||||
testCases := []struct {
|
||||
expectedWeight int
|
||||
instanceInfo eureka.InstanceInfo
|
||||
}{
|
||||
{
|
||||
expectedWeight: label.DefaultWeight,
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: true,
|
||||
},
|
||||
Metadata: &eureka.MetaData{
|
||||
Map: map[string]string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectedWeight: 10,
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: true,
|
||||
},
|
||||
Metadata: &eureka.MetaData{
|
||||
Map: map[string]string{
|
||||
label.TraefikWeight: "10",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
weight := getWeight(test.instanceInfo)
|
||||
assert.Equal(t, test.expectedWeight, weight)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInstanceId(t *testing.T) {
|
||||
testCases := []struct {
|
||||
expectedID string
|
||||
instanceInfo eureka.InstanceInfo
|
||||
}{
|
||||
{
|
||||
expectedID: "MyInstanceId",
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
IpAddr: "10.11.12.13",
|
||||
SecurePort: &eureka.Port{
|
||||
Port: 443, Enabled: false,
|
||||
},
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: true,
|
||||
},
|
||||
Metadata: &eureka.MetaData{
|
||||
Map: map[string]string{
|
||||
label.TraefikBackendID: "MyInstanceId",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectedID: "10-11-12-13-80",
|
||||
instanceInfo: eureka.InstanceInfo{
|
||||
IpAddr: "10.11.12.13",
|
||||
SecurePort: &eureka.Port{
|
||||
Port: 443, Enabled: false,
|
||||
},
|
||||
Port: &eureka.Port{
|
||||
Port: 80, Enabled: true,
|
||||
},
|
||||
Metadata: &eureka.MetaData{
|
||||
Map: map[string]string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
id := getInstanceID(test.instanceInfo)
|
||||
assert.Equal(t, test.expectedID, id)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,92 +0,0 @@
package eureka
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/ArthurHlt/go-eureka-client/eureka"
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
// Provider holds configurations of the provider.
type Provider struct {
	provider.BaseProvider `mapstructure:",squash" export:"true"`
	Endpoint              string         `description:"Eureka server endpoint"`
	RefreshSeconds        parse.Duration `description:"Override default configuration time between refresh" export:"true"`
}
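A fragment-style sketch of setting these two fields; the endpoint URL and interval are invented, and the parse.Duration conversion mirrors its use elsewhere in this commit:

	// Hypothetical values; only the fields declared above are used.
	p := &Provider{
		Endpoint:       "http://eureka.internal:8761/eureka",
		RefreshSeconds: parse.Duration(30 * time.Second),
	}
	_ = p
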
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
return p.BaseProvider.Init(constraints)
|
||||
}
|
||||
|
||||
// Provide allows the eureka provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
eureka.GetLogger().SetOutput(ioutil.Discard)
|
||||
|
||||
operation := func() error {
|
||||
client := eureka.NewClient([]string{p.Endpoint})
|
||||
|
||||
applications, err := client.GetApplications()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to retrieve applications, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
configuration, err := p.buildConfiguration(applications)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to build configuration for Provider, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "eureka",
|
||||
Configuration: configuration,
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Duration(p.RefreshSeconds))
|
||||
pool.Go(func(stop chan bool) {
|
||||
for {
|
||||
select {
|
||||
case t := <-ticker.C:
|
||||
log.Debugf("Refreshing Provider %s", t.String())
|
||||
applications, err := client.GetApplications()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to retrieve applications, error: %s", err)
|
||||
continue
|
||||
}
|
||||
configuration, err := p.buildConfiguration(applications)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to refresh Provider configuration, error: %s", err)
|
||||
continue
|
||||
}
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "eureka",
|
||||
Configuration: configuration,
|
||||
}
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot connect to Provider server %+v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func notify(err error, time time.Duration) {
|
||||
log.Errorf("Provider connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
|
|
@@ -1,6 +1,7 @@
package file
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
|
@@ -9,6 +10,7 @@ import (
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containous/traefik/config"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
|
|
@@ -18,6 +20,8 @@ import (
"gopkg.in/fsnotify.v1"
|
||||
)
|
||||
|
||||
const providerName = "file"
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
|
|
@@ -34,7 +38,7 @@ func (p *Provider) Init(constraints types.Constraints) error {
|
||||
// Provide allows the file provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
func (p *Provider) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error {
|
||||
configuration, err := p.BuildConfiguration()
|
||||
|
||||
if err != nil {
|
||||
|
|
@@ -63,9 +67,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
// BuildConfiguration loads configuration either from file or a directory specified by 'Filename'/'Directory'
|
||||
// and returns a 'Configuration' object
|
||||
func (p *Provider) BuildConfiguration() (*types.Configuration, error) {
|
||||
func (p *Provider) BuildConfiguration() (*config.Configuration, error) {
|
||||
ctx := log.With(context.Background(), log.Str(log.ProviderName, providerName))
|
||||
|
||||
if len(p.Directory) > 0 {
|
||||
return p.loadFileConfigFromDirectory(p.Directory, nil)
|
||||
return p.loadFileConfigFromDirectory(ctx, p.Directory, nil)
|
||||
}
|
||||
|
||||
if len(p.Filename) > 0 {
|
||||
|
|
@@ -79,7 +85,7 @@ func (p *Provider) BuildConfiguration() (*types.Configuration, error) {
return nil, errors.New("error using file configuration backend, no filename defined")
|
||||
}
|
||||
|
||||
func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationChan chan<- types.ConfigMessage, callback func(chan<- types.ConfigMessage, fsnotify.Event)) error {
|
||||
func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationChan chan<- config.Message, callback func(chan<- config.Message, fsnotify.Event)) error {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating file watcher: %s", err)
|
||||
|
|
@@ -115,14 +121,14 @@ func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationChan chan<- types.ConfigMessage, callback func(chan<- types.ConfigMessage, fsnotify.Event)) error {
callback(configurationChan, evt)
|
||||
}
|
||||
case err := <-watcher.Errors:
|
||||
log.Errorf("Watcher event error: %s", err)
|
||||
log.WithoutContext().WithField(log.ProviderName, providerName).Errorf("Watcher event error: %s", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) watcherCallback(configurationChan chan<- types.ConfigMessage, event fsnotify.Event) {
|
||||
func (p *Provider) watcherCallback(configurationChan chan<- config.Message, event fsnotify.Event) {
|
||||
watchItem := p.TraefikFile
|
||||
if len(p.Directory) > 0 {
|
||||
watchItem = p.Directory
|
||||
|
|
@@ -130,23 +136,24 @@ func (p *Provider) watcherCallback(configurationChan chan<- types.ConfigMessage, event fsnotify.Event) {
watchItem = p.Filename
|
||||
}
|
||||
|
||||
logger := log.WithoutContext().WithField(log.ProviderName, providerName)
|
||||
|
||||
if _, err := os.Stat(watchItem); err != nil {
|
||||
log.Debugf("Unable to watch %s : %v", watchItem, err)
|
||||
logger.Errorf("Unable to watch %s : %v", watchItem, err)
|
||||
return
|
||||
}
|
||||
|
||||
configuration, err := p.BuildConfiguration()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Error occurred during watcher callback: %s", err)
|
||||
logger.Errorf("Error occurred during watcher callback: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
sendConfigToChannel(configurationChan, configuration)
|
||||
}
|
||||
|
||||
func sendConfigToChannel(configurationChan chan<- types.ConfigMessage, configuration *types.Configuration) {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
func sendConfigToChannel(configurationChan chan<- config.Message, configuration *config.Configuration) {
|
||||
configurationChan <- config.Message{
|
||||
ProviderName: "file",
|
||||
Configuration: configuration,
|
||||
}
|
||||
|
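The hunks above move the file provider to contextual logging. A minimal sketch of that pattern, assuming the containous/traefik/log helpers exactly as they appear in this diff (log.With, log.Str, log.FromContext, log.WithoutContext) and the providerName constant defined earlier in the file:

	package file

	import (
		"context"
		"errors"

		"github.com/containous/traefik/log"
	)

	// logSketch illustrates the two logger styles used in this refactor: one bound
	// to a context carrying the provider name, and one built without a context.
	func logSketch() {
		err := errors.New("example error")

		ctx := log.With(context.Background(), log.Str(log.ProviderName, providerName))
		log.FromContext(ctx).Errorf("Error occurred during watcher callback: %s", err)

		log.WithoutContext().WithField(log.ProviderName, providerName).
			Errorf("Watcher event error: %s", err)
	}
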
|
@@ -163,13 +170,13 @@ func readFile(filename string) (string, error) {
return "", fmt.Errorf("invalid filename: %s", filename)
|
||||
}
|
||||
|
||||
func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*types.Configuration, error) {
|
||||
func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*config.Configuration, error) {
|
||||
fileContent, err := readFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading configuration file: %s - %s", filename, err)
|
||||
}
|
||||
|
||||
var configuration *types.Configuration
|
||||
var configuration *config.Configuration
|
||||
if parseTemplate {
|
||||
configuration, err = p.CreateConfiguration(fileContent, template.FuncMap{}, false)
|
||||
} else {
|
||||
|
|
@@ -179,16 +186,20 @@ func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*types.Configuration, error) {
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if configuration == nil || configuration.Backends == nil && configuration.Frontends == nil && configuration.TLS == nil {
|
||||
configuration = &types.Configuration{
|
||||
Frontends: make(map[string]*types.Frontend),
|
||||
Backends: make(map[string]*types.Backend),
|
||||
|
||||
if configuration == nil || configuration.Routers == nil && configuration.Middlewares == nil && configuration.Services == nil && configuration.TLS == nil {
|
||||
configuration = &config.Configuration{
|
||||
Routers: make(map[string]*config.Router),
|
||||
Middlewares: make(map[string]*config.Middleware),
|
||||
Services: make(map[string]*config.Service),
|
||||
}
|
||||
}
|
||||
return configuration, err
|
||||
}
|
||||
|
||||
func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) {
|
||||
func (p *Provider) loadFileConfigFromDirectory(ctx context.Context, directory string, configuration *config.Configuration) (*config.Configuration, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
fileList, err := ioutil.ReadDir(directory)
|
||||
|
||||
if err != nil {
|
||||
|
|
@@ -196,9 +207,10 @@ func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) {
}
|
||||
|
||||
if configuration == nil {
|
||||
configuration = &types.Configuration{
|
||||
Frontends: make(map[string]*types.Frontend),
|
||||
Backends: make(map[string]*types.Backend),
|
||||
configuration = &config.Configuration{
|
||||
Routers: make(map[string]*config.Router),
|
||||
Middlewares: make(map[string]*config.Middleware),
|
||||
Services: make(map[string]*config.Service),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -206,7 +218,7 @@ func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) {
for _, item := range fileList {
|
||||
|
||||
if item.IsDir() {
|
||||
configuration, err = p.loadFileConfigFromDirectory(filepath.Join(directory, item.Name()), configuration)
|
||||
configuration, err = p.loadFileConfigFromDirectory(ctx, filepath.Join(directory, item.Name()), configuration)
|
||||
if err != nil {
|
||||
return configuration, fmt.Errorf("unable to load content configuration from subdirectory %s: %v", item, err)
|
||||
}
|
||||
|
|
@@ -215,38 +227,46 @@ func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) {
continue
|
||||
}
|
||||
|
||||
var c *types.Configuration
|
||||
var c *config.Configuration
|
||||
c, err = p.loadFileConfig(path.Join(directory, item.Name()), true)
|
||||
|
||||
if err != nil {
|
||||
return configuration, err
|
||||
}
|
||||
|
||||
for backendName, backend := range c.Backends {
|
||||
if _, exists := configuration.Backends[backendName]; exists {
|
||||
log.Warnf("Backend %s already configured, skipping", backendName)
|
||||
for name, conf := range c.Routers {
|
||||
if _, exists := configuration.Routers[name]; exists {
|
||||
logger.WithField(log.RouterName, name).Warn("Router already configured, skipping")
|
||||
} else {
|
||||
configuration.Backends[backendName] = backend
|
||||
configuration.Routers[name] = conf
|
||||
}
|
||||
}
|
||||
|
||||
for frontendName, frontend := range c.Frontends {
|
||||
if _, exists := configuration.Frontends[frontendName]; exists {
|
||||
log.Warnf("Frontend %s already configured, skipping", frontendName)
|
||||
for name, conf := range c.Middlewares {
|
||||
if _, exists := configuration.Middlewares[name]; exists {
|
||||
logger.WithField(log.MiddlewareName, name).Warn("Middleware already configured, skipping")
|
||||
} else {
|
||||
configuration.Frontends[frontendName] = frontend
|
||||
configuration.Middlewares[name] = conf
|
||||
}
|
||||
}
|
||||
|
||||
for name, conf := range c.Services {
|
||||
if _, exists := configuration.Services[name]; exists {
|
||||
logger.WithField(log.ServiceName, name).Warn("Service already configured, skipping")
|
||||
} else {
|
||||
configuration.Services[name] = conf
|
||||
}
|
||||
}
|
||||
|
||||
for _, conf := range c.TLS {
|
||||
if _, exists := configTLSMaps[conf]; exists {
|
||||
log.Warnf("TLS Configuration %v already configured, skipping", conf)
|
||||
logger.Warnf("TLS Configuration %v already configured, skipping", conf)
|
||||
} else {
|
||||
configTLSMaps[conf] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for conf := range configTLSMaps {
|
||||
configuration.TLS = append(configuration.TLS, conf)
|
||||
}
|
||||
|
|
|
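The directory-loading hunk above merges each file's routers, middlewares, services, and TLS entries into the accumulated configuration, keeping the first definition of a name and only logging a warning for duplicates. A minimal self-contained sketch of that merge strategy, using simplified map types and hypothetical names rather than the provider's real config structs:

package main

import "log"

// mergeKeepFirst copies entries from src into dst, skipping names that are
// already present and warning instead, mirroring the
// "already configured, skipping" behaviour in loadFileConfigFromDirectory.
func mergeKeepFirst(kind string, dst, src map[string]string) {
	for name, conf := range src {
		if _, exists := dst[name]; exists {
			log.Printf("%s %q already configured, skipping", kind, name)
			continue
		}
		dst[name] = conf
	}
}

func main() {
	routers := map[string]string{"router1": "from first file"}
	mergeKeepFirst("Router", routers, map[string]string{
		"router1": "from second file", // duplicate: the first definition wins
		"router2": "from second file",
	})
	log.Printf("merged routers: %v", routers)
}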
|||
|
|
@@ -9,193 +9,29 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containous/traefik/config"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// createRandomFile Helper
|
||||
func createRandomFile(t *testing.T, tempDir string, contents ...string) *os.File {
|
||||
return createFile(t, tempDir, fmt.Sprintf("temp%d.toml", time.Now().UnixNano()), contents...)
|
||||
}
|
||||
|
||||
// createFile Helper
|
||||
func createFile(t *testing.T, tempDir string, name string, contents ...string) *os.File {
|
||||
t.Helper()
|
||||
fileName := path.Join(tempDir, name)
|
||||
|
||||
tempFile, err := os.Create(fileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, content := range contents {
|
||||
_, err := tempFile.WriteString(content)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
err = tempFile.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return tempFile
|
||||
}
|
||||
|
||||
// createTempDir Helper
|
||||
func createTempDir(t *testing.T, dir string) string {
|
||||
t.Helper()
|
||||
d, err := ioutil.TempDir("", dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// createFrontendConfiguration Helper
|
||||
func createFrontendConfiguration(n int) string {
|
||||
conf := "[frontends]\n"
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(` [frontends."frontend%[1]d"]
|
||||
backend = "backend%[1]d"
|
||||
`, i)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// createBackendConfiguration Helper
|
||||
func createBackendConfiguration(n int) string {
|
||||
conf := "[backends]\n"
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(` [backends.backend%[1]d]
|
||||
[backends.backend%[1]d.servers.server1]
|
||||
url = "http://172.17.0.%[1]d:80"
|
||||
`, i)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// createTLS Helper
|
||||
func createTLS(n int) string {
|
||||
var conf string
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(`[[TLS]]
|
||||
EntryPoints = ["https"]
|
||||
[TLS.Certificate]
|
||||
CertFile = "integration/fixtures/https/snitest%[1]d.com.cert"
|
||||
KeyFile = "integration/fixtures/https/snitest%[1]d.com.key"
|
||||
`, i)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
type ProvideTestCase struct {
|
||||
desc string
|
||||
directoryContent []string
|
||||
fileContent string
|
||||
traefikFileContent string
|
||||
expectedNumFrontend int
|
||||
expectedNumBackend int
|
||||
expectedNumTLSConf int
|
||||
}
|
||||
|
||||
func getTestCases() []ProvideTestCase {
|
||||
return []ProvideTestCase{
|
||||
{
|
||||
desc: "simple file",
|
||||
fileContent: createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4),
|
||||
expectedNumFrontend: 2,
|
||||
expectedNumBackend: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "simple file and a traefik file",
|
||||
fileContent: createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4),
|
||||
traefikFileContent: `
|
||||
debug=true
|
||||
`,
|
||||
expectedNumFrontend: 2,
|
||||
expectedNumBackend: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "template file",
|
||||
fileContent: `
|
||||
[frontends]
|
||||
{{ range $i, $e := until 20 }}
|
||||
[frontends.frontend{{ $e }}]
|
||||
backend = "backend"
|
||||
{{ end }}
|
||||
`,
|
||||
expectedNumFrontend: 20,
|
||||
},
|
||||
{
|
||||
desc: "simple directory",
|
||||
directoryContent: []string{
|
||||
createFrontendConfiguration(2),
|
||||
createBackendConfiguration(3),
|
||||
createTLS(4),
|
||||
},
|
||||
expectedNumFrontend: 2,
|
||||
expectedNumBackend: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "template in directory",
|
||||
directoryContent: []string{
|
||||
`
|
||||
[frontends]
|
||||
{{ range $i, $e := until 20 }}
|
||||
[frontends.frontend{{ $e }}]
|
||||
backend = "backend"
|
||||
{{ end }}
|
||||
`,
|
||||
`
|
||||
[backends]
|
||||
{{ range $i, $e := until 20 }}
|
||||
[backends.backend{{ $e }}]
|
||||
[backends.backend{{ $e }}.servers.server1]
|
||||
url="http://127.0.0.1"
|
||||
{{ end }}
|
||||
`,
|
||||
},
|
||||
expectedNumFrontend: 20,
|
||||
expectedNumBackend: 20,
|
||||
},
|
||||
{
|
||||
desc: "simple traefik file",
|
||||
traefikFileContent: `
|
||||
debug=true
|
||||
[file]
|
||||
` + createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4),
|
||||
expectedNumFrontend: 2,
|
||||
expectedNumBackend: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "simple traefik file with templating",
|
||||
traefikFileContent: `
|
||||
temp="{{ getTag \"test\" }}"
|
||||
[file]
|
||||
` + createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4),
|
||||
expectedNumFrontend: 2,
|
||||
expectedNumBackend: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
}
|
||||
desc string
|
||||
directoryContent []string
|
||||
fileContent string
|
||||
traefikFileContent string
|
||||
expectedNumRouter int
|
||||
expectedNumService int
|
||||
expectedNumTLSConf int
|
||||
}
|
||||
|
||||
func TestProvideWithoutWatch(t *testing.T) {
|
||||
for _, test := range getTestCases() {
|
||||
test := test
|
||||
t.Run(test.desc+" without watch", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
provider, clean := createProvider(t, test, false)
|
||||
defer clean()
|
||||
configChan := make(chan types.ConfigMessage)
|
||||
configChan := make(chan config.Message)
|
||||
|
||||
provider.DebugLogGeneratedTemplate = true
|
||||
|
||||
go func() {
|
||||
err := provider.Provide(configChan, safe.NewPool(context.Background()))
|
||||
|
|
@@ -204,10 +40,10 @@ func TestProvideWithoutWatch(t *testing.T) {
|
|||
|
||||
timeout := time.After(time.Second)
|
||||
select {
|
||||
case config := <-configChan:
|
||||
assert.Len(t, config.Configuration.Backends, test.expectedNumBackend)
|
||||
assert.Len(t, config.Configuration.Frontends, test.expectedNumFrontend)
|
||||
assert.Len(t, config.Configuration.TLS, test.expectedNumTLSConf)
|
||||
case conf := <-configChan:
|
||||
assert.Len(t, conf.Configuration.Services, test.expectedNumService)
|
||||
assert.Len(t, conf.Configuration.Routers, test.expectedNumRouter)
|
||||
assert.Len(t, conf.Configuration.TLS, test.expectedNumTLSConf)
|
||||
case <-timeout:
|
||||
t.Errorf("timeout while waiting for config")
|
||||
}
|
||||
|
|
@@ -217,13 +53,10 @@ func TestProvideWithoutWatch(t *testing.T) {
|
|||
|
||||
func TestProvideWithWatch(t *testing.T) {
|
||||
for _, test := range getTestCases() {
|
||||
test := test
|
||||
t.Run(test.desc+" with watch", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
provider, clean := createProvider(t, test, true)
|
||||
defer clean()
|
||||
configChan := make(chan types.ConfigMessage)
|
||||
configChan := make(chan config.Message)
|
||||
|
||||
go func() {
|
||||
err := provider.Provide(configChan, safe.NewPool(context.Background()))
|
||||
|
|
@@ -232,10 +65,10 @@ func TestProvideWithWatch(t *testing.T) {
|
|||
|
||||
timeout := time.After(time.Second)
|
||||
select {
|
||||
case config := <-configChan:
|
||||
assert.Len(t, config.Configuration.Backends, 0)
|
||||
assert.Len(t, config.Configuration.Frontends, 0)
|
||||
assert.Len(t, config.Configuration.TLS, 0)
|
||||
case conf := <-configChan:
|
||||
assert.Len(t, conf.Configuration.Services, 0)
|
||||
assert.Len(t, conf.Configuration.Routers, 0)
|
||||
assert.Len(t, conf.Configuration.TLS, 0)
|
||||
case <-timeout:
|
||||
t.Errorf("timeout while waiting for config")
|
||||
}
|
||||
|
|
@@ -259,17 +92,17 @@ func TestProvideWithWatch(t *testing.T) {
|
|||
}
|
||||
|
||||
timeout = time.After(time.Second * 1)
|
||||
var numUpdates, numBackends, numFrontends, numTLSConfs int
|
||||
var numUpdates, numServices, numRouters, numTLSConfs int
|
||||
for {
|
||||
select {
|
||||
case config := <-configChan:
|
||||
case conf := <-configChan:
|
||||
numUpdates++
|
||||
numBackends = len(config.Configuration.Backends)
|
||||
numFrontends = len(config.Configuration.Frontends)
|
||||
numTLSConfs = len(config.Configuration.TLS)
|
||||
t.Logf("received update #%d: backends %d/%d, frontends %d/%d, TLS configs %d/%d", numUpdates, numBackends, test.expectedNumBackend, numFrontends, test.expectedNumFrontend, numTLSConfs, test.expectedNumTLSConf)
|
||||
numServices = len(conf.Configuration.Services)
|
||||
numRouters = len(conf.Configuration.Routers)
|
||||
numTLSConfs = len(conf.Configuration.TLS)
|
||||
t.Logf("received update #%d: services %d/%d, routers %d/%d, TLS configs %d/%d", numUpdates, numServices, test.expectedNumService, numRouters, test.expectedNumRouter, numTLSConfs, test.expectedNumTLSConf)
|
||||
|
||||
if numBackends == test.expectedNumBackend && numFrontends == test.expectedNumFrontend && numTLSConfs == test.expectedNumTLSConf {
|
||||
if numServices == test.expectedNumService && numRouters == test.expectedNumRouter && numTLSConfs == test.expectedNumTLSConf {
|
||||
return
|
||||
}
|
||||
case <-timeout:
|
||||
|
|
@@ -282,7 +115,7 @@ func TestProvideWithWatch(t *testing.T) {
|
|||
|
||||
func TestErrorWhenEmptyConfig(t *testing.T) {
|
||||
provider := &Provider{}
|
||||
configChan := make(chan types.ConfigMessage)
|
||||
configChan := make(chan config.Message)
|
||||
errorChan := make(chan struct{})
|
||||
go func() {
|
||||
err := provider.Provide(configChan, safe.NewPool(context.Background()))
|
||||
|
|
@@ -300,6 +133,93 @@ func TestErrorWhenEmptyConfig(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func getTestCases() []ProvideTestCase {
|
||||
return []ProvideTestCase{
|
||||
{
|
||||
desc: "simple file",
|
||||
fileContent: createRoutersConfiguration(3) + createServicesConfiguration(6) + createTLS(5),
|
||||
expectedNumRouter: 3,
|
||||
expectedNumService: 6,
|
||||
expectedNumTLSConf: 5,
|
||||
},
|
||||
{
|
||||
desc: "simple file and a traefik file",
|
||||
fileContent: createRoutersConfiguration(4) + createServicesConfiguration(8) + createTLS(4),
|
||||
traefikFileContent: `
|
||||
debug=true
|
||||
`,
|
||||
expectedNumRouter: 4,
|
||||
expectedNumService: 8,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "template file",
|
||||
fileContent: `
|
||||
[routers]
|
||||
{{ range $i, $e := until 20 }}
|
||||
[routers.router{{ $e }}]
|
||||
service = "application"
|
||||
{{ end }}
|
||||
`,
|
||||
expectedNumRouter: 20,
|
||||
},
|
||||
{
|
||||
desc: "simple directory",
|
||||
directoryContent: []string{
|
||||
createRoutersConfiguration(2),
|
||||
createServicesConfiguration(3),
|
||||
createTLS(4),
|
||||
},
|
||||
expectedNumRouter: 2,
|
||||
expectedNumService: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "template in directory",
|
||||
directoryContent: []string{
|
||||
`
|
||||
[routers]
|
||||
{{ range $i, $e := until 20 }}
|
||||
[routers.router{{ $e }}]
|
||||
service = "application"
|
||||
{{ end }}
|
||||
`,
|
||||
`
|
||||
[services]
|
||||
{{ range $i, $e := until 20 }}
|
||||
[services.application-{{ $e }}]
|
||||
[[services.application-{{ $e }}.servers]]
|
||||
url="http://127.0.0.1"
|
||||
weight = 1
|
||||
{{ end }}
|
||||
`,
|
||||
},
|
||||
expectedNumRouter: 20,
|
||||
expectedNumService: 20,
|
||||
},
|
||||
{
|
||||
desc: "simple traefik file",
|
||||
traefikFileContent: `
|
||||
debug=true
|
||||
[file]
|
||||
` + createRoutersConfiguration(2) + createServicesConfiguration(3) + createTLS(4),
|
||||
expectedNumRouter: 2,
|
||||
expectedNumService: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
{
|
||||
desc: "simple traefik file with templating",
|
||||
traefikFileContent: `
|
||||
temp="{{ getTag \"test\" }}"
|
||||
[file]
|
||||
` + createRoutersConfiguration(2) + createServicesConfiguration(3) + createTLS(4),
|
||||
expectedNumRouter: 2,
|
||||
expectedNumService: 3,
|
||||
expectedNumTLSConf: 4,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createProvider(t *testing.T, test ProvideTestCase, watch bool) (*Provider, func()) {
|
||||
tempDir := createTempDir(t, "testdir")
|
||||
|
||||
|
|
@@ -336,3 +256,83 @@ func createProvider(t *testing.T, test ProvideTestCase, watch bool) (*Provider,
|
|||
os.Remove(tempDir)
|
||||
}
|
||||
}
|
||||
|
||||
// createRandomFile Helper
|
||||
func createRandomFile(t *testing.T, tempDir string, contents ...string) *os.File {
|
||||
return createFile(t, tempDir, fmt.Sprintf("temp%d.toml", time.Now().UnixNano()), contents...)
|
||||
}
|
||||
|
||||
// createFile Helper
|
||||
func createFile(t *testing.T, tempDir string, name string, contents ...string) *os.File {
|
||||
t.Helper()
|
||||
fileName := path.Join(tempDir, name)
|
||||
|
||||
tempFile, err := os.Create(fileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, content := range contents {
|
||||
_, err = tempFile.WriteString(content)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
err = tempFile.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return tempFile
|
||||
}
|
||||
|
||||
// createTempDir Helper
|
||||
func createTempDir(t *testing.T, dir string) string {
|
||||
t.Helper()
|
||||
d, err := ioutil.TempDir("", dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// createRoutersConfiguration Helper
|
||||
func createRoutersConfiguration(n int) string {
|
||||
conf := "[routers]\n"
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(`
|
||||
[routers."router%[1]d"]
|
||||
service = "application-%[1]d"
|
||||
`, i)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// createServicesConfiguration Helper
|
||||
func createServicesConfiguration(n int) string {
|
||||
conf := "[services]\n"
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(`
|
||||
[services.application-%[1]d.loadbalancer]
|
||||
[[services.application-%[1]d.loadbalancer.servers]]
|
||||
url = "http://172.17.0.%[1]d:80"
|
||||
weight = 1
|
||||
`, i)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// createTLS Helper
|
||||
func createTLS(n int) string {
|
||||
var conf string
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(`[[TLS]]
|
||||
EntryPoints = ["https"]
|
||||
[TLS.Certificate]
|
||||
CertFile = "integration/fixtures/https/snitest%[1]d.com.cert"
|
||||
KeyFile = "integration/fixtures/https/snitest%[1]d.com.key"
|
||||
`, i)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
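The createRoutersConfiguration and createServicesConfiguration helpers above generate fragments in the new routers/services layout. For reference, a fragment of that shape can be decoded into plain structs; the snippet below is a sketch that assumes the BurntSushi toml package and uses simplified stand-in types, not the real dynamic configuration types:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Simplified stand-ins for the new dynamic configuration layout exercised by
// the helpers above; these are illustrative types, not the real config structs.
type router struct {
	Service string
}

type server struct {
	URL    string
	Weight int
}

type loadBalancer struct {
	Servers []server
}

type service struct {
	LoadBalancer loadBalancer
}

type dynamicConfig struct {
	Routers  map[string]router
	Services map[string]service
}

func main() {
	blob := `
[routers]
  [routers."router1"]
  service = "application-1"

[services]
  [services.application-1.loadbalancer]
    [[services.application-1.loadbalancer.servers]]
    url = "http://172.17.0.1:80"
    weight = 1
`
	var conf dynamicConfig
	if _, err := toml.Decode(blob, &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("router1 -> %s, servers: %v\n",
		conf.Routers["router1"].Service,
		conf.Services["application-1"].LoadBalancer.Servers)
}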
|
|
|||
|
|
@@ -1,122 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/containous/traefik/provider/label"
|
||||
)
|
||||
|
||||
const (
|
||||
annotationKubernetesIngressClass = "kubernetes.io/ingress.class"
|
||||
annotationKubernetesAuthRealm = "ingress.kubernetes.io/auth-realm"
|
||||
annotationKubernetesAuthType = "ingress.kubernetes.io/auth-type"
|
||||
annotationKubernetesAuthSecret = "ingress.kubernetes.io/auth-secret"
|
||||
annotationKubernetesAuthHeaderField = "ingress.kubernetes.io/auth-header-field"
|
||||
annotationKubernetesAuthForwardResponseHeaders = "ingress.kubernetes.io/auth-response-headers"
|
||||
annotationKubernetesAuthRemoveHeader = "ingress.kubernetes.io/auth-remove-header"
|
||||
annotationKubernetesAuthForwardURL = "ingress.kubernetes.io/auth-url"
|
||||
annotationKubernetesAuthForwardTrustHeaders = "ingress.kubernetes.io/auth-trust-headers"
|
||||
annotationKubernetesAuthForwardTLSSecret = "ingress.kubernetes.io/auth-tls-secret"
|
||||
annotationKubernetesAuthForwardTLSInsecure = "ingress.kubernetes.io/auth-tls-insecure"
|
||||
annotationKubernetesRewriteTarget = "ingress.kubernetes.io/rewrite-target"
|
||||
annotationKubernetesWhiteListSourceRange = "ingress.kubernetes.io/whitelist-source-range"
|
||||
annotationKubernetesWhiteListIPStrategy = "ingress.kubernetes.io/whitelist-ipstrategy"
|
||||
annotationKubernetesWhiteListIPStrategyDepth = "ingress.kubernetes.io/whitelist-ipstrategy-depth"
|
||||
annotationKubernetesWhiteListIPStrategyExcludedIPs = "ingress.kubernetes.io/whitelist-ipstrategy-excluded-ips"
|
||||
annotationKubernetesPreserveHost = "ingress.kubernetes.io/preserve-host"
|
||||
annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert" // Deprecated
|
||||
annotationKubernetesPassTLSClientCert = "ingress.kubernetes.io/pass-client-tls-cert"
|
||||
annotationKubernetesFrontendEntryPoints = "ingress.kubernetes.io/frontend-entry-points"
|
||||
annotationKubernetesPriority = "ingress.kubernetes.io/priority"
|
||||
annotationKubernetesCircuitBreakerExpression = "ingress.kubernetes.io/circuit-breaker-expression"
|
||||
annotationKubernetesLoadBalancerMethod = "ingress.kubernetes.io/load-balancer-method"
|
||||
annotationKubernetesAffinity = "ingress.kubernetes.io/affinity"
|
||||
annotationKubernetesSessionCookieName = "ingress.kubernetes.io/session-cookie-name"
|
||||
annotationKubernetesRuleType = "ingress.kubernetes.io/rule-type"
|
||||
annotationKubernetesRedirectEntryPoint = "ingress.kubernetes.io/redirect-entry-point"
|
||||
annotationKubernetesRedirectPermanent = "ingress.kubernetes.io/redirect-permanent"
|
||||
annotationKubernetesRedirectRegex = "ingress.kubernetes.io/redirect-regex"
|
||||
annotationKubernetesRedirectReplacement = "ingress.kubernetes.io/redirect-replacement"
|
||||
annotationKubernetesMaxConnAmount = "ingress.kubernetes.io/max-conn-amount"
|
||||
annotationKubernetesMaxConnExtractorFunc = "ingress.kubernetes.io/max-conn-extractor-func"
|
||||
annotationKubernetesRateLimit = "ingress.kubernetes.io/rate-limit"
|
||||
annotationKubernetesErrorPages = "ingress.kubernetes.io/error-pages"
|
||||
annotationKubernetesBuffering = "ingress.kubernetes.io/buffering"
|
||||
annotationKubernetesResponseForwardingFlushInterval = "ingress.kubernetes.io/responseforwarding-flushinterval"
|
||||
annotationKubernetesAppRoot = "ingress.kubernetes.io/app-root"
|
||||
annotationKubernetesServiceWeights = "ingress.kubernetes.io/service-weights"
|
||||
annotationKubernetesRequestModifier = "ingress.kubernetes.io/request-modifier"
|
||||
|
||||
annotationKubernetesSSLForceHost = "ingress.kubernetes.io/ssl-force-host"
|
||||
annotationKubernetesSSLRedirect = "ingress.kubernetes.io/ssl-redirect"
|
||||
annotationKubernetesHSTSMaxAge = "ingress.kubernetes.io/hsts-max-age"
|
||||
annotationKubernetesHSTSIncludeSubdomains = "ingress.kubernetes.io/hsts-include-subdomains"
|
||||
annotationKubernetesCustomRequestHeaders = "ingress.kubernetes.io/custom-request-headers"
|
||||
annotationKubernetesCustomResponseHeaders = "ingress.kubernetes.io/custom-response-headers"
|
||||
annotationKubernetesAllowedHosts = "ingress.kubernetes.io/allowed-hosts"
|
||||
annotationKubernetesProxyHeaders = "ingress.kubernetes.io/proxy-headers"
|
||||
annotationKubernetesSSLTemporaryRedirect = "ingress.kubernetes.io/ssl-temporary-redirect"
|
||||
annotationKubernetesSSLHost = "ingress.kubernetes.io/ssl-host"
|
||||
annotationKubernetesSSLProxyHeaders = "ingress.kubernetes.io/ssl-proxy-headers"
|
||||
annotationKubernetesHSTSPreload = "ingress.kubernetes.io/hsts-preload"
|
||||
annotationKubernetesForceHSTSHeader = "ingress.kubernetes.io/force-hsts"
|
||||
annotationKubernetesFrameDeny = "ingress.kubernetes.io/frame-deny"
|
||||
annotationKubernetesCustomFrameOptionsValue = "ingress.kubernetes.io/custom-frame-options-value"
|
||||
annotationKubernetesContentTypeNosniff = "ingress.kubernetes.io/content-type-nosniff"
|
||||
annotationKubernetesBrowserXSSFilter = "ingress.kubernetes.io/browser-xss-filter"
|
||||
annotationKubernetesCustomBrowserXSSValue = "ingress.kubernetes.io/custom-browser-xss-value"
|
||||
annotationKubernetesContentSecurityPolicy = "ingress.kubernetes.io/content-security-policy"
|
||||
annotationKubernetesPublicKey = "ingress.kubernetes.io/public-key"
|
||||
annotationKubernetesReferrerPolicy = "ingress.kubernetes.io/referrer-policy"
|
||||
annotationKubernetesIsDevelopment = "ingress.kubernetes.io/is-development"
|
||||
annotationKubernetesProtocol = "ingress.kubernetes.io/protocol"
|
||||
)
|
||||
|
||||
func getAnnotationName(annotations map[string]string, name string) string {
|
||||
if _, ok := annotations[name]; ok {
|
||||
return name
|
||||
}
|
||||
|
||||
if _, ok := annotations[label.Prefix+name]; ok {
|
||||
return label.Prefix + name
|
||||
}
|
||||
|
||||
return name
|
||||
}
|
||||
|
||||
func getStringValue(annotations map[string]string, annotation string, defaultValue string) string {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
return label.GetStringValue(annotations, annotationName, defaultValue)
|
||||
}
|
||||
|
||||
func getStringSafeValue(annotations map[string]string, annotation string, defaultValue string) (string, error) {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
value := label.GetStringValue(annotations, annotationName, defaultValue)
|
||||
_, err := strconv.Unquote(`"` + value + `"`)
|
||||
return value, err
|
||||
}
|
||||
|
||||
func getBoolValue(annotations map[string]string, annotation string, defaultValue bool) bool {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
return label.GetBoolValue(annotations, annotationName, defaultValue)
|
||||
}
|
||||
|
||||
func getIntValue(annotations map[string]string, annotation string, defaultValue int) int {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
return label.GetIntValue(annotations, annotationName, defaultValue)
|
||||
}
|
||||
|
||||
func getInt64Value(annotations map[string]string, annotation string, defaultValue int64) int64 {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
return label.GetInt64Value(annotations, annotationName, defaultValue)
|
||||
}
|
||||
|
||||
func getSliceStringValue(annotations map[string]string, annotation string) []string {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
return label.GetSliceStringValue(annotations, annotationName)
|
||||
}
|
||||
|
||||
func getMapValue(annotations map[string]string, annotation string) map[string]string {
|
||||
annotationName := getAnnotationName(annotations, annotation)
|
||||
return label.GetMapValue(annotations, annotationName)
|
||||
}
|
||||
|
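The removed annotation helpers above resolve a key either by its plain name or by its label-prefixed variant before delegating to the label package. A standalone sketch of that fallback lookup; the prefix value and names here are illustrative stand-ins, the real prefix comes from provider/label:

package main

import "fmt"

// prefix is an illustrative stand-in for label.Prefix.
const prefix = "traefik."

// annotationName mirrors the removed getAnnotationName: prefer the plain
// annotation key, fall back to the prefixed variant, otherwise return the
// plain name so the caller's default value applies.
func annotationName(annotations map[string]string, name string) string {
	if _, ok := annotations[name]; ok {
		return name
	}
	if _, ok := annotations[prefix+name]; ok {
		return prefix + name
	}
	return name
}

func main() {
	annotations := map[string]string{
		prefix + "ingress.kubernetes.io/preserve-host": "true",
	}
	fmt.Println(annotationName(annotations, "ingress.kubernetes.io/preserve-host"))
	// Output: traefik.ingress.kubernetes.io/preserve-host
}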
|
@@ -1,44 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetAnnotationName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
annotations map[string]string
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
desc: "with standard annotation",
|
||||
name: annotationKubernetesPreserveHost,
|
||||
annotations: map[string]string{
|
||||
annotationKubernetesPreserveHost: "true",
|
||||
},
|
||||
expected: annotationKubernetesPreserveHost,
|
||||
},
|
||||
{
|
||||
desc: "with prefixed annotation",
|
||||
name: annotationKubernetesPreserveHost,
|
||||
annotations: map[string]string{
|
||||
label.Prefix + annotationKubernetesPreserveHost: "true",
|
||||
},
|
||||
expected: label.Prefix + annotationKubernetesPreserveHost,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := getAnnotationName(test.annotations, test.name)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,637 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/tls"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func buildConfiguration(opts ...func(*types.Configuration)) *types.Configuration {
|
||||
conf := &types.Configuration{}
|
||||
for _, opt := range opts {
|
||||
opt(conf)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// Backend
|
||||
|
||||
func backends(opts ...func(*types.Backend) string) func(*types.Configuration) {
|
||||
return func(c *types.Configuration) {
|
||||
c.Backends = make(map[string]*types.Backend)
|
||||
for _, opt := range opts {
|
||||
b := &types.Backend{}
|
||||
name := opt(b)
|
||||
c.Backends[name] = b
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func backend(name string, opts ...func(*types.Backend)) func(*types.Backend) string {
|
||||
return func(b *types.Backend) string {
|
||||
for _, opt := range opts {
|
||||
opt(b)
|
||||
}
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
func servers(opts ...func(*types.Server) string) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
b.Servers = make(map[string]types.Server)
|
||||
for _, opt := range opts {
|
||||
s := &types.Server{}
|
||||
name := opt(s)
|
||||
b.Servers[name] = *s
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func server(url string, opts ...func(*types.Server)) func(*types.Server) string {
|
||||
return func(s *types.Server) string {
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
s.URL = url
|
||||
return url
|
||||
}
|
||||
}
|
||||
|
||||
func weight(value int) func(*types.Server) {
|
||||
return func(s *types.Server) {
|
||||
s.Weight = value
|
||||
}
|
||||
}
|
||||
|
||||
func lbMethod(method string) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
if b.LoadBalancer == nil {
|
||||
b.LoadBalancer = &types.LoadBalancer{}
|
||||
}
|
||||
b.LoadBalancer.Method = method
|
||||
}
|
||||
}
|
||||
|
||||
func lbStickiness() func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
if b.LoadBalancer == nil {
|
||||
b.LoadBalancer = &types.LoadBalancer{}
|
||||
}
|
||||
b.LoadBalancer.Stickiness = &types.Stickiness{}
|
||||
}
|
||||
}
|
||||
|
||||
func circuitBreaker(exp string) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
b.CircuitBreaker = &types.CircuitBreaker{}
|
||||
b.CircuitBreaker.Expression = exp
|
||||
}
|
||||
}
|
||||
|
||||
func responseForwarding(interval string) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
b.ResponseForwarding = &types.ResponseForwarding{}
|
||||
b.ResponseForwarding.FlushInterval = interval
|
||||
}
|
||||
}
|
||||
|
||||
func buffering(opts ...func(*types.Buffering)) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
if b.Buffering == nil {
|
||||
b.Buffering = &types.Buffering{}
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(b.Buffering)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func maxRequestBodyBytes(value int64) func(*types.Buffering) {
|
||||
return func(b *types.Buffering) {
|
||||
b.MaxRequestBodyBytes = value
|
||||
}
|
||||
}
|
||||
|
||||
func memRequestBodyBytes(value int64) func(*types.Buffering) {
|
||||
return func(b *types.Buffering) {
|
||||
b.MemRequestBodyBytes = value
|
||||
}
|
||||
}
|
||||
|
||||
func maxResponseBodyBytes(value int64) func(*types.Buffering) {
|
||||
return func(b *types.Buffering) {
|
||||
b.MaxResponseBodyBytes = value
|
||||
}
|
||||
}
|
||||
|
||||
func memResponseBodyBytes(value int64) func(*types.Buffering) {
|
||||
return func(b *types.Buffering) {
|
||||
b.MemResponseBodyBytes = value
|
||||
}
|
||||
}
|
||||
|
||||
func retrying(exp string) func(*types.Buffering) {
|
||||
return func(b *types.Buffering) {
|
||||
b.RetryExpression = exp
|
||||
}
|
||||
}
|
||||
|
||||
func maxConnExtractorFunc(exp string) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
if b.MaxConn == nil {
|
||||
b.MaxConn = &types.MaxConn{}
|
||||
}
|
||||
b.MaxConn.ExtractorFunc = exp
|
||||
}
|
||||
}
|
||||
|
||||
func maxConnAmount(value int64) func(*types.Backend) {
|
||||
return func(b *types.Backend) {
|
||||
if b.MaxConn == nil {
|
||||
b.MaxConn = &types.MaxConn{}
|
||||
}
|
||||
b.MaxConn.Amount = value
|
||||
}
|
||||
}
|
||||
|
||||
// Frontend
|
||||
|
||||
func buildFrontends(opts ...func(*types.Frontend) string) map[string]*types.Frontend {
|
||||
fronts := make(map[string]*types.Frontend)
|
||||
for _, opt := range opts {
|
||||
f := &types.Frontend{}
|
||||
name := opt(f)
|
||||
fronts[name] = f
|
||||
}
|
||||
return fronts
|
||||
}
|
||||
|
||||
func frontends(opts ...func(*types.Frontend) string) func(*types.Configuration) {
|
||||
return func(c *types.Configuration) {
|
||||
c.Frontends = make(map[string]*types.Frontend)
|
||||
for _, opt := range opts {
|
||||
f := &types.Frontend{}
|
||||
name := opt(f)
|
||||
c.Frontends[name] = f
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func frontend(backend string, opts ...func(*types.Frontend)) func(*types.Frontend) string {
|
||||
return func(f *types.Frontend) string {
|
||||
for _, opt := range opts {
|
||||
opt(f)
|
||||
}
|
||||
// related to the frontendName function
|
||||
name := f.Backend
|
||||
f.Backend = backend
|
||||
if len(name) > 0 {
|
||||
return name
|
||||
}
|
||||
return backend
|
||||
}
|
||||
}
|
||||
|
||||
func frontendName(name string) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
// temporarily store the frontend name in the backend name field
|
||||
f.Backend = name
|
||||
}
|
||||
}
|
||||
func passHostHeader() func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.PassHostHeader = true
|
||||
}
|
||||
}
|
||||
|
||||
func entryPoints(eps ...string) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.EntryPoints = eps
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated
|
||||
func basicAuthDeprecated(auth ...string) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.Auth = &types.Auth{Basic: &types.Basic{Users: auth}}
|
||||
}
|
||||
}
|
||||
|
||||
func auth(opt func(*types.Auth)) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
auth := &types.Auth{}
|
||||
opt(auth)
|
||||
f.Auth = auth
|
||||
}
|
||||
}
|
||||
|
||||
func basicAuth(users ...string) func(*types.Auth) {
|
||||
return func(a *types.Auth) {
|
||||
a.Basic = &types.Basic{Users: users}
|
||||
}
|
||||
}
|
||||
|
||||
func forwardAuth(forwardURL string, opts ...func(*types.Forward)) func(*types.Auth) {
|
||||
return func(a *types.Auth) {
|
||||
fwd := &types.Forward{Address: forwardURL}
|
||||
for _, opt := range opts {
|
||||
opt(fwd)
|
||||
}
|
||||
a.Forward = fwd
|
||||
}
|
||||
}
|
||||
|
||||
func fwdAuthResponseHeaders(headers ...string) func(*types.Forward) {
|
||||
return func(f *types.Forward) {
|
||||
f.AuthResponseHeaders = headers
|
||||
}
|
||||
}
|
||||
|
||||
func fwdTrustForwardHeader() func(*types.Forward) {
|
||||
return func(f *types.Forward) {
|
||||
f.TrustForwardHeader = true
|
||||
}
|
||||
}
|
||||
|
||||
func fwdAuthTLS(cert, key string, insecure bool) func(*types.Forward) {
|
||||
return func(f *types.Forward) {
|
||||
f.TLS = &types.ClientTLS{Cert: cert, Key: key, InsecureSkipVerify: insecure}
|
||||
}
|
||||
}
|
||||
|
||||
func whiteListRange(ranges ...string) func(*types.WhiteList) {
|
||||
return func(wl *types.WhiteList) {
|
||||
wl.SourceRange = ranges
|
||||
}
|
||||
}
|
||||
|
||||
func whiteListIPStrategy(depth int, excludedIPs ...string) func(*types.WhiteList) {
|
||||
return func(wl *types.WhiteList) {
|
||||
wl.IPStrategy = &types.IPStrategy{
|
||||
Depth: depth,
|
||||
ExcludedIPs: excludedIPs,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func whiteList(opts ...func(*types.WhiteList)) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
if f.WhiteList == nil {
|
||||
f.WhiteList = &types.WhiteList{}
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(f.WhiteList)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func priority(value int) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.Priority = value
|
||||
}
|
||||
}
|
||||
|
||||
func headers(h *types.Headers) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.Headers = h
|
||||
}
|
||||
}
|
||||
|
||||
func redirectEntryPoint(name string) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
if f.Redirect == nil {
|
||||
f.Redirect = &types.Redirect{}
|
||||
}
|
||||
f.Redirect.EntryPoint = name
|
||||
}
|
||||
}
|
||||
|
||||
func redirectRegex(regex, replacement string) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
if f.Redirect == nil {
|
||||
f.Redirect = &types.Redirect{}
|
||||
}
|
||||
f.Redirect.Regex = regex
|
||||
f.Redirect.Replacement = replacement
|
||||
}
|
||||
}
|
||||
|
||||
func errorPage(name string, opts ...func(*types.ErrorPage)) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
if f.Errors == nil {
|
||||
f.Errors = make(map[string]*types.ErrorPage)
|
||||
}
|
||||
|
||||
if len(name) > 0 {
|
||||
f.Errors[name] = &types.ErrorPage{}
|
||||
for _, opt := range opts {
|
||||
opt(f.Errors[name])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func errorStatus(status ...string) func(*types.ErrorPage) {
|
||||
return func(page *types.ErrorPage) {
|
||||
page.Status = status
|
||||
}
|
||||
}
|
||||
|
||||
func errorQuery(query string) func(*types.ErrorPage) {
|
||||
return func(page *types.ErrorPage) {
|
||||
page.Query = query
|
||||
}
|
||||
}
|
||||
|
||||
func errorBackend(backend string) func(*types.ErrorPage) {
|
||||
return func(page *types.ErrorPage) {
|
||||
page.Backend = backend
|
||||
}
|
||||
}
|
||||
|
||||
func rateLimit(opts ...func(*types.RateLimit)) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
if f.RateLimit == nil {
|
||||
f.RateLimit = &types.RateLimit{}
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(f.RateLimit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func rateExtractorFunc(exp string) func(*types.RateLimit) {
|
||||
return func(limit *types.RateLimit) {
|
||||
limit.ExtractorFunc = exp
|
||||
}
|
||||
}
|
||||
|
||||
func rateSet(name string, opts ...func(*types.Rate)) func(*types.RateLimit) {
|
||||
return func(limit *types.RateLimit) {
|
||||
if limit.RateSet == nil {
|
||||
limit.RateSet = make(map[string]*types.Rate)
|
||||
}
|
||||
|
||||
if len(name) > 0 {
|
||||
limit.RateSet[name] = &types.Rate{}
|
||||
for _, opt := range opts {
|
||||
opt(limit.RateSet[name])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func limitAverage(avg int64) func(*types.Rate) {
|
||||
return func(rate *types.Rate) {
|
||||
rate.Average = avg
|
||||
}
|
||||
}
|
||||
|
||||
func limitBurst(burst int64) func(*types.Rate) {
|
||||
return func(rate *types.Rate) {
|
||||
rate.Burst = burst
|
||||
}
|
||||
}
|
||||
|
||||
func limitPeriod(period time.Duration) func(*types.Rate) {
|
||||
return func(rate *types.Rate) {
|
||||
rate.Period = parse.Duration(period)
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated
|
||||
func passTLSCert() func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.PassTLSCert = true
|
||||
}
|
||||
}
|
||||
|
||||
func passTLSClientCert() func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.PassTLSClientCert = &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
NotBefore: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
Country: true,
|
||||
Province: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
CommonName: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
Sans: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func routes(opts ...func(*types.Route) string) func(*types.Frontend) {
|
||||
return func(f *types.Frontend) {
|
||||
f.Routes = make(map[string]types.Route)
|
||||
for _, opt := range opts {
|
||||
s := &types.Route{}
|
||||
name := opt(s)
|
||||
f.Routes[name] = *s
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func route(name string, rule string) func(*types.Route) string {
|
||||
return func(r *types.Route) string {
|
||||
r.Rule = rule
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
func tlsesSection(opts ...func(*tls.Configuration)) func(*types.Configuration) {
|
||||
return func(c *types.Configuration) {
|
||||
for _, opt := range opts {
|
||||
tlsConf := &tls.Configuration{}
|
||||
opt(tlsConf)
|
||||
c.TLS = append(c.TLS, tlsConf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func tlsSection(opts ...func(*tls.Configuration)) func(*tls.Configuration) {
|
||||
return func(c *tls.Configuration) {
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func tlsEntryPoints(entryPoints ...string) func(*tls.Configuration) {
|
||||
return func(c *tls.Configuration) {
|
||||
c.EntryPoints = entryPoints
|
||||
}
|
||||
}
|
||||
|
||||
func certificate(cert string, key string) func(*tls.Configuration) {
|
||||
return func(c *tls.Configuration) {
|
||||
c.Certificate = &tls.Certificate{
|
||||
CertFile: tls.FileOrContent(cert),
|
||||
KeyFile: tls.FileOrContent(key),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test
|
||||
|
||||
func TestBuildConfiguration(t *testing.T) {
|
||||
actual := buildConfiguration(
|
||||
backends(
|
||||
backend("foo/bar",
|
||||
servers(
|
||||
server("http://10.10.0.1:8080", weight(1)),
|
||||
server("http://10.21.0.1:8080", weight(1)),
|
||||
),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
backend("foo/namedthing",
|
||||
servers(server("https://example.com", weight(1))),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
backend("bar",
|
||||
servers(
|
||||
server("https://10.15.0.1:8443", weight(1)),
|
||||
server("https://10.15.0.2:9443", weight(1)),
|
||||
),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
),
|
||||
frontends(
|
||||
frontend("foo/bar",
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("/bar", "PathPrefix:/bar"),
|
||||
route("foo", "Host:foo"),
|
||||
),
|
||||
),
|
||||
frontend("foo/namedthing",
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("/namedthing", "PathPrefix:/namedthing"),
|
||||
route("foo", "Host:foo"),
|
||||
),
|
||||
),
|
||||
frontend("bar",
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("bar", "Host:bar"),
|
||||
),
|
||||
),
|
||||
),
|
||||
tlsesSection(
|
||||
tlsSection(
|
||||
tlsEntryPoints("https"),
|
||||
certificate("certificate", "key"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
assert.EqualValues(t, sampleConfiguration(), actual)
|
||||
}
|
||||
|
||||
func sampleConfiguration() *types.Configuration {
|
||||
return &types.Configuration{
|
||||
Backends: map[string]*types.Backend{
|
||||
"foo/bar": {
|
||||
Servers: map[string]types.Server{
|
||||
"http://10.10.0.1:8080": {
|
||||
URL: "http://10.10.0.1:8080",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
"http://10.21.0.1:8080": {
|
||||
URL: "http://10.21.0.1:8080",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
LoadBalancer: &types.LoadBalancer{
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
"foo/namedthing": {
|
||||
Servers: map[string]types.Server{
|
||||
"https://example.com": {
|
||||
URL: "https://example.com",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
LoadBalancer: &types.LoadBalancer{
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
"bar": {
|
||||
Servers: map[string]types.Server{
|
||||
"https://10.15.0.1:8443": {
|
||||
URL: "https://10.15.0.1:8443",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
"https://10.15.0.2:9443": {
|
||||
URL: "https://10.15.0.2:9443",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
LoadBalancer: &types.LoadBalancer{
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
},
|
||||
Frontends: map[string]*types.Frontend{
|
||||
"foo/bar": {
|
||||
Backend: "foo/bar",
|
||||
PassHostHeader: true,
|
||||
Routes: map[string]types.Route{
|
||||
"/bar": {
|
||||
Rule: "PathPrefix:/bar",
|
||||
},
|
||||
"foo": {
|
||||
Rule: "Host:foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo/namedthing": {
|
||||
Backend: "foo/namedthing",
|
||||
PassHostHeader: true,
|
||||
Routes: map[string]types.Route{
|
||||
"/namedthing": {
|
||||
Rule: "PathPrefix:/namedthing",
|
||||
},
|
||||
"foo": {
|
||||
Rule: "Host:foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
"bar": {
|
||||
Backend: "bar",
|
||||
PassHostHeader: true,
|
||||
Routes: map[string]types.Route{
|
||||
"bar": {
|
||||
Rule: "Host:bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TLS: []*tls.Configuration{
|
||||
{
|
||||
EntryPoints: []string{"https"},
|
||||
Certificate: &tls.Certificate{
|
||||
CertFile: tls.FileOrContent("certificate"),
|
||||
KeyFile: tls.FileOrContent("key"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
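The removed test file above builds expected configurations with functional options (buildConfiguration, backends, servers, and so on). A compact standalone sketch of that builder pattern, with hypothetical fixture types used only for illustration:

package main

import "fmt"

// fixture is a hypothetical type used only for this sketch.
type fixture struct {
	Name    string
	Servers []string
}

// option mutates a fixture; helpers return options, which lets tests compose
// fixtures declaratively, as the removed buildConfiguration helpers did.
type option func(*fixture)

func name(n string) option {
	return func(f *fixture) { f.Name = n }
}

func servers(urls ...string) option {
	return func(f *fixture) { f.Servers = append(f.Servers, urls...) }
}

// build applies each option in order to a fresh fixture.
func build(opts ...option) *fixture {
	f := &fixture{}
	for _, opt := range opts {
		opt(f)
	}
	return f
}

func main() {
	f := build(name("foo/bar"), servers("http://10.10.0.1:8080", "http://10.21.0.1:8080"))
	fmt.Printf("%+v\n", f)
}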
|
@@ -1,158 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func buildEndpoint(opts ...func(*corev1.Endpoints)) *corev1.Endpoints {
|
||||
e := &corev1.Endpoints{}
|
||||
for _, opt := range opts {
|
||||
opt(e)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func eNamespace(value string) func(*corev1.Endpoints) {
|
||||
return func(i *corev1.Endpoints) {
|
||||
i.Namespace = value
|
||||
}
|
||||
}
|
||||
|
||||
func eName(value string) func(*corev1.Endpoints) {
|
||||
return func(i *corev1.Endpoints) {
|
||||
i.Name = value
|
||||
}
|
||||
}
|
||||
|
||||
func eUID(value types.UID) func(*corev1.Endpoints) {
|
||||
return func(i *corev1.Endpoints) {
|
||||
i.UID = value
|
||||
}
|
||||
}
|
||||
|
||||
func subset(opts ...func(*corev1.EndpointSubset)) func(*corev1.Endpoints) {
|
||||
return func(e *corev1.Endpoints) {
|
||||
s := &corev1.EndpointSubset{}
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
e.Subsets = append(e.Subsets, *s)
|
||||
}
|
||||
}
|
||||
|
||||
func eAddresses(opts ...func(*corev1.EndpointAddress)) func(*corev1.EndpointSubset) {
|
||||
return func(subset *corev1.EndpointSubset) {
|
||||
for _, opt := range opts {
|
||||
a := &corev1.EndpointAddress{}
|
||||
opt(a)
|
||||
subset.Addresses = append(subset.Addresses, *a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func eAddress(ip string) func(*corev1.EndpointAddress) {
|
||||
return func(address *corev1.EndpointAddress) {
|
||||
address.IP = ip
|
||||
}
|
||||
}
|
||||
|
||||
func eAddressWithTargetRef(targetRef, ip string) func(*corev1.EndpointAddress) {
|
||||
return func(address *corev1.EndpointAddress) {
|
||||
address.TargetRef = &corev1.ObjectReference{Name: targetRef}
|
||||
address.IP = ip
|
||||
}
|
||||
}
|
||||
|
||||
func ePorts(opts ...func(port *corev1.EndpointPort)) func(*corev1.EndpointSubset) {
|
||||
return func(spec *corev1.EndpointSubset) {
|
||||
for _, opt := range opts {
|
||||
p := &corev1.EndpointPort{}
|
||||
opt(p)
|
||||
spec.Ports = append(spec.Ports, *p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ePort(port int32, name string) func(*corev1.EndpointPort) {
|
||||
return func(sp *corev1.EndpointPort) {
|
||||
sp.Port = port
|
||||
sp.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
// Test
|
||||
|
||||
func TestBuildEndpoint(t *testing.T) {
|
||||
actual := buildEndpoint(
|
||||
eNamespace("testing"),
|
||||
eName("service3"),
|
||||
eUID("3"),
|
||||
subset(
|
||||
eAddresses(eAddress("10.15.0.1")),
|
||||
ePorts(
|
||||
ePort(8080, "http"),
|
||||
ePort(8443, "https"),
|
||||
),
|
||||
),
|
||||
subset(
|
||||
eAddresses(eAddress("10.15.0.2")),
|
||||
ePorts(
|
||||
ePort(9080, "http"),
|
||||
ePort(9443, "https"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
assert.EqualValues(t, sampleEndpoint1(), actual)
|
||||
}
|
||||
|
||||
func sampleEndpoint1() *corev1.Endpoints {
|
||||
return &corev1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service3",
|
||||
UID: "3",
|
||||
Namespace: "testing",
|
||||
},
|
||||
Subsets: []corev1.EndpointSubset{
|
||||
{
|
||||
Addresses: []corev1.EndpointAddress{
|
||||
{
|
||||
IP: "10.15.0.1",
|
||||
},
|
||||
},
|
||||
Ports: []corev1.EndpointPort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 8080,
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 8443,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Addresses: []corev1.EndpointAddress{
|
||||
{
|
||||
IP: "10.15.0.2",
|
||||
},
|
||||
},
|
||||
Ports: []corev1.EndpointPort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 9080,
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 9443,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,223 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func buildIngress(opts ...func(*extensionsv1beta1.Ingress)) *extensionsv1beta1.Ingress {
|
||||
i := &extensionsv1beta1.Ingress{}
|
||||
for _, opt := range opts {
|
||||
opt(i)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func iNamespace(value string) func(*extensionsv1beta1.Ingress) {
|
||||
return func(i *extensionsv1beta1.Ingress) {
|
||||
i.Namespace = value
|
||||
}
|
||||
}
|
||||
|
||||
func iAnnotation(name string, value string) func(*extensionsv1beta1.Ingress) {
|
||||
return func(i *extensionsv1beta1.Ingress) {
|
||||
if i.Annotations == nil {
|
||||
i.Annotations = make(map[string]string)
|
||||
}
|
||||
i.Annotations[name] = value
|
||||
}
|
||||
}
|
||||
|
||||
func iRules(opts ...func(*extensionsv1beta1.IngressSpec)) func(*extensionsv1beta1.Ingress) {
|
||||
return func(i *extensionsv1beta1.Ingress) {
|
||||
s := &extensionsv1beta1.IngressSpec{}
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
i.Spec = *s
|
||||
}
|
||||
}
|
||||
|
||||
func iSpecBackends(opts ...func(*extensionsv1beta1.IngressSpec)) func(*extensionsv1beta1.Ingress) {
|
||||
return func(i *extensionsv1beta1.Ingress) {
|
||||
s := &extensionsv1beta1.IngressSpec{}
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
i.Spec = *s
|
||||
}
|
||||
}
|
||||
|
||||
func iSpecBackend(opts ...func(*extensionsv1beta1.IngressBackend)) func(*extensionsv1beta1.IngressSpec) {
|
||||
return func(s *extensionsv1beta1.IngressSpec) {
|
||||
p := &extensionsv1beta1.IngressBackend{}
|
||||
for _, opt := range opts {
|
||||
opt(p)
|
||||
}
|
||||
s.Backend = p
|
||||
}
|
||||
}
|
||||
|
||||
func iIngressBackend(name string, port intstr.IntOrString) func(*extensionsv1beta1.IngressBackend) {
|
||||
return func(p *extensionsv1beta1.IngressBackend) {
|
||||
p.ServiceName = name
|
||||
p.ServicePort = port
|
||||
}
|
||||
}
|
||||
|
||||
func iRule(opts ...func(*extensionsv1beta1.IngressRule)) func(*extensionsv1beta1.IngressSpec) {
|
||||
return func(spec *extensionsv1beta1.IngressSpec) {
|
||||
r := &extensionsv1beta1.IngressRule{}
|
||||
for _, opt := range opts {
|
||||
opt(r)
|
||||
}
|
||||
spec.Rules = append(spec.Rules, *r)
|
||||
}
|
||||
}
|
||||
|
||||
func iHost(name string) func(*extensionsv1beta1.IngressRule) {
|
||||
return func(rule *extensionsv1beta1.IngressRule) {
|
||||
rule.Host = name
|
||||
}
|
||||
}
|
||||
|
||||
func iPaths(opts ...func(*extensionsv1beta1.HTTPIngressRuleValue)) func(*extensionsv1beta1.IngressRule) {
|
||||
return func(rule *extensionsv1beta1.IngressRule) {
|
||||
rule.HTTP = &extensionsv1beta1.HTTPIngressRuleValue{}
|
||||
for _, opt := range opts {
|
||||
opt(rule.HTTP)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func onePath(opts ...func(*extensionsv1beta1.HTTPIngressPath)) func(*extensionsv1beta1.HTTPIngressRuleValue) {
|
||||
return func(irv *extensionsv1beta1.HTTPIngressRuleValue) {
|
||||
p := &extensionsv1beta1.HTTPIngressPath{}
|
||||
for _, opt := range opts {
|
||||
opt(p)
|
||||
}
|
||||
irv.Paths = append(irv.Paths, *p)
|
||||
}
|
||||
}
|
||||
|
||||
func iPath(name string) func(*extensionsv1beta1.HTTPIngressPath) {
|
||||
return func(p *extensionsv1beta1.HTTPIngressPath) {
|
||||
p.Path = name
|
||||
}
|
||||
}
|
||||
|
||||
func iBackend(name string, port intstr.IntOrString) func(*extensionsv1beta1.HTTPIngressPath) {
|
||||
return func(p *extensionsv1beta1.HTTPIngressPath) {
|
||||
p.Backend = extensionsv1beta1.IngressBackend{
|
||||
ServiceName: name,
|
||||
ServicePort: port,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func iTLSes(opts ...func(*extensionsv1beta1.IngressTLS)) func(*extensionsv1beta1.Ingress) {
|
||||
return func(i *extensionsv1beta1.Ingress) {
|
||||
for _, opt := range opts {
|
||||
iTLS := extensionsv1beta1.IngressTLS{}
|
||||
opt(&iTLS)
|
||||
i.Spec.TLS = append(i.Spec.TLS, iTLS)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func iTLS(secret string, hosts ...string) func(*extensionsv1beta1.IngressTLS) {
|
||||
return func(i *extensionsv1beta1.IngressTLS) {
|
||||
i.SecretName = secret
|
||||
i.Hosts = hosts
|
||||
}
|
||||
}
|
||||
|
||||
// Test
|
||||
|
||||
func TestBuildIngress(t *testing.T) {
|
||||
i := buildIngress(
|
||||
iNamespace("testing"),
|
||||
iRules(
|
||||
iRule(iHost("foo"), iPaths(
|
||||
onePath(iPath("/bar"), iBackend("service1", intstr.FromInt(80))),
|
||||
onePath(iPath("/namedthing"), iBackend("service4", intstr.FromString("https")))),
|
||||
),
|
||||
iRule(iHost("bar"), iPaths(
|
||||
onePath(iBackend("service3", intstr.FromString("https"))),
|
||||
onePath(iBackend("service2", intstr.FromInt(802))),
|
||||
),
|
||||
),
|
||||
),
|
||||
iTLSes(
|
||||
iTLS("tls-secret", "foo"),
|
||||
),
|
||||
)
|
||||
|
||||
assert.EqualValues(t, sampleIngress(), i)
|
||||
}
|
||||
|
||||
func sampleIngress() *extensionsv1beta1.Ingress {
|
||||
return &extensionsv1beta1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "testing",
|
||||
},
|
||||
Spec: extensionsv1beta1.IngressSpec{
|
||||
Rules: []extensionsv1beta1.IngressRule{
|
||||
{
|
||||
Host: "foo",
|
||||
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
|
||||
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
|
||||
Paths: []extensionsv1beta1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/bar",
|
||||
Backend: extensionsv1beta1.IngressBackend{
|
||||
ServiceName: "service1",
|
||||
ServicePort: intstr.FromInt(80),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "/namedthing",
|
||||
Backend: extensionsv1beta1.IngressBackend{
|
||||
ServiceName: "service4",
|
||||
ServicePort: intstr.FromString("https"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Host: "bar",
|
||||
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
|
||||
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
|
||||
Paths: []extensionsv1beta1.HTTPIngressPath{
|
||||
{
|
||||
Backend: extensionsv1beta1.IngressBackend{
|
||||
ServiceName: "service3",
|
||||
ServicePort: intstr.FromString("https"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Backend: extensionsv1beta1.IngressBackend{
|
||||
ServiceName: "service2",
|
||||
ServicePort: intstr.FromInt(802),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TLS: []extensionsv1beta1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"foo"},
|
||||
SecretName: "tls-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,232 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func buildService(opts ...func(*corev1.Service)) *corev1.Service {
|
||||
s := &corev1.Service{}
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func sNamespace(value string) func(*corev1.Service) {
|
||||
return func(i *corev1.Service) {
|
||||
i.Namespace = value
|
||||
}
|
||||
}
|
||||
|
||||
func sName(value string) func(*corev1.Service) {
|
||||
return func(i *corev1.Service) {
|
||||
i.Name = value
|
||||
}
|
||||
}
|
||||
|
||||
func sUID(value types.UID) func(*corev1.Service) {
|
||||
return func(i *corev1.Service) {
|
||||
i.UID = value
|
||||
}
|
||||
}
|
||||
|
||||
func sAnnotation(name string, value string) func(*corev1.Service) {
|
||||
return func(s *corev1.Service) {
|
||||
if s.Annotations == nil {
|
||||
s.Annotations = make(map[string]string)
|
||||
}
|
||||
s.Annotations[name] = value
|
||||
}
|
||||
}
|
||||
|
||||
func sSpec(opts ...func(*corev1.ServiceSpec)) func(*corev1.Service) {
|
||||
return func(s *corev1.Service) {
|
||||
spec := &corev1.ServiceSpec{}
|
||||
for _, opt := range opts {
|
||||
opt(spec)
|
||||
}
|
||||
s.Spec = *spec
|
||||
}
|
||||
}
|
||||
|
||||
func sLoadBalancerStatus(opts ...func(*corev1.LoadBalancerStatus)) func(service *corev1.Service) {
|
||||
return func(s *corev1.Service) {
|
||||
loadBalancer := &corev1.LoadBalancerStatus{}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(loadBalancer)
|
||||
}
|
||||
}
|
||||
s.Status = corev1.ServiceStatus{
|
||||
LoadBalancer: *loadBalancer,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sLoadBalancerIngress(ip string, hostname string) func(*corev1.LoadBalancerStatus) {
|
||||
return func(status *corev1.LoadBalancerStatus) {
|
||||
ingress := corev1.LoadBalancerIngress{
|
||||
IP: ip,
|
||||
Hostname: hostname,
|
||||
}
|
||||
status.Ingress = append(status.Ingress, ingress)
|
||||
}
|
||||
}
|
||||
|
||||
func clusterIP(ip string) func(*corev1.ServiceSpec) {
|
||||
return func(spec *corev1.ServiceSpec) {
|
||||
spec.ClusterIP = ip
|
||||
}
|
||||
}
|
||||
|
||||
func sType(value corev1.ServiceType) func(*corev1.ServiceSpec) {
|
||||
return func(spec *corev1.ServiceSpec) {
|
||||
spec.Type = value
|
||||
}
|
||||
}
|
||||
|
||||
func sExternalName(name string) func(*corev1.ServiceSpec) {
|
||||
return func(spec *corev1.ServiceSpec) {
|
||||
spec.ExternalName = name
|
||||
}
|
||||
}
|
||||
|
||||
func sPorts(opts ...func(*corev1.ServicePort)) func(*corev1.ServiceSpec) {
|
||||
return func(spec *corev1.ServiceSpec) {
|
||||
for _, opt := range opts {
|
||||
p := &corev1.ServicePort{}
|
||||
opt(p)
|
||||
spec.Ports = append(spec.Ports, *p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sPort(port int32, name string) func(*corev1.ServicePort) {
|
||||
return func(sp *corev1.ServicePort) {
|
||||
sp.Port = port
|
||||
sp.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
// Test
|
||||
|
||||
func TestBuildService(t *testing.T) {
|
||||
actual1 := buildService(
|
||||
sName("service1"),
|
||||
sNamespace("testing"),
|
||||
sUID("1"),
|
||||
sSpec(
|
||||
clusterIP("10.0.0.1"),
|
||||
sPorts(sPort(80, "")),
|
||||
),
|
||||
)
|
||||
|
||||
assert.EqualValues(t, sampleService1(), actual1)
|
||||
|
||||
actual2 := buildService(
|
||||
sName("service2"),
|
||||
sNamespace("testing"),
|
||||
sUID("2"),
|
||||
sSpec(
|
||||
clusterIP("10.0.0.2"),
|
||||
sType("ExternalName"),
|
||||
sExternalName("example.com"),
|
||||
sPorts(
|
||||
sPort(80, "http"),
|
||||
sPort(443, "https"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
assert.EqualValues(t, sampleService2(), actual2)
|
||||
|
||||
actual3 := buildService(
|
||||
sName("service3"),
|
||||
sNamespace("testing"),
|
||||
sUID("3"),
|
||||
sSpec(
|
||||
clusterIP("10.0.0.3"),
|
||||
sType("ExternalName"),
|
||||
sExternalName("example.com"),
|
||||
sPorts(
|
||||
sPort(8080, "http"),
|
||||
sPort(8443, "https"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
assert.EqualValues(t, sampleService3(), actual3)
|
||||
}
|
||||
|
||||
func sampleService1() *corev1.Service {
|
||||
return &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service1",
|
||||
UID: "1",
|
||||
Namespace: "testing",
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.0.0.1",
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Port: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func sampleService2() *corev1.Service {
|
||||
return &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service2",
|
||||
UID: "2",
|
||||
Namespace: "testing",
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.0.0.2",
|
||||
Type: "ExternalName",
|
||||
ExternalName: "example.com",
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func sampleService3() *corev1.Service {
|
||||
return &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service3",
|
||||
UID: "3",
|
||||
Namespace: "testing",
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.0.0.3",
|
||||
Type: "ExternalName",
|
||||
ExternalName: "example.com",
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 8080,
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 8443,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -1,259 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
kubeerror "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
const resyncPeriod = 10 * time.Minute
|
||||
|
||||
type resourceEventHandler struct {
|
||||
ev chan<- interface{}
|
||||
}
|
||||
|
||||
func (reh *resourceEventHandler) OnAdd(obj interface{}) {
|
||||
eventHandlerFunc(reh.ev, obj)
|
||||
}
|
||||
|
||||
func (reh *resourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
|
||||
eventHandlerFunc(reh.ev, newObj)
|
||||
}
|
||||
|
||||
func (reh *resourceEventHandler) OnDelete(obj interface{}) {
|
||||
eventHandlerFunc(reh.ev, obj)
|
||||
}
|
||||
|
||||
// Client is a client for the Provider master.
|
||||
// WatchAll starts the watch of the Provider resources and updates the stores.
|
||||
// The stores can then be accessed via the Get* functions.
|
||||
type Client interface {
|
||||
WatchAll(namespaces Namespaces, stopCh <-chan struct{}) (<-chan interface{}, error)
|
||||
GetIngresses() []*extensionsv1beta1.Ingress
|
||||
GetService(namespace, name string) (*corev1.Service, bool, error)
|
||||
GetSecret(namespace, name string) (*corev1.Secret, bool, error)
|
||||
GetEndpoints(namespace, name string) (*corev1.Endpoints, bool, error)
|
||||
UpdateIngressStatus(namespace, name, ip, hostname string) error
|
||||
}
|
||||
|
||||
type clientImpl struct {
|
||||
clientset *kubernetes.Clientset
|
||||
factories map[string]informers.SharedInformerFactory
|
||||
ingressLabelSelector labels.Selector
|
||||
isNamespaceAll bool
|
||||
}
|
||||
|
||||
func newClientImpl(clientset *kubernetes.Clientset) *clientImpl {
|
||||
return &clientImpl{
|
||||
clientset: clientset,
|
||||
factories: make(map[string]informers.SharedInformerFactory),
|
||||
}
|
||||
}
|
||||
|
||||
// newInClusterClient returns a new Provider client that is expected to run
|
||||
// inside the cluster.
|
||||
func newInClusterClient(endpoint string) (*clientImpl, error) {
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create in-cluster configuration: %s", err)
|
||||
}
|
||||
|
||||
if endpoint != "" {
|
||||
config.Host = endpoint
|
||||
}
|
||||
|
||||
return createClientFromConfig(config)
|
||||
}
|
||||
|
||||
// newExternalClusterClient returns a new Provider client that may run outside
|
||||
// of the cluster.
|
||||
// The endpoint parameter must not be empty.
|
||||
func newExternalClusterClient(endpoint, token, caFilePath string) (*clientImpl, error) {
|
||||
if endpoint == "" {
|
||||
return nil, errors.New("endpoint missing for external cluster client")
|
||||
}
|
||||
|
||||
config := &rest.Config{
|
||||
Host: endpoint,
|
||||
BearerToken: token,
|
||||
}
|
||||
|
||||
if caFilePath != "" {
|
||||
caData, err := ioutil.ReadFile(caFilePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read CA file %s: %s", caFilePath, err)
|
||||
}
|
||||
|
||||
config.TLSClientConfig = rest.TLSClientConfig{CAData: caData}
|
||||
}
|
||||
|
||||
return createClientFromConfig(config)
|
||||
}
|
||||
|
||||
func createClientFromConfig(c *rest.Config) (*clientImpl, error) {
|
||||
clientset, err := kubernetes.NewForConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newClientImpl(clientset), nil
|
||||
}
|
||||
|
||||
// WatchAll starts namespace-specific controllers for all relevant kinds.
|
||||
func (c *clientImpl) WatchAll(namespaces Namespaces, stopCh <-chan struct{}) (<-chan interface{}, error) {
|
||||
eventCh := make(chan interface{}, 1)
|
||||
|
||||
if len(namespaces) == 0 {
|
||||
namespaces = Namespaces{metav1.NamespaceAll}
|
||||
c.isNamespaceAll = true
|
||||
}
|
||||
|
||||
eventHandler := c.newResourceEventHandler(eventCh)
|
||||
for _, ns := range namespaces {
|
||||
factory := informers.NewFilteredSharedInformerFactory(c.clientset, resyncPeriod, ns, nil)
|
||||
factory.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
|
||||
factory.Core().V1().Services().Informer().AddEventHandler(eventHandler)
|
||||
factory.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
|
||||
c.factories[ns] = factory
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
c.factories[ns].Start(stopCh)
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
for t, ok := range c.factories[ns].WaitForCacheSync(stopCh) {
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Do not wait for the Secrets store to get synced since we cannot rely on
|
||||
// users having granted RBAC permissions for this object.
|
||||
// https://github.com/containous/traefik/issues/1784 should improve the
|
||||
// situation here in the future.
|
||||
for _, ns := range namespaces {
|
||||
c.factories[ns].Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
|
||||
c.factories[ns].Start(stopCh)
|
||||
}
|
||||
|
||||
return eventCh, nil
|
||||
}
|
||||
|
||||
// GetIngresses returns all Ingresses for observed namespaces in the cluster.
|
||||
func (c *clientImpl) GetIngresses() []*extensionsv1beta1.Ingress {
|
||||
var result []*extensionsv1beta1.Ingress
|
||||
for ns, factory := range c.factories {
|
||||
ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list ingresses in namespace %s: %s", ns, err)
|
||||
}
|
||||
result = append(result, ings...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// UpdateIngressStatus updates an Ingress with a provided status.
|
||||
func (c *clientImpl) UpdateIngressStatus(namespace, name, ip, hostname string) error {
|
||||
ing, err := c.factories[c.lookupNamespace(namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(namespace).Get(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get ingress %s/%s: %v", namespace, name, err)
|
||||
}
|
||||
|
||||
if len(ing.Status.LoadBalancer.Ingress) > 0 {
|
||||
if ing.Status.LoadBalancer.Ingress[0].Hostname == hostname && ing.Status.LoadBalancer.Ingress[0].IP == ip {
|
||||
// If status is already set, skip update
|
||||
log.Debugf("Skipping status update on ingress %s/%s", ing.Namespace, ing.Name)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
ingCopy := ing.DeepCopy()
|
||||
ingCopy.Status = extensionsv1beta1.IngressStatus{LoadBalancer: corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: ip, Hostname: hostname}}}}
|
||||
|
||||
_, err = c.clientset.ExtensionsV1beta1().Ingresses(ingCopy.Namespace).UpdateStatus(ingCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update ingress status %s/%s: %v", namespace, name, err)
|
||||
}
|
||||
log.Infof("Updated status on ingress %s/%s", namespace, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetService returns the named service from the given namespace.
|
||||
func (c *clientImpl) GetService(namespace, name string) (*corev1.Service, bool, error) {
|
||||
service, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
|
||||
exist, err := translateNotFoundError(err)
|
||||
return service, exist, err
|
||||
}
|
||||
|
||||
// GetEndpoints returns the named endpoints from the given namespace.
|
||||
func (c *clientImpl) GetEndpoints(namespace, name string) (*corev1.Endpoints, bool, error) {
|
||||
endpoint, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name)
|
||||
exist, err := translateNotFoundError(err)
|
||||
return endpoint, exist, err
|
||||
}
|
||||
|
||||
// GetSecret returns the named secret from the given namespace.
|
||||
func (c *clientImpl) GetSecret(namespace, name string) (*corev1.Secret, bool, error) {
|
||||
secret, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
|
||||
exist, err := translateNotFoundError(err)
|
||||
return secret, exist, err
|
||||
}
|
||||
|
||||
// lookupNamespace returns the lookup namespace key for the given namespace.
|
||||
// When listening on all namespaces, it returns the client-go identifier ("")
|
||||
// for all-namespaces. Otherwise, it returns the given namespace.
|
||||
// The distinction is necessary because we index all informers on the special
|
||||
// identifier iff all-namespaces are requested but receive specific namespace
|
||||
// identifiers from the Kubernetes API, so we have to bridge this gap.
|
||||
func (c *clientImpl) lookupNamespace(ns string) string {
|
||||
if c.isNamespaceAll {
|
||||
return metav1.NamespaceAll
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
func (c *clientImpl) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler {
|
||||
return &cache.FilteringResourceEventHandler{
|
||||
FilterFunc: func(obj interface{}) bool {
|
||||
// Ignore Ingresses that do not match our custom label selector.
|
||||
if ing, ok := obj.(*extensionsv1beta1.Ingress); ok {
|
||||
lbls := labels.Set(ing.GetLabels())
|
||||
return c.ingressLabelSelector.Matches(lbls)
|
||||
}
|
||||
return true
|
||||
},
|
||||
Handler: &resourceEventHandler{ev: events},
|
||||
}
|
||||
}
|
||||
|
||||
// eventHandlerFunc will pass the obj on to the events channel or drop it.
|
||||
// This is so passing the events along won't block in the case of high volume.
|
||||
// The events are only used for signaling anyway so dropping a few is ok.
|
||||
func eventHandlerFunc(events chan<- interface{}, obj interface{}) {
|
||||
select {
|
||||
case events <- obj:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// translateNotFoundError will translate a "not found" error to a boolean return
|
||||
// value which indicates if the resource exists and a nil error.
|
||||
func translateNotFoundError(err error) (bool, error) {
|
||||
if kubeerror.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return err == nil, err
|
||||
}
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
)
|
||||
|
||||
type clientMock struct {
|
||||
ingresses []*extensionsv1beta1.Ingress
|
||||
services []*corev1.Service
|
||||
secrets []*corev1.Secret
|
||||
endpoints []*corev1.Endpoints
|
||||
watchChan chan interface{}
|
||||
|
||||
apiServiceError error
|
||||
apiSecretError error
|
||||
apiEndpointsError error
|
||||
apiIngressStatusError error
|
||||
}
|
||||
|
||||
func (c clientMock) GetIngresses() []*extensionsv1beta1.Ingress {
|
||||
return c.ingresses
|
||||
}
|
||||
|
||||
func (c clientMock) GetService(namespace, name string) (*corev1.Service, bool, error) {
|
||||
if c.apiServiceError != nil {
|
||||
return nil, false, c.apiServiceError
|
||||
}
|
||||
|
||||
for _, service := range c.services {
|
||||
if service.Namespace == namespace && service.Name == name {
|
||||
return service, true, nil
|
||||
}
|
||||
}
|
||||
return nil, false, c.apiServiceError
|
||||
}
|
||||
|
||||
func (c clientMock) GetEndpoints(namespace, name string) (*corev1.Endpoints, bool, error) {
|
||||
if c.apiEndpointsError != nil {
|
||||
return nil, false, c.apiEndpointsError
|
||||
}
|
||||
|
||||
for _, endpoints := range c.endpoints {
|
||||
if endpoints.Namespace == namespace && endpoints.Name == name {
|
||||
return endpoints, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &corev1.Endpoints{}, false, nil
|
||||
}
|
||||
|
||||
func (c clientMock) GetSecret(namespace, name string) (*corev1.Secret, bool, error) {
|
||||
if c.apiSecretError != nil {
|
||||
return nil, false, c.apiSecretError
|
||||
}
|
||||
|
||||
for _, secret := range c.secrets {
|
||||
if secret.Namespace == namespace && secret.Name == name {
|
||||
return secret, true, nil
|
||||
}
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
func (c clientMock) WatchAll(namespaces Namespaces, stopCh <-chan struct{}) (<-chan interface{}, error) {
|
||||
return c.watchChan, nil
|
||||
}
|
||||
|
||||
func (c clientMock) UpdateIngressStatus(namespace, name, ip, hostname string) error {
|
||||
return c.apiIngressStatusError
|
||||
}
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
kubeerror "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func TestTranslateNotFoundError(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
err error
|
||||
expectedExists bool
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
desc: "kubernetes not found error",
|
||||
err: kubeerror.NewNotFound(schema.GroupResource{}, "foo"),
|
||||
expectedExists: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
desc: "nil error",
|
||||
err: nil,
|
||||
expectedExists: true,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
desc: "not a kubernetes not found error",
|
||||
err: fmt.Errorf("bar error"),
|
||||
expectedExists: false,
|
||||
expectedError: fmt.Errorf("bar error"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
test := testCase
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
exists, err := translateNotFoundError(test.err)
|
||||
assert.Equal(t, test.expectedExists, exists)
|
||||
assert.Equal(t, test.expectedError, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -1,32 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Namespaces holds kubernetes namespaces
|
||||
type Namespaces []string
|
||||
|
||||
// Set adds strings elem into the the parser
|
||||
// it splits str on , and ;
|
||||
func (ns *Namespaces) Set(str string) error {
|
||||
fargs := func(c rune) bool {
|
||||
return c == ',' || c == ';'
|
||||
}
|
||||
// get function
|
||||
slice := strings.FieldsFunc(str, fargs)
|
||||
*ns = append(*ns, slice...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get []string
|
||||
func (ns *Namespaces) Get() interface{} { return *ns }
|
||||
|
||||
// String return slice in a string
|
||||
func (ns *Namespaces) String() string { return fmt.Sprintf("%v", *ns) }
|
||||
|
||||
// SetValue sets []string into the parser
|
||||
func (ns *Namespaces) SetValue(val interface{}) {
|
||||
*ns = val.(Namespaces)
|
||||
}
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const defaultPercentageValuePrecision = 3
|
||||
|
||||
// percentageValue is int64 form of percentage value with 10^-3 precision.
|
||||
type percentageValue int64
|
||||
|
||||
// toFloat64 returns its decimal float64 value.
|
||||
func (v percentageValue) toFloat64() float64 {
|
||||
return float64(v) / (1000 * 100)
|
||||
}
|
||||
|
||||
func (v percentageValue) computeWeight(count int) int {
|
||||
if count == 0 {
|
||||
return 0
|
||||
}
|
||||
return int(float64(v) / float64(count))
|
||||
}
|
||||
|
||||
// String returns its string form of percentage value.
|
||||
func (v percentageValue) String() string {
|
||||
return strconv.FormatFloat(v.toFloat64()*100, 'f', defaultPercentageValuePrecision, 64) + "%"
|
||||
}
|
||||
|
||||
// newPercentageValueFromString tries to read percentage value from string, it can be either "1.1" or "1.1%", "6%".
|
||||
// It will lose the extra precision if there are more digits after decimal point.
|
||||
func newPercentageValueFromString(rawValue string) (percentageValue, error) {
|
||||
if strings.HasSuffix(rawValue, "%") {
|
||||
rawValue = rawValue[:len(rawValue)-1]
|
||||
}
|
||||
value, err := strconv.ParseFloat(rawValue, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return newPercentageValueFromFloat64(value) / 100, nil
|
||||
}
|
||||
|
||||
// newPercentageValueFromFloat64 reads percentage value from float64
|
||||
func newPercentageValueFromFloat64(f float64) percentageValue {
|
||||
return percentageValue(f * (1000 * 100))
|
||||
}
|
||||
|
|
@ -1,196 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewPercentageValueFromFloat64(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
value float64
|
||||
expectedString string
|
||||
expectedFloat64 float64
|
||||
}{
|
||||
{
|
||||
value: 0.01,
|
||||
expectedString: "1.000%",
|
||||
expectedFloat64: 0.01,
|
||||
},
|
||||
{
|
||||
value: 0.5,
|
||||
expectedString: "50.000%",
|
||||
expectedFloat64: 0.5,
|
||||
},
|
||||
{
|
||||
value: 0.99,
|
||||
expectedString: "99.000%",
|
||||
expectedFloat64: 0.99,
|
||||
},
|
||||
{
|
||||
value: 0.99999,
|
||||
expectedString: "99.999%",
|
||||
expectedFloat64: 0.99999,
|
||||
},
|
||||
{
|
||||
value: -0.99999,
|
||||
expectedString: "-99.999%",
|
||||
expectedFloat64: -0.99999,
|
||||
},
|
||||
{
|
||||
value: -0.9999999,
|
||||
expectedString: "-99.999%",
|
||||
expectedFloat64: -0.99999,
|
||||
},
|
||||
{
|
||||
value: 0,
|
||||
expectedString: "0.000%",
|
||||
expectedFloat64: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pvFromFloat64 := newPercentageValueFromFloat64(test.value)
|
||||
|
||||
assert.Equal(t, test.expectedString, pvFromFloat64.String(), "percentage string value mismatched")
|
||||
assert.Equal(t, test.expectedFloat64, pvFromFloat64.toFloat64(), "percentage float64 value mismatched")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPercentageValueFromString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
value string
|
||||
expectError bool
|
||||
expectedString string
|
||||
expectedFloat64 float64
|
||||
}{
|
||||
{
|
||||
value: "1%",
|
||||
expectError: false,
|
||||
expectedString: "1.000%",
|
||||
expectedFloat64: 0.01,
|
||||
},
|
||||
{
|
||||
value: "0.5",
|
||||
expectError: false,
|
||||
expectedString: "0.500%",
|
||||
expectedFloat64: 0.005,
|
||||
},
|
||||
{
|
||||
value: "99%",
|
||||
expectError: false,
|
||||
expectedString: "99.000%",
|
||||
expectedFloat64: 0.99,
|
||||
},
|
||||
{
|
||||
value: "99.9%",
|
||||
expectError: false,
|
||||
expectedString: "99.900%",
|
||||
expectedFloat64: 0.999,
|
||||
},
|
||||
{
|
||||
value: "-99.9%",
|
||||
expectError: false,
|
||||
expectedString: "-99.900%",
|
||||
expectedFloat64: -0.999,
|
||||
},
|
||||
{
|
||||
value: "-99.99999%",
|
||||
expectError: false,
|
||||
expectedString: "-99.999%",
|
||||
expectedFloat64: -0.99999,
|
||||
},
|
||||
{
|
||||
value: "0%",
|
||||
expectError: false,
|
||||
expectedString: "0.000%",
|
||||
expectedFloat64: 0,
|
||||
},
|
||||
{
|
||||
value: "%",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
value: "foo",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
value: "",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pvFromString, err := newPercentageValueFromString(test.value)
|
||||
|
||||
if test.expectError {
|
||||
require.Error(t, err, "expecting error but not happening")
|
||||
} else {
|
||||
require.NoError(t, err, "fail to parse percentage value")
|
||||
|
||||
assert.Equal(t, test.expectedString, pvFromString.String(), "percentage string value mismatched")
|
||||
assert.Equal(t, test.expectedFloat64, pvFromString.toFloat64(), "percentage float64 value mismatched")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPercentageValue(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
stringValue string
|
||||
floatValue float64
|
||||
}{
|
||||
{
|
||||
desc: "percentage",
|
||||
stringValue: "1%",
|
||||
floatValue: 0.01,
|
||||
},
|
||||
{
|
||||
desc: "decimal",
|
||||
stringValue: "0.5",
|
||||
floatValue: 0.005,
|
||||
},
|
||||
{
|
||||
desc: "negative percentage",
|
||||
stringValue: "-99.999%",
|
||||
floatValue: -0.99999,
|
||||
},
|
||||
{
|
||||
desc: "negative decimal",
|
||||
stringValue: "-0.99999",
|
||||
floatValue: -0.0099999,
|
||||
},
|
||||
{
|
||||
desc: "zero",
|
||||
stringValue: "0%",
|
||||
floatValue: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pvFromString, err := newPercentageValueFromString(test.stringValue)
|
||||
require.NoError(t, err, "fail to parse percentage value")
|
||||
|
||||
pvFromFloat64 := newPercentageValueFromFloat64(test.floatValue)
|
||||
|
||||
assert.Equal(t, pvFromString, pvFromFloat64)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,190 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"gopkg.in/yaml.v2"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
)
|
||||
|
||||
type weightAllocator interface {
|
||||
getWeight(host, path, serviceName string) int
|
||||
}
|
||||
|
||||
var _ weightAllocator = &defaultWeightAllocator{}
|
||||
var _ weightAllocator = &fractionalWeightAllocator{}
|
||||
|
||||
type defaultWeightAllocator struct{}
|
||||
|
||||
func (d *defaultWeightAllocator) getWeight(host, path, serviceName string) int {
|
||||
return label.DefaultWeight
|
||||
}
|
||||
|
||||
type ingressService struct {
|
||||
host string
|
||||
path string
|
||||
service string
|
||||
}
|
||||
|
||||
type fractionalWeightAllocator map[ingressService]int
|
||||
|
||||
// String returns a string representation as service name / percentage tuples
|
||||
// sorted by service names.
|
||||
// Example: [foo-svc: 30.000% bar-svc: 70.000%]
|
||||
func (f *fractionalWeightAllocator) String() string {
|
||||
var sorted []ingressService
|
||||
for ingServ := range map[ingressService]int(*f) {
|
||||
sorted = append(sorted, ingServ)
|
||||
}
|
||||
sort.Slice(sorted, func(i, j int) bool {
|
||||
return sorted[i].service < sorted[j].service
|
||||
})
|
||||
|
||||
var res []string
|
||||
for _, ingServ := range sorted {
|
||||
res = append(res, fmt.Sprintf("%s: %s", ingServ.service, percentageValue(map[ingressService]int(*f)[ingServ])))
|
||||
}
|
||||
return fmt.Sprintf("[%s]", strings.Join(res, " "))
|
||||
}
|
||||
|
||||
func newFractionalWeightAllocator(ingress *extensionsv1beta1.Ingress, client Client) (*fractionalWeightAllocator, error) {
|
||||
servicePercentageWeights, err := getServicesPercentageWeights(ingress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceInstanceCounts, err := getServiceInstanceCounts(ingress, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceWeights := map[ingressService]int{}
|
||||
|
||||
for _, rule := range ingress.Spec.Rules {
|
||||
// key: rule path string
|
||||
// value: service names
|
||||
fractionalPathServices := map[string][]string{}
|
||||
|
||||
// key: rule path string
|
||||
// value: fractional percentage weight
|
||||
fractionalPathWeights := map[string]percentageValue{}
|
||||
|
||||
for _, pa := range rule.HTTP.Paths {
|
||||
if _, ok := fractionalPathWeights[pa.Path]; !ok {
|
||||
fractionalPathWeights[pa.Path] = newPercentageValueFromFloat64(1)
|
||||
}
|
||||
|
||||
if weight, ok := servicePercentageWeights[pa.Backend.ServiceName]; ok {
|
||||
ingSvc := ingressService{
|
||||
host: rule.Host,
|
||||
path: pa.Path,
|
||||
service: pa.Backend.ServiceName,
|
||||
}
|
||||
|
||||
serviceWeights[ingSvc] = weight.computeWeight(serviceInstanceCounts[ingSvc])
|
||||
|
||||
fractionalPathWeights[pa.Path] -= weight
|
||||
|
||||
if fractionalPathWeights[pa.Path].toFloat64() < 0 {
|
||||
assignedWeight := newPercentageValueFromFloat64(1) - fractionalPathWeights[pa.Path]
|
||||
return nil, fmt.Errorf("percentage value %s must not exceed 100%%", assignedWeight.String())
|
||||
}
|
||||
} else {
|
||||
fractionalPathServices[pa.Path] = append(fractionalPathServices[pa.Path], pa.Backend.ServiceName)
|
||||
}
|
||||
}
|
||||
|
||||
for pa, fractionalWeight := range fractionalPathWeights {
|
||||
fractionalServices := fractionalPathServices[pa]
|
||||
|
||||
if len(fractionalServices) == 0 {
|
||||
if fractionalWeight > 0 {
|
||||
assignedWeight := newPercentageValueFromFloat64(1) - fractionalWeight
|
||||
return nil, fmt.Errorf("the sum of weights(%s) in the path %s%s must be 100%% when no omitted fractional service left", assignedWeight.String(), rule.Host, pa)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
totalFractionalInstanceCount := 0
|
||||
for _, svc := range fractionalServices {
|
||||
totalFractionalInstanceCount += serviceInstanceCounts[ingressService{
|
||||
host: rule.Host,
|
||||
path: pa,
|
||||
service: svc,
|
||||
}]
|
||||
}
|
||||
|
||||
for _, svc := range fractionalServices {
|
||||
ingSvc := ingressService{
|
||||
host: rule.Host,
|
||||
path: pa,
|
||||
service: svc,
|
||||
}
|
||||
serviceWeights[ingSvc] = fractionalWeight.computeWeight(totalFractionalInstanceCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
allocator := fractionalWeightAllocator(serviceWeights)
|
||||
return &allocator, nil
|
||||
}
|
||||
|
||||
func (f *fractionalWeightAllocator) getWeight(host, path, serviceName string) int {
|
||||
return map[ingressService]int(*f)[ingressService{
|
||||
host: host,
|
||||
path: path,
|
||||
service: serviceName,
|
||||
}]
|
||||
}
|
||||
|
||||
func getServicesPercentageWeights(ingress *extensionsv1beta1.Ingress) (map[string]percentageValue, error) {
|
||||
percentageWeight := make(map[string]string)
|
||||
|
||||
annotationPercentageWeights := getAnnotationName(ingress.Annotations, annotationKubernetesServiceWeights)
|
||||
if err := yaml.Unmarshal([]byte(ingress.Annotations[annotationPercentageWeights]), percentageWeight); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servicesPercentageWeights := make(map[string]percentageValue)
|
||||
for serviceName, percentageStr := range percentageWeight {
|
||||
percentageValue, err := newPercentageValueFromString(percentageStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid percentage value %q", percentageStr)
|
||||
}
|
||||
|
||||
servicesPercentageWeights[serviceName] = percentageValue
|
||||
}
|
||||
return servicesPercentageWeights, nil
|
||||
}
|
||||
|
||||
func getServiceInstanceCounts(ingress *extensionsv1beta1.Ingress, client Client) (map[ingressService]int, error) {
|
||||
serviceInstanceCounts := map[ingressService]int{}
|
||||
|
||||
for _, rule := range ingress.Spec.Rules {
|
||||
for _, pa := range rule.HTTP.Paths {
|
||||
count := 0
|
||||
endpoints, exists, err := client.GetEndpoints(ingress.Namespace, pa.Backend.ServiceName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get endpoints %s/%s: %v", ingress.Namespace, pa.Backend.ServiceName, err)
|
||||
}
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("endpoints not found for %s/%s", ingress.Namespace, pa.Backend.ServiceName)
|
||||
}
|
||||
|
||||
for _, subset := range endpoints.Subsets {
|
||||
count += len(subset.Addresses)
|
||||
}
|
||||
|
||||
serviceInstanceCounts[ingressService{
|
||||
host: rule.Host,
|
||||
path: pa.Path,
|
||||
service: pa.Backend.ServiceName,
|
||||
}] += count
|
||||
}
|
||||
}
|
||||
|
||||
return serviceInstanceCounts, nil
|
||||
}
|
||||
|
|
@ -1,455 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
pv1 := newPercentageValueFromFloat64(0.5)
|
||||
pv2 := newPercentageValueFromFloat64(0.2)
|
||||
pv3 := newPercentageValueFromFloat64(0.3)
|
||||
f := fractionalWeightAllocator(
|
||||
map[ingressService]int{
|
||||
{
|
||||
host: "host2",
|
||||
path: "path2",
|
||||
service: "service2",
|
||||
}: int(pv2),
|
||||
{
|
||||
host: "host3",
|
||||
path: "path3",
|
||||
service: "service3",
|
||||
}: int(pv3),
|
||||
{
|
||||
host: "host1",
|
||||
path: "path1",
|
||||
service: "service1",
|
||||
}: int(pv1),
|
||||
},
|
||||
)
|
||||
|
||||
expected := fmt.Sprintf("[service1: %s service2: %s service3: %s]", pv1, pv2, pv3)
|
||||
actual := f.String()
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestGetServicesPercentageWeights(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
annotationValue string
|
||||
expectError bool
|
||||
expectedWeights map[string]percentageValue
|
||||
}{
|
||||
{
|
||||
desc: "empty annotation",
|
||||
annotationValue: ``,
|
||||
expectedWeights: map[string]percentageValue{},
|
||||
},
|
||||
{
|
||||
desc: "50% fraction",
|
||||
annotationValue: `
|
||||
service1: 10%
|
||||
service2: 20%
|
||||
service3: 20%
|
||||
`,
|
||||
expectedWeights: map[string]percentageValue{
|
||||
"service1": newPercentageValueFromFloat64(0.1),
|
||||
"service2": newPercentageValueFromFloat64(0.2),
|
||||
"service3": newPercentageValueFromFloat64(0.2),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "50% fraction with empty fraction",
|
||||
annotationValue: `
|
||||
service1: 10%
|
||||
service2: 20%
|
||||
service3: 20%
|
||||
service4:
|
||||
`,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
desc: "50% fraction float form",
|
||||
annotationValue: `
|
||||
service1: 0.1
|
||||
service2: 0.2
|
||||
service3: 0.2
|
||||
`,
|
||||
expectedWeights: map[string]percentageValue{
|
||||
"service1": newPercentageValueFromFloat64(0.001),
|
||||
"service2": newPercentageValueFromFloat64(0.002),
|
||||
"service3": newPercentageValueFromFloat64(0.002),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "no fraction",
|
||||
annotationValue: `
|
||||
service1: 10%
|
||||
service2: 90%
|
||||
`,
|
||||
expectedWeights: map[string]percentageValue{
|
||||
"service1": newPercentageValueFromFloat64(0.1),
|
||||
"service2": newPercentageValueFromFloat64(0.9),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "extra weight specification",
|
||||
annotationValue: `
|
||||
service1: 90%
|
||||
service5: 90%
|
||||
`,
|
||||
expectedWeights: map[string]percentageValue{
|
||||
"service1": newPercentageValueFromFloat64(0.9),
|
||||
"service5": newPercentageValueFromFloat64(0.9),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "malformed annotation",
|
||||
annotationValue: `
|
||||
service1- 90%
|
||||
service5- 90%
|
||||
`,
|
||||
expectError: true,
|
||||
expectedWeights: nil,
|
||||
},
|
||||
{
|
||||
desc: "more than one hundred percentaged service",
|
||||
annotationValue: `
|
||||
service1: 100%
|
||||
service2: 1%
|
||||
`,
|
||||
expectedWeights: map[string]percentageValue{
|
||||
"service1": newPercentageValueFromFloat64(1),
|
||||
"service2": newPercentageValueFromFloat64(0.01),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "incorrect percentage value",
|
||||
annotationValue: `
|
||||
service1: 1000%
|
||||
`,
|
||||
expectedWeights: map[string]percentageValue{
|
||||
"service1": newPercentageValueFromFloat64(10),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ingress := &extensionsv1beta1.Ingress{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
annotationKubernetesServiceWeights: test.annotationValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
weights, err := getServicesPercentageWeights(ingress)
|
||||
|
||||
if test.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.expectedWeights, weights)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeServiceWeights(t *testing.T) {
|
||||
client := clientMock{
|
||||
endpoints: []*corev1.Endpoints{
|
||||
buildEndpoint(
|
||||
eNamespace("testing"),
|
||||
eName("service1"),
|
||||
eUID("1"),
|
||||
subset(
|
||||
eAddresses(eAddress("10.10.0.1")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
subset(
|
||||
eAddresses(eAddress("10.21.0.2")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
),
|
||||
buildEndpoint(
|
||||
eNamespace("testing"),
|
||||
eName("service2"),
|
||||
eUID("2"),
|
||||
subset(
|
||||
eAddresses(eAddress("10.10.0.3")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
),
|
||||
buildEndpoint(
|
||||
eNamespace("testing"),
|
||||
eName("service3"),
|
||||
eUID("3"),
|
||||
subset(
|
||||
eAddresses(eAddress("10.10.0.4")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
subset(
|
||||
eAddresses(eAddress("10.21.0.5")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
subset(
|
||||
eAddresses(eAddress("10.21.0.6")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
subset(
|
||||
eAddresses(eAddress("10.21.0.7")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
),
|
||||
buildEndpoint(
|
||||
eNamespace("testing"),
|
||||
eName("service4"),
|
||||
eUID("4"),
|
||||
subset(
|
||||
eAddresses(eAddress("10.10.0.7")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
ingress *extensionsv1beta1.Ingress
|
||||
expectError bool
|
||||
expectedWeights map[ingressService]percentageValue
|
||||
}{
|
||||
{
|
||||
desc: "1 path 2 service",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, `
|
||||
service1: 10%
|
||||
`),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service2", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: false,
|
||||
expectedWeights: map[ingressService]percentageValue{
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service1",
|
||||
}: newPercentageValueFromFloat64(0.05),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service2",
|
||||
}: newPercentageValueFromFloat64(0.90),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "2 path 2 service",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, `
|
||||
service1: 60%
|
||||
`),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service2", intstr.FromInt(8080))),
|
||||
onePath(iPath("/bar"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/bar"), iBackend("service3", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: false,
|
||||
expectedWeights: map[ingressService]percentageValue{
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service1",
|
||||
}: newPercentageValueFromFloat64(0.30),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service2",
|
||||
}: newPercentageValueFromFloat64(0.40),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/bar",
|
||||
service: "service1",
|
||||
}: newPercentageValueFromFloat64(0.30),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/bar",
|
||||
service: "service3",
|
||||
}: newPercentageValueFromFloat64(0.10),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "2 path 3 service",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, `
|
||||
service1: 20%
|
||||
service3: 20%
|
||||
`),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service2", intstr.FromInt(8080))),
|
||||
onePath(iPath("/bar"), iBackend("service2", intstr.FromInt(8080))),
|
||||
onePath(iPath("/bar"), iBackend("service3", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: false,
|
||||
expectedWeights: map[ingressService]percentageValue{
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service1",
|
||||
}: newPercentageValueFromFloat64(0.10),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service2",
|
||||
}: newPercentageValueFromFloat64(0.80),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/bar",
|
||||
service: "service3",
|
||||
}: newPercentageValueFromFloat64(0.05),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/bar",
|
||||
service: "service2",
|
||||
}: newPercentageValueFromFloat64(0.80),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "1 path 4 service",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, `
|
||||
service1: 20%
|
||||
service2: 40%
|
||||
service3: 40%
|
||||
`),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service2", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service3", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service4", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: false,
|
||||
expectedWeights: map[ingressService]percentageValue{
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service1",
|
||||
}: newPercentageValueFromFloat64(0.10),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service2",
|
||||
}: newPercentageValueFromFloat64(0.40),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service3",
|
||||
}: newPercentageValueFromFloat64(0.10),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service4",
|
||||
}: newPercentageValueFromFloat64(0.00),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "2 path no service",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, `
|
||||
service1: 20%
|
||||
service2: 40%
|
||||
service3: 40%
|
||||
`),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("noservice", intstr.FromInt(8080))),
|
||||
onePath(iPath("/bar"), iBackend("noservice", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
desc: "2 path without weight",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, ``),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/bar"), iBackend("service2", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: false,
|
||||
expectedWeights: map[ingressService]percentageValue{
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/foo",
|
||||
service: "service1",
|
||||
}: newPercentageValueFromFloat64(0.50),
|
||||
{
|
||||
host: "foo.test",
|
||||
path: "/bar",
|
||||
service: "service2",
|
||||
}: newPercentageValueFromFloat64(1.00),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "2 path overflow",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesServiceWeights, `
|
||||
service1: 70%
|
||||
service2: 80%
|
||||
`),
|
||||
iRules(
|
||||
iRule(iHost("foo.test"), iPaths(
|
||||
onePath(iPath("/foo"), iBackend("service1", intstr.FromInt(8080))),
|
||||
onePath(iPath("/foo"), iBackend("service2", intstr.FromInt(8080))),
|
||||
)),
|
||||
),
|
||||
),
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
weightAllocator, err := newFractionalWeightAllocator(test.ingress, client)
|
||||
if test.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
for ingSvc, percentage := range test.expectedWeights {
|
||||
assert.Equal(t, int(percentage), weightAllocator.getWeight(ingSvc.host, ingSvc.path, ingSvc.service))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,203 +0,0 @@
|
|||
package kv
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/abronan/valkeyrie/store"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type ByKey []*store.KVPair
|
||||
|
||||
func (a ByKey) Len() int { return len(a) }
|
||||
func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
|
||||
|
||||
func filler(prefix string, opts ...func(string, map[string]*store.KVPair)) []*store.KVPair {
|
||||
buf := make(map[string]*store.KVPair)
|
||||
for _, opt := range opts {
|
||||
opt(prefix, buf)
|
||||
}
|
||||
|
||||
var result ByKey
|
||||
for _, value := range buf {
|
||||
result = append(result, value)
|
||||
}
|
||||
|
||||
sort.Sort(result)
|
||||
return result
|
||||
}
|
||||
|
||||
func backend(name string, opts ...func(map[string]string)) func(string, map[string]*store.KVPair) {
|
||||
return entry(pathBackends+name, opts...)
|
||||
}
|
||||
|
||||
func frontend(name string, opts ...func(map[string]string)) func(string, map[string]*store.KVPair) {
|
||||
return entry(pathFrontends+name, opts...)
|
||||
}
|
||||
|
||||
func entry(root string, opts ...func(map[string]string)) func(string, map[string]*store.KVPair) {
|
||||
return func(prefix string, pairs map[string]*store.KVPair) {
|
||||
prefixedRoot := prefix + pathSeparator + strings.TrimPrefix(root, pathSeparator)
|
||||
pairs[prefixedRoot] = &store.KVPair{Key: prefixedRoot, Value: []byte("")}
|
||||
|
||||
transit := make(map[string]string)
|
||||
for _, opt := range opts {
|
||||
opt(transit)
|
||||
}
|
||||
|
||||
for key, value := range transit {
|
||||
fill(pairs, prefixedRoot, key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fill(pairs map[string]*store.KVPair, previous string, current string, value string) {
|
||||
clean := strings.TrimPrefix(current, pathSeparator)
|
||||
|
||||
i := strings.IndexRune(clean, '/')
|
||||
if i > 0 {
|
||||
key := previous + pathSeparator + clean[:i]
|
||||
|
||||
if _, ok := pairs[key]; !ok || len(pairs[key].Value) == 0 {
|
||||
pairs[key] = &store.KVPair{Key: key, Value: []byte("")}
|
||||
}
|
||||
|
||||
fill(pairs, key, clean[i:], value)
|
||||
}
|
||||
|
||||
key := previous + pathSeparator + clean
|
||||
pairs[key] = &store.KVPair{Key: key, Value: []byte(value)}
|
||||
}
|
||||
|
||||
func withPair(key string, value string) func(map[string]string) {
|
||||
return func(pairs map[string]string) {
|
||||
if len(key) == 0 {
|
||||
return
|
||||
}
|
||||
pairs[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
func withList(key string, values ...string) func(map[string]string) {
|
||||
return func(pairs map[string]string) {
|
||||
if len(key) == 0 {
|
||||
return
|
||||
}
|
||||
for i, value := range values {
|
||||
pairs[key+"/"+strconv.Itoa(i)] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withErrorPage(name string, backend string, query string, statuses ...string) func(map[string]string) {
|
||||
return func(pairs map[string]string) {
|
||||
if len(name) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
withPair(pathFrontendErrorPages+name+pathFrontendErrorPagesBackend, backend)(pairs)
|
||||
withPair(pathFrontendErrorPages+name+pathFrontendErrorPagesQuery, query)(pairs)
|
||||
withList(pathFrontendErrorPages+name+pathFrontendErrorPagesStatus, statuses...)(pairs)
|
||||
}
|
||||
}
|
||||
|
||||
func withRateLimit(extractorFunc string, opts ...func(map[string]string)) func(map[string]string) {
|
||||
return func(pairs map[string]string) {
|
||||
pairs[pathFrontendRateLimitExtractorFunc] = extractorFunc
|
||||
for _, opt := range opts {
|
||||
opt(pairs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withLimit(name string, average, burst, period string) func(map[string]string) {
|
||||
return func(pairs map[string]string) {
|
||||
pairs[pathFrontendRateLimitRateSet+name+pathFrontendRateLimitAverage] = average
|
||||
pairs[pathFrontendRateLimitRateSet+name+pathFrontendRateLimitBurst] = burst
|
||||
pairs[pathFrontendRateLimitRateSet+name+pathFrontendRateLimitPeriod] = period
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiller(t *testing.T) {
|
||||
expected := []*store.KVPair{
|
||||
{Key: "traefik/backends/backend.with.dot.too", Value: []byte("")},
|
||||
{Key: "traefik/backends/backend.with.dot.too/servers", Value: []byte("")},
|
||||
{Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot", Value: []byte("")},
|
||||
{Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot.without.url", Value: []byte("")},
|
||||
{Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot.without.url/weight", Value: []byte("1")},
|
||||
{Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot/url", Value: []byte("http://172.17.0.2:80")},
|
||||
{Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot/weight", Value: []byte("1")},
|
||||
{Key: "traefik/frontends/frontend.with.dot", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/backend", Value: []byte("backend.with.dot.too")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/bar", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/bar/backend", Value: []byte("error")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/bar/query", Value: []byte("/test2")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/bar/status", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/bar/status/0", Value: []byte("400-405")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/foo", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/foo/backend", Value: []byte("error")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/foo/query", Value: []byte("/test1")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/foo/status", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/foo/status/0", Value: []byte("500-501")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/errors/foo/status/1", Value: []byte("503-599")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/extractorfunc", Value: []byte("client.ip")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar/average", Value: []byte("3")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar/burst", Value: []byte("6")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar/period", Value: []byte("9")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo/average", Value: []byte("6")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo/burst", Value: []byte("12")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo/period", Value: []byte("18")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/routes", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/routes/route.with.dot", Value: []byte("")},
|
||||
{Key: "traefik/frontends/frontend.with.dot/routes/route.with.dot/rule", Value: []byte("Host:test.localhost")},
|
||||
}
|
||||
|
||||
pairs1 := filler("traefik",
|
||||
frontend("frontend.with.dot",
|
||||
withPair("backend", "backend.with.dot.too"),
|
||||
withPair("routes/route.with.dot/rule", "Host:test.localhost"),
|
||||
withErrorPage("foo", "error", "/test1", "500-501", "503-599"),
|
||||
withErrorPage("bar", "error", "/test2", "400-405"),
|
||||
withRateLimit("client.ip",
|
||||
withLimit("foo", "6", "12", "18"),
|
||||
withLimit("bar", "3", "6", "9"))),
|
||||
backend("backend.with.dot.too",
|
||||
withPair("servers/server.with.dot/url", "http://172.17.0.2:80"),
|
||||
withPair("servers/server.with.dot/weight", "1"),
|
||||
withPair("servers/server.with.dot.without.url/weight", "1")),
|
||||
)
|
||||
assert.EqualValues(t, expected, pairs1)
|
||||
|
||||
pairs2 := filler("traefik",
|
||||
entry("frontends/frontend.with.dot",
|
||||
withPair("backend", "backend.with.dot.too"),
|
||||
withPair("routes/route.with.dot/rule", "Host:test.localhost"),
|
||||
withPair("errors/foo/backend", "error"),
|
||||
withPair("errors/foo/query", "/test1"),
|
||||
withList("errors/foo/status", "500-501", "503-599"),
|
||||
withPair("errors/bar/backend", "error"),
|
||||
withPair("errors/bar/query", "/test2"),
|
||||
withList("errors/bar/status", "400-405"),
|
||||
withPair("ratelimit/extractorfunc", "client.ip"),
|
||||
withPair("ratelimit/rateset/foo/average", "6"),
|
||||
withPair("ratelimit/rateset/foo/burst", "12"),
|
||||
withPair("ratelimit/rateset/foo/period", "18"),
|
||||
withPair("ratelimit/rateset/bar/average", "3"),
|
||||
withPair("ratelimit/rateset/bar/burst", "6"),
|
||||
withPair("ratelimit/rateset/bar/period", "9")),
|
||||
entry("backends/backend.with.dot.too",
|
||||
withPair("servers/server.with.dot/url", "http://172.17.0.2:80"),
|
||||
withPair("servers/server.with.dot/weight", "1"),
|
||||
withPair("servers/server.with.dot.without.url/weight", "1")),
|
||||
)
|
||||
assert.EqualValues(t, expected, pairs2)
|
||||
}
|
||||
|
|
@ -1,123 +0,0 @@
|
|||
package kv
|
||||
|
||||
const (
|
||||
pathBackends = "/backends/"
|
||||
pathBackendCircuitBreakerExpression = "/circuitbreaker/expression"
|
||||
pathBackendResponseForwardingFlushInterval = "/responseforwarding/flushinterval"
|
||||
pathBackendHealthCheckScheme = "/healthcheck/scheme"
|
||||
pathBackendHealthCheckPath = "/healthcheck/path"
|
||||
pathBackendHealthCheckPort = "/healthcheck/port"
|
||||
pathBackendHealthCheckInterval = "/healthcheck/interval"
|
||||
pathBackendHealthCheckTimeout = "/healthcheck/timeout"
|
||||
pathBackendHealthCheckHostname = "/healthcheck/hostname"
|
||||
pathBackendHealthCheckHeaders = "/healthcheck/headers/"
|
||||
pathBackendLoadBalancerMethod = "/loadbalancer/method"
|
||||
pathBackendLoadBalancerStickiness = "/loadbalancer/stickiness"
|
||||
pathBackendLoadBalancerStickinessCookieName = "/loadbalancer/stickiness/cookiename"
|
||||
pathBackendMaxConnAmount = "/maxconn/amount"
|
||||
pathBackendMaxConnExtractorFunc = "/maxconn/extractorfunc"
|
||||
pathBackendServers = "/servers/"
|
||||
pathBackendServerURL = "/url"
|
||||
pathBackendServerWeight = "/weight"
|
||||
pathBackendBuffering = "/buffering/"
|
||||
pathBackendBufferingMaxResponseBodyBytes = pathBackendBuffering + "maxresponsebodybytes"
|
||||
pathBackendBufferingMemResponseBodyBytes = pathBackendBuffering + "memresponsebodybytes"
|
||||
pathBackendBufferingMaxRequestBodyBytes = pathBackendBuffering + "maxrequestbodybytes"
|
||||
pathBackendBufferingMemRequestBodyBytes = pathBackendBuffering + "memrequestbodybytes"
|
||||
pathBackendBufferingRetryExpression = pathBackendBuffering + "retryexpression"
|
||||
|
||||
pathFrontends = "/frontends/"
|
||||
pathFrontendBackend = "/backend"
|
||||
pathFrontendPriority = "/priority"
|
||||
pathFrontendPassHostHeader = "/passhostheader"
|
||||
pathFrontendPassTLSClientCert = "/passTLSClientCert"
|
||||
pathFrontendPassTLSClientCertPem = pathFrontendPassTLSClientCert + "/pem"
|
||||
pathFrontendPassTLSClientCertInfos = pathFrontendPassTLSClientCert + "/infos"
|
||||
pathFrontendPassTLSClientCertInfosNotAfter = pathFrontendPassTLSClientCertInfos + "/notAfter"
|
||||
pathFrontendPassTLSClientCertInfosNotBefore = pathFrontendPassTLSClientCertInfos + "/notBefore"
|
||||
pathFrontendPassTLSClientCertInfosSans = pathFrontendPassTLSClientCertInfos + "/sans"
|
||||
pathFrontendPassTLSClientCertInfosSubject = pathFrontendPassTLSClientCertInfos + "/subject"
|
||||
pathFrontendPassTLSClientCertInfosSubjectCommonName = pathFrontendPassTLSClientCertInfosSubject + "/commonName"
|
||||
	pathFrontendPassTLSClientCertInfosSubjectCountry = pathFrontendPassTLSClientCertInfosSubject + "/country"
	pathFrontendPassTLSClientCertInfosSubjectLocality = pathFrontendPassTLSClientCertInfosSubject + "/locality"
	pathFrontendPassTLSClientCertInfosSubjectOrganization = pathFrontendPassTLSClientCertInfosSubject + "/organization"
	pathFrontendPassTLSClientCertInfosSubjectProvince = pathFrontendPassTLSClientCertInfosSubject + "/province"
	pathFrontendPassTLSClientCertInfosSubjectSerialNumber = pathFrontendPassTLSClientCertInfosSubject + "/serialNumber"
	pathFrontendPassTLSCert = "/passtlscert"
	pathFrontendWhiteListSourceRange = "/whitelist/sourcerange"
	pathFrontendWhiteListIPStrategy = "/whitelist/ipstrategy"
	pathFrontendWhiteListIPStrategyDepth = pathFrontendWhiteListIPStrategy + "/depth"
	pathFrontendWhiteListIPStrategyExcludedIPs = pathFrontendWhiteListIPStrategy + "/excludedips"

	pathFrontendAuth = "/auth/"
	pathFrontendAuthHeaderField = pathFrontendAuth + "headerfield"
	pathFrontendAuthBasic = pathFrontendAuth + "basic/"
	pathFrontendAuthBasicRemoveHeader = pathFrontendAuthBasic + "removeheader"
	pathFrontendAuthBasicUsers = pathFrontendAuthBasic + "users"
	pathFrontendAuthBasicUsersFile = pathFrontendAuthBasic + "usersfile"
	pathFrontendAuthDigest = pathFrontendAuth + "digest/"
	pathFrontendAuthDigestRemoveHeader = pathFrontendAuthDigest + "removeheader"
	pathFrontendAuthDigestUsers = pathFrontendAuthDigest + "users"
	pathFrontendAuthDigestUsersFile = pathFrontendAuthDigest + "usersfile"
	pathFrontendAuthForward = pathFrontendAuth + "forward/"
	pathFrontendAuthForwardAddress = pathFrontendAuthForward + "address"
	pathFrontendAuthForwardAuthResponseHeaders = pathFrontendAuthForward + ".authresponseheaders"
	pathFrontendAuthForwardTLS = pathFrontendAuthForward + "tls/"
	pathFrontendAuthForwardTLSCa = pathFrontendAuthForwardTLS + "ca"
	pathFrontendAuthForwardTLSCaOptional = pathFrontendAuthForwardTLS + "caoptional"
	pathFrontendAuthForwardTLSCert = pathFrontendAuthForwardTLS + "cert"
	pathFrontendAuthForwardTLSInsecureSkipVerify = pathFrontendAuthForwardTLS + "insecureskipverify"
	pathFrontendAuthForwardTLSKey = pathFrontendAuthForwardTLS + "key"
	pathFrontendAuthForwardTrustForwardHeader = pathFrontendAuthForward + "trustforwardheader"

	pathFrontendEntryPoints = "/entrypoints"
	pathFrontendRedirectEntryPoint = "/redirect/entrypoint"
	pathFrontendRedirectRegex = "/redirect/regex"
	pathFrontendRedirectReplacement = "/redirect/replacement"
	pathFrontendRedirectPermanent = "/redirect/permanent"
	pathFrontendErrorPages = "/errors/"
	pathFrontendErrorPagesBackend = "/backend"
	pathFrontendErrorPagesQuery = "/query"
	pathFrontendErrorPagesStatus = "/status"
	pathFrontendRateLimit = "/ratelimit/"
	pathFrontendRateLimitRateSet = pathFrontendRateLimit + "rateset/"
	pathFrontendRateLimitExtractorFunc = pathFrontendRateLimit + "extractorfunc"
	pathFrontendRateLimitPeriod = "/period"
	pathFrontendRateLimitAverage = "/average"
	pathFrontendRateLimitBurst = "/burst"

	pathFrontendCustomRequestHeaders = "/headers/customrequestheaders/"
	pathFrontendCustomResponseHeaders = "/headers/customresponseheaders/"
	pathFrontendAllowedHosts = "/headers/allowedhosts"
	pathFrontendHostsProxyHeaders = "/headers/hostsproxyheaders"
	pathFrontendSSLForceHost = "/headers/sslforcehost"
	pathFrontendSSLRedirect = "/headers/sslredirect"
	pathFrontendSSLTemporaryRedirect = "/headers/ssltemporaryredirect"
	pathFrontendSSLHost = "/headers/sslhost"
	pathFrontendSSLProxyHeaders = "/headers/sslproxyheaders/"
	pathFrontendSTSSeconds = "/headers/stsseconds"
	pathFrontendSTSIncludeSubdomains = "/headers/stsincludesubdomains"
	pathFrontendSTSPreload = "/headers/stspreload"
	pathFrontendForceSTSHeader = "/headers/forcestsheader"
	pathFrontendFrameDeny = "/headers/framedeny"
	pathFrontendCustomFrameOptionsValue = "/headers/customframeoptionsvalue"
	pathFrontendContentTypeNosniff = "/headers/contenttypenosniff"
	pathFrontendBrowserXSSFilter = "/headers/browserxssfilter"
	pathFrontendCustomBrowserXSSValue = "/headers/custombrowserxssvalue"
	pathFrontendContentSecurityPolicy = "/headers/contentsecuritypolicy"
	pathFrontendPublicKey = "/headers/publickey"
	pathFrontendReferrerPolicy = "/headers/referrerpolicy"
	pathFrontendIsDevelopment = "/headers/isdevelopment"

	pathFrontendRoutes = "/routes/"
	pathFrontendRule = "/rule"

	pathTLS = "/tls/"
	pathTLSEntryPoints = "/entrypoints"
	pathTLSCertFile = "/certificate/certfile"
	pathTLSKeyFile = "/certificate/keyfile"

	pathTags = "/tags"
	pathAlias = "/alias"
	pathSeparator = "/"
)
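For orientation, these suffix constants only become full store keys once joined onto a frontend or backend root under the provider prefix. A minimal, self-contained sketch of that concatenation; the "/traefik" prefix and the "frontend1" name are hypothetical, and the two constants are copied locally so the snippet compiles on its own:

package main

import "fmt"

// Local copies of two of the suffix constants above, so this sketch stands alone.
const (
	pathFrontendAuthBasic      = "/auth/basic/"
	pathFrontendAuthBasicUsers = pathFrontendAuthBasic + "users"
)

func main() {
	// Hypothetical store prefix and frontend name; the provider joins the root with the suffix.
	frontendRoot := "/traefik/frontends/frontend1"
	fmt.Println(frontendRoot + pathFrontendAuthBasicUsers) // /traefik/frontends/frontend1/auth/basic/users
}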
@ -1,128 +0,0 @@
package kv

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	"github.com/cenk/backoff"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/provider"
	"github.com/containous/traefik/safe"
	"github.com/containous/traefik/types"
)

// Provider holds common configurations of key-value providers.
type Provider struct {
	provider.BaseProvider `mapstructure:",squash" export:"true"`
	Endpoint  string `description:"Comma separated server endpoints"`
	Prefix    string `description:"Prefix used for KV store" export:"true"`
	TLS       *types.ClientTLS `description:"Enable TLS support" export:"true"`
	Username  string `description:"KV Username"`
	Password  string `description:"KV Password"`
	storeType store.Backend
	kvClient  store.Store
}

// CreateStore create the K/V store
func (p *Provider) CreateStore() (store.Store, error) {
	storeConfig := &store.Config{
		ConnectionTimeout: 30 * time.Second,
		Bucket:            "traefik",
		Username:          p.Username,
		Password:          p.Password,
	}

	if p.TLS != nil {
		var err error
		storeConfig.TLS, err = p.TLS.CreateTLSConfig()
		if err != nil {
			return nil, err
		}
	}
	return valkeyrie.NewStore(
		p.storeType,
		strings.Split(p.Endpoint, ","),
		storeConfig,
	)
}

// SetStoreType storeType setter
func (p *Provider) SetStoreType(storeType store.Backend) {
	p.storeType = storeType
}

// SetKVClient kvClient setter
func (p *Provider) SetKVClient(kvClient store.Store) {
	p.kvClient = kvClient
}

func (p *Provider) watchKv(configurationChan chan<- types.ConfigMessage, prefix string, stop chan bool) error {
	operation := func() error {
		events, err := p.kvClient.WatchTree(p.Prefix, make(chan struct{}), nil)
		if err != nil {
			return fmt.Errorf("failed to KV WatchTree: %v", err)
		}
		for {
			select {
			case <-stop:
				return nil
			case _, ok := <-events:
				if !ok {
					return errors.New("watchtree channel closed")
				}
				configuration := p.buildConfiguration()
				if configuration != nil {
					configurationChan <- types.ConfigMessage{
						ProviderName:  string(p.storeType),
						Configuration: configuration,
					}
				}
			}
		}
	}

	notify := func(err error, time time.Duration) {
		log.Errorf("KV connection error: %+v, retrying in %s", err, time)
	}
	err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
	if err != nil {
		return fmt.Errorf("cannot connect to KV server: %v", err)
	}
	return nil
}

// Provide provides the configuration to traefik via the configuration channel
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	operation := func() error {
		if _, err := p.kvClient.Exists(p.Prefix+"/qmslkjdfmqlskdjfmqlksjazçueznbvbwzlkajzebvkwjdcqmlsfj", nil); err != nil {
			return fmt.Errorf("failed to test KV store connection: %v", err)
		}
		if p.Watch {
			pool.Go(func(stop chan bool) {
				err := p.watchKv(configurationChan, p.Prefix, stop)
				if err != nil {
					log.Errorf("Cannot watch KV store: %v", err)
				}
			})
		}
		configuration := p.buildConfiguration()
		configurationChan <- types.ConfigMessage{
			ProviderName:  string(p.storeType),
			Configuration: configuration,
		}
		return nil
	}
	notify := func(err error, time time.Duration) {
		log.Errorf("KV connection error: %+v, retrying in %s", err, time)
	}
	err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
	if err != nil {
		return fmt.Errorf("cannot connect to KV server: %v", err)
	}
	return nil
}
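Both watchKv and Provide above wrap their work in the same retry pattern: the operation is retried with exponential backoff and every failure goes through a notify callback. A stripped-down, self-contained sketch of that pattern, using the plain backoff package directly rather than Traefik's job.NewBackOff and safe.OperationWithRecover wrappers:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenk/backoff"
)

func main() {
	attempts := 0
	// The operation is retried until it succeeds or the backoff gives up.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("KV store not reachable yet")
		}
		return nil
	}
	// Called on every failed attempt with the delay before the next retry.
	notify := func(err error, next time.Duration) {
		fmt.Printf("KV connection error: %v, retrying in %s\n", err, next)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		fmt.Printf("cannot connect to KV server: %v\n", err)
	}
}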
@ -1,715 +0,0 @@
package kv

import (
	"fmt"
	"math"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"text/template"

	"github.com/BurntSushi/ty/fun"
	"github.com/abronan/valkeyrie/store"
	"github.com/containous/flaeg/parse"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/provider/label"
	"github.com/containous/traefik/tls"
	"github.com/containous/traefik/types"
)

func (p *Provider) buildConfiguration() *types.Configuration {
	templateObjects := struct {
		Prefix string
	}{
		// Allow `/traefik/alias` to supersede `p.Prefix`
		Prefix: strings.TrimSuffix(p.get(p.Prefix, p.Prefix+pathAlias), pathSeparator),
	}

	var KvFuncMap = template.FuncMap{
		"List":        p.list,
		"ListServers": p.listServers,
		"Get":         p.get,
		"GetBool":     p.getBool,
		"GetInt":      p.getInt,
		"GetInt64":    p.getInt64,
		"GetList":     p.getList,
		"SplitGet":    p.splitGet,
		"Last":        p.last,
		"Has":         p.has,

		"getTLSSection": p.getTLSSection,

		// Frontend functions
		"getBackendName":       p.getFuncString(pathFrontendBackend, ""),
		"getPriority":          p.getFuncInt(pathFrontendPriority, label.DefaultFrontendPriority),
		"getPassHostHeader":    p.getFuncBool(pathFrontendPassHostHeader, label.DefaultPassHostHeader),
		"getPassTLSCert":       p.getFuncBool(pathFrontendPassTLSCert, label.DefaultPassTLSCert),
		"getPassTLSClientCert": p.getTLSClientCert,
		"getEntryPoints":       p.getFuncList(pathFrontendEntryPoints),
		"getAuth":              p.getAuth,
		"getRoutes":            p.getRoutes,
		"getRedirect":          p.getRedirect,
		"getErrorPages":        p.getErrorPages,
		"getRateLimit":         p.getRateLimit,
		"getHeaders":           p.getHeaders,
		"getWhiteList":         p.getWhiteList,

		// Backend functions
		"getServers":            p.getServers,
		"getCircuitBreaker":     p.getCircuitBreaker,
		"getResponseForwarding": p.getResponseForwarding,
		"getLoadBalancer":       p.getLoadBalancer,
		"getMaxConn":            p.getMaxConn,
		"getHealthCheck":        p.getHealthCheck,
		"getBuffering":          p.getBuffering,
	}

	configuration, err := p.GetConfiguration("templates/kv.tmpl", KvFuncMap, templateObjects)
	if err != nil {
		log.Error(err)
	}

	for key, frontend := range configuration.Frontends {
		if _, ok := configuration.Backends[frontend.Backend]; !ok {
			delete(configuration.Frontends, key)
		}
	}

	return configuration
}

func (p *Provider) getWhiteList(rootPath string) *types.WhiteList {
	ranges := p.getList(rootPath, pathFrontendWhiteListSourceRange)

	if len(ranges) == 0 {
		return nil
	}

	return &types.WhiteList{
		SourceRange: ranges,
		IPStrategy:  p.getIPStrategy(rootPath),
	}
}

func (p *Provider) getIPStrategy(rootPath string) *types.IPStrategy {
	ipStrategy := p.getBool(false, rootPath, pathFrontendWhiteListIPStrategy)
	depth := p.getInt(0, rootPath, pathFrontendWhiteListIPStrategyDepth)
	excludedIPs := p.getList(rootPath, pathFrontendWhiteListIPStrategyExcludedIPs)

	if depth == 0 && len(excludedIPs) == 0 && !ipStrategy {
		return nil
	}

	return &types.IPStrategy{
		Depth:       depth,
		ExcludedIPs: excludedIPs,
	}
}

func (p *Provider) getRedirect(rootPath string) *types.Redirect {
	permanent := p.getBool(false, rootPath, pathFrontendRedirectPermanent)

	if p.has(rootPath, pathFrontendRedirectEntryPoint) {
		return &types.Redirect{
			EntryPoint: p.get("", rootPath, pathFrontendRedirectEntryPoint),
			Permanent:  permanent,
		}
	}

	if p.has(rootPath, pathFrontendRedirectRegex) && p.has(rootPath, pathFrontendRedirectReplacement) {
		return &types.Redirect{
			Regex:       p.get("", rootPath, pathFrontendRedirectRegex),
			Replacement: p.get("", rootPath, pathFrontendRedirectReplacement),
			Permanent:   permanent,
		}
	}

	return nil
}

func (p *Provider) getErrorPages(rootPath string) map[string]*types.ErrorPage {
	var errorPages map[string]*types.ErrorPage

	pathErrors := p.list(rootPath, pathFrontendErrorPages)

	for _, pathPage := range pathErrors {
		if errorPages == nil {
			errorPages = make(map[string]*types.ErrorPage)
		}

		pageName := p.last(pathPage)

		errorPages[pageName] = &types.ErrorPage{
			Backend: p.get("", pathPage, pathFrontendErrorPagesBackend),
			Query:   p.get("", pathPage, pathFrontendErrorPagesQuery),
			Status:  p.getList(pathPage, pathFrontendErrorPagesStatus),
		}
	}

	return errorPages
}

func (p *Provider) getRateLimit(rootPath string) *types.RateLimit {
	extractorFunc := p.get("", rootPath, pathFrontendRateLimitExtractorFunc)
	if len(extractorFunc) == 0 {
		return nil
	}

	var limits map[string]*types.Rate

	pathRateSet := p.list(rootPath, pathFrontendRateLimitRateSet)
	for _, pathLimits := range pathRateSet {
		if limits == nil {
			limits = make(map[string]*types.Rate)
		}

		rawPeriod := p.get("", pathLimits+pathFrontendRateLimitPeriod)

		var period parse.Duration
		err := period.Set(rawPeriod)
		if err != nil {
			log.Errorf("Invalid %q value: %q", pathLimits+pathFrontendRateLimitPeriod, rawPeriod)
			continue
		}

		limitName := p.last(pathLimits)

		limits[limitName] = &types.Rate{
			Average: p.getInt64(0, pathLimits+pathFrontendRateLimitAverage),
			Burst:   p.getInt64(0, pathLimits+pathFrontendRateLimitBurst),
			Period:  period,
		}
	}

	return &types.RateLimit{
		ExtractorFunc: extractorFunc,
		RateSet:       limits,
	}
}

func (p *Provider) getHeaders(rootPath string) *types.Headers {
	headers := &types.Headers{
		CustomRequestHeaders:    p.getMap(rootPath, pathFrontendCustomRequestHeaders),
		CustomResponseHeaders:   p.getMap(rootPath, pathFrontendCustomResponseHeaders),
		SSLProxyHeaders:         p.getMap(rootPath, pathFrontendSSLProxyHeaders),
		AllowedHosts:            p.getList("", rootPath, pathFrontendAllowedHosts),
		HostsProxyHeaders:       p.getList(rootPath, pathFrontendHostsProxyHeaders),
		SSLForceHost:            p.getBool(false, rootPath, pathFrontendSSLForceHost),
		SSLRedirect:             p.getBool(false, rootPath, pathFrontendSSLRedirect),
		SSLTemporaryRedirect:    p.getBool(false, rootPath, pathFrontendSSLTemporaryRedirect),
		SSLHost:                 p.get("", rootPath, pathFrontendSSLHost),
		STSSeconds:              p.getInt64(0, rootPath, pathFrontendSTSSeconds),
		STSIncludeSubdomains:    p.getBool(false, rootPath, pathFrontendSTSIncludeSubdomains),
		STSPreload:              p.getBool(false, rootPath, pathFrontendSTSPreload),
		ForceSTSHeader:          p.getBool(false, rootPath, pathFrontendForceSTSHeader),
		FrameDeny:               p.getBool(false, rootPath, pathFrontendFrameDeny),
		CustomFrameOptionsValue: p.get("", rootPath, pathFrontendCustomFrameOptionsValue),
		ContentTypeNosniff:      p.getBool(false, rootPath, pathFrontendContentTypeNosniff),
		BrowserXSSFilter:        p.getBool(false, rootPath, pathFrontendBrowserXSSFilter),
		CustomBrowserXSSValue:   p.get("", rootPath, pathFrontendCustomBrowserXSSValue),
		ContentSecurityPolicy:   p.get("", rootPath, pathFrontendContentSecurityPolicy),
		PublicKey:               p.get("", rootPath, pathFrontendPublicKey),
		ReferrerPolicy:          p.get("", rootPath, pathFrontendReferrerPolicy),
		IsDevelopment:           p.getBool(false, rootPath, pathFrontendIsDevelopment),
	}

	if !headers.HasSecureHeadersDefined() && !headers.HasCustomHeadersDefined() {
		return nil
	}

	return headers
}

func (p *Provider) getLoadBalancer(rootPath string) *types.LoadBalancer {
	lb := &types.LoadBalancer{
		Method: p.get(label.DefaultBackendLoadBalancerMethod, rootPath, pathBackendLoadBalancerMethod),
	}

	if p.getBool(false, rootPath, pathBackendLoadBalancerStickiness) {
		lb.Stickiness = &types.Stickiness{
			CookieName: p.get("", rootPath, pathBackendLoadBalancerStickinessCookieName),
		}
	}

	return lb
}

func (p *Provider) getResponseForwarding(rootPath string) *types.ResponseForwarding {
	if !p.has(rootPath, pathBackendResponseForwardingFlushInterval) {
		return nil
	}
	value := p.get("", rootPath, pathBackendResponseForwardingFlushInterval)
	if len(value) == 0 {
		return nil
	}

	return &types.ResponseForwarding{
		FlushInterval: value,
	}
}

func (p *Provider) getCircuitBreaker(rootPath string) *types.CircuitBreaker {
	if !p.has(rootPath, pathBackendCircuitBreakerExpression) {
		return nil
	}

	circuitBreaker := p.get(label.DefaultCircuitBreakerExpression, rootPath, pathBackendCircuitBreakerExpression)
	if len(circuitBreaker) == 0 {
		return nil
	}

	return &types.CircuitBreaker{Expression: circuitBreaker}
}

func (p *Provider) getMaxConn(rootPath string) *types.MaxConn {
	amount := p.getInt64(math.MinInt64, rootPath, pathBackendMaxConnAmount)
	extractorFunc := p.get(label.DefaultBackendMaxconnExtractorFunc, rootPath, pathBackendMaxConnExtractorFunc)

	if amount == math.MinInt64 || len(extractorFunc) == 0 {
		return nil
	}

	return &types.MaxConn{
		Amount:        amount,
		ExtractorFunc: extractorFunc,
	}
}

func (p *Provider) getHealthCheck(rootPath string) *types.HealthCheck {
	path := p.get("", rootPath, pathBackendHealthCheckPath)

	if len(path) == 0 {
		return nil
	}

	scheme := p.get("", rootPath, pathBackendHealthCheckScheme)
	port := p.getInt(label.DefaultBackendHealthCheckPort, rootPath, pathBackendHealthCheckPort)
	interval := p.get("30s", rootPath, pathBackendHealthCheckInterval)
	timeout := p.get("5s", rootPath, pathBackendHealthCheckTimeout)
	hostname := p.get("", rootPath, pathBackendHealthCheckHostname)
	headers := p.getMap(rootPath, pathBackendHealthCheckHeaders)

	return &types.HealthCheck{
		Scheme:   scheme,
		Path:     path,
		Port:     port,
		Interval: interval,
		Timeout:  timeout,
		Hostname: hostname,
		Headers:  headers,
	}
}

func (p *Provider) getBuffering(rootPath string) *types.Buffering {
	pathsBuffering := p.list(rootPath, pathBackendBuffering)

	var buffering *types.Buffering
	if len(pathsBuffering) > 0 {
		if buffering == nil {
			buffering = &types.Buffering{}
		}

		buffering.MaxRequestBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMaxRequestBodyBytes)
		buffering.MaxResponseBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMaxResponseBodyBytes)
		buffering.MemRequestBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMemRequestBodyBytes)
		buffering.MemResponseBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMemResponseBodyBytes)
		buffering.RetryExpression = p.get("", rootPath, pathBackendBufferingRetryExpression)
	}

	return buffering
}

func (p *Provider) getTLSSection(prefix string) []*tls.Configuration {
	var tlsSection []*tls.Configuration

	for _, tlsConfPath := range p.list(prefix, pathTLS) {
		certFile := p.get("", tlsConfPath, pathTLSCertFile)
		keyFile := p.get("", tlsConfPath, pathTLSKeyFile)

		if len(certFile) == 0 && len(keyFile) == 0 {
			log.Warnf("Invalid TLS configuration (no cert and no key): %s", tlsConfPath)
			continue
		}

		entryPoints := p.getList(tlsConfPath, pathTLSEntryPoints)
		if len(entryPoints) == 0 {
			log.Warnf("Invalid TLS configuration (no entry points): %s", tlsConfPath)
			continue
		}

		tlsConf := &tls.Configuration{
			EntryPoints: entryPoints,
			Certificate: &tls.Certificate{
				CertFile: tls.FileOrContent(certFile),
				KeyFile:  tls.FileOrContent(keyFile),
			},
		}

		tlsSection = append(tlsSection, tlsConf)
	}

	return tlsSection
}

// getTLSClientCert create TLS client header configuration from labels
func (p *Provider) getTLSClientCert(rootPath string) *types.TLSClientHeaders {
	if !p.hasPrefix(rootPath, pathFrontendPassTLSClientCert) {
		return nil
	}

	tlsClientHeaders := &types.TLSClientHeaders{
		PEM: p.getBool(false, rootPath, pathFrontendPassTLSClientCertPem),
	}

	if p.hasPrefix(rootPath, pathFrontendPassTLSClientCertInfos) {
		infos := &types.TLSClientCertificateInfos{
			NotAfter:  p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosNotAfter),
			NotBefore: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosNotBefore),
			Sans:      p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSans),
		}

		if p.hasPrefix(rootPath, pathFrontendPassTLSClientCertInfosSubject) {
			subject := &types.TLSCLientCertificateSubjectInfos{
				CommonName:   p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectCommonName),
				Country:      p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectCountry),
				Locality:     p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectLocality),
				Organization: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectOrganization),
				Province:     p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectProvince),
				SerialNumber: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectSerialNumber),
			}
			infos.Subject = subject
		}
		tlsClientHeaders.Infos = infos
	}
	return tlsClientHeaders
}

// GetAuth Create auth from path
func (p *Provider) getAuth(rootPath string) *types.Auth {
	if p.hasPrefix(rootPath, pathFrontendAuth) {
		auth := &types.Auth{
			HeaderField: p.get("", rootPath, pathFrontendAuthHeaderField),
		}

		if p.hasPrefix(rootPath, pathFrontendAuthBasic) {
			auth.Basic = p.getAuthBasic(rootPath)
		} else if p.hasPrefix(rootPath, pathFrontendAuthDigest) {
			auth.Digest = p.getAuthDigest(rootPath)
		} else if p.hasPrefix(rootPath, pathFrontendAuthForward) {
			auth.Forward = p.getAuthForward(rootPath)
		}

		return auth
	}
	return nil
}

// getAuthBasic Create Basic Auth from path
func (p *Provider) getAuthBasic(rootPath string) *types.Basic {
	return &types.Basic{
		UsersFile:    p.get("", rootPath, pathFrontendAuthBasicUsersFile),
		RemoveHeader: p.getBool(false, rootPath, pathFrontendAuthBasicRemoveHeader),
		Users:        p.getList(rootPath, pathFrontendAuthBasicUsers),
	}
}

// getAuthDigest Create Digest Auth from path
func (p *Provider) getAuthDigest(rootPath string) *types.Digest {
	return &types.Digest{
		Users:        p.getList(rootPath, pathFrontendAuthDigestUsers),
		UsersFile:    p.get("", rootPath, pathFrontendAuthDigestUsersFile),
		RemoveHeader: p.getBool(false, rootPath, pathFrontendAuthDigestRemoveHeader),
	}
}

// getAuthForward Create Forward Auth from path
func (p *Provider) getAuthForward(rootPath string) *types.Forward {
	forwardAuth := &types.Forward{
		Address:             p.get("", rootPath, pathFrontendAuthForwardAddress),
		TrustForwardHeader:  p.getBool(false, rootPath, pathFrontendAuthForwardTrustForwardHeader),
		AuthResponseHeaders: p.getList(rootPath, pathFrontendAuthForwardAuthResponseHeaders),
	}

	// TLS configuration
	if len(p.getList(rootPath, pathFrontendAuthForwardTLS)) > 0 {
		forwardAuth.TLS = &types.ClientTLS{
			CA:                 p.get("", rootPath, pathFrontendAuthForwardTLSCa),
			CAOptional:         p.getBool(false, rootPath, pathFrontendAuthForwardTLSCaOptional),
			Cert:               p.get("", rootPath, pathFrontendAuthForwardTLSCert),
			InsecureSkipVerify: p.getBool(false, rootPath, pathFrontendAuthForwardTLSInsecureSkipVerify),
			Key:                p.get("", rootPath, pathFrontendAuthForwardTLSKey),
		}
	}

	return forwardAuth
}

func (p *Provider) getRoutes(rootPath string) map[string]types.Route {
	var routes map[string]types.Route

	rts := p.list(rootPath, pathFrontendRoutes)
	for _, rt := range rts {

		rule := p.get("", rt, pathFrontendRule)
		if len(rule) == 0 {
			continue
		}

		if routes == nil {
			routes = make(map[string]types.Route)
		}

		routeName := p.last(rt)
		routes[routeName] = types.Route{
			Rule: rule,
		}
	}

	return routes
}

func (p *Provider) getServers(rootPath string) map[string]types.Server {
	var servers map[string]types.Server

	serverKeys := p.listServers(rootPath)
	for _, serverKey := range serverKeys {
		serverURL := p.get("", serverKey, pathBackendServerURL)
		if len(serverURL) == 0 {
			continue
		}

		if servers == nil {
			servers = make(map[string]types.Server)
		}

		serverName := p.last(serverKey)
		servers[serverName] = types.Server{
			URL:    serverURL,
			Weight: p.getInt(label.DefaultWeight, serverKey, pathBackendServerWeight),
		}
	}

	return servers
}

func (p *Provider) listServers(backend string) []string {
	serverNames := p.list(backend, pathBackendServers)
	return fun.Filter(p.serverFilter, serverNames).([]string)
}

func (p *Provider) serverFilter(serverName string) bool {
	key := fmt.Sprint(serverName, pathBackendServerURL)
	if _, err := p.kvClient.Get(key, nil); err != nil {
		if err != store.ErrKeyNotFound {
			log.Errorf("Failed to retrieve value for key %s: %s", key, err)
		}
		return false
	}
	return p.checkConstraints(serverName, pathTags)
}

func (p *Provider) checkConstraints(keys ...string) bool {
	joinedKeys := strings.Join(keys, "")
	keyPair, err := p.kvClient.Get(joinedKeys, nil)

	value := ""
	if err == nil && keyPair != nil && keyPair.Value != nil {
		value = string(keyPair.Value)
	}

	constraintTags := label.SplitAndTrimString(value, ",")
	ok, failingConstraint := p.MatchConstraints(constraintTags)
	if !ok {
		if failingConstraint != nil {
			log.Debugf("Constraint %v not matching with following tags: %v", failingConstraint.String(), value)
		}
		return false
	}
	return true
}

func (p *Provider) getFuncString(key string, defaultValue string) func(rootPath string) string {
	return func(rootPath string) string {
		return p.get(defaultValue, rootPath, key)
	}
}

func (p *Provider) getFuncBool(key string, defaultValue bool) func(rootPath string) bool {
	return func(rootPath string) bool {
		return p.getBool(defaultValue, rootPath, key)
	}
}

func (p *Provider) getFuncInt(key string, defaultValue int) func(rootPath string) int {
	return func(rootPath string) int {
		return p.getInt(defaultValue, rootPath, key)
	}
}

func (p *Provider) getFuncList(key string) func(rootPath string) []string {
	return func(rootPath string) []string {
		return p.getList(rootPath, key)
	}
}

func (p *Provider) get(defaultValue string, keyParts ...string) string {
	key := strings.Join(keyParts, "")

	if p.storeType == store.ETCD {
		key = strings.TrimPrefix(key, pathSeparator)
	}

	keyPair, err := p.kvClient.Get(key, nil)
	if err != nil {
		log.Debugf("Cannot get key %s %s, setting default %s", key, err, defaultValue)
		return defaultValue
	} else if keyPair == nil {
		log.Debugf("Cannot get key %s, setting default %s", key, defaultValue)
		return defaultValue
	}

	return string(keyPair.Value)
}

func (p *Provider) getBool(defaultValue bool, keyParts ...string) bool {
	rawValue := p.get(strconv.FormatBool(defaultValue), keyParts...)

	if len(rawValue) == 0 {
		return defaultValue
	}

	value, err := strconv.ParseBool(rawValue)
	if err != nil {
		log.Errorf("Invalid value for %v: %s", keyParts, rawValue)
		return defaultValue
	}
	return value
}

func (p *Provider) has(keyParts ...string) bool {
	value := p.get("", keyParts...)
	return len(value) > 0
}

func (p *Provider) hasPrefix(keyParts ...string) bool {
	baseKey := strings.Join(keyParts, "")
	if !strings.HasSuffix(baseKey, "/") {
		baseKey += "/"
	}

	listKeys, err := p.kvClient.List(baseKey, nil)
	if err != nil {
		log.Debugf("Cannot list keys under %q: %v", baseKey, err)
		return false
	}

	return len(listKeys) > 0
}

func (p *Provider) getInt(defaultValue int, keyParts ...string) int {
	rawValue := p.get("", keyParts...)

	if len(rawValue) == 0 {
		return defaultValue
	}

	value, err := strconv.Atoi(rawValue)
	if err != nil {
		log.Errorf("Invalid value for %v: %s", keyParts, rawValue)
		return defaultValue
	}
	return value
}

func (p *Provider) getInt64(defaultValue int64, keyParts ...string) int64 {
	rawValue := p.get("", keyParts...)

	if len(rawValue) == 0 {
		return defaultValue
	}

	value, err := strconv.ParseInt(rawValue, 10, 64)
	if err != nil {
		log.Errorf("Invalid value for %v: %s", keyParts, rawValue)
		return defaultValue
	}
	return value
}

func (p *Provider) list(keyParts ...string) []string {
	rootKey := strings.Join(keyParts, "")

	keysPairs, err := p.kvClient.List(rootKey, nil)
	if err != nil {
		log.Debugf("Cannot list keys under %q: %v", rootKey, err)
		return nil
	}

	directoryKeys := make(map[string]string)
	for _, key := range keysPairs {
		directory := strings.Split(strings.TrimPrefix(key.Key, rootKey), pathSeparator)[0]
		directoryKeys[directory] = rootKey + directory
	}

	keys := fun.Values(directoryKeys).([]string)
	sort.Strings(keys)
	return keys
}

func (p *Provider) getList(keyParts ...string) []string {
	values := p.splitGet(keyParts...)
	if len(values) > 0 {
		return values
	}

	return p.getSlice(keyParts...)
}

// get sub keys. ex: foo/0, foo/1, foo/2
func (p *Provider) getSlice(keyParts ...string) []string {
	baseKey := strings.Join(keyParts, "")
	if !strings.HasSuffix(baseKey, "/") {
		baseKey += "/"
	}

	listKeys := p.list(baseKey)

	var values []string
	for _, entryKey := range listKeys {
		val := p.get("", entryKey)
		if len(val) > 0 {
			values = append(values, val)
		}
	}
	return values
}

func (p *Provider) splitGet(keyParts ...string) []string {
	value := p.get("", keyParts...)

	if len(value) == 0 {
		return nil
	}
	return label.SplitAndTrimString(value, ",")
}

func (p *Provider) last(key string) string {
	index := strings.LastIndex(key, pathSeparator)
	return key[index+1:]
}

func (p *Provider) getMap(keyParts ...string) map[string]string {
	var mapData map[string]string

	list := p.list(keyParts...)
	for _, name := range list {
		if mapData == nil {
			mapData = make(map[string]string)
		}

		mapData[http.CanonicalHeaderKey(p.last(name))] = p.get("", name)
	}

	return mapData
}
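The getters above all read a flat key space rooted at the provider prefix: list discovers child "directories", while get, getList and getMap read leaf values. A small illustrative dump of what such a key space might look like; the prefix, frontend and backend names are made up, and the suffixes correspond to the path constants earlier in this diff:

package main

import "fmt"

func main() {
	// Hypothetical flat key/value pairs of the kind the getters above resolve.
	keys := []string{
		"/traefik/frontends/front1/backend = back1",
		"/traefik/frontends/front1/routes/route1/rule = Host:example.com",
		"/traefik/frontends/front1/whitelist/sourcerange = 10.0.0.0/8,192.168.0.0/16",
		"/traefik/backends/back1/loadbalancer/method = drr",
		"/traefik/backends/back1/servers/server1/url = http://10.0.0.5:80",
		"/traefik/backends/back1/servers/server1/weight = 1",
	}
	for _, k := range keys {
		fmt.Println(k)
	}
}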
File diff suppressed because it is too large
@ -1,124 +0,0 @@
package kv

import (
	"errors"
	"strings"

	"github.com/abronan/valkeyrie/store"
)

func newProviderMock(kvPairs []*store.KVPair) *Provider {
	return &Provider{
		Prefix: "traefik",
		kvClient: &Mock{
			KVPairs: kvPairs,
		},
	}
}

// Override Get/List to return a error
type KvError struct {
	Get  error
	List error
}

// Extremely limited mock store so we can test initialization
type Mock struct {
	Error           KvError
	KVPairs         []*store.KVPair
	WatchTreeMethod func() <-chan []*store.KVPair
}

func newKvClientMock(kvPairs []*store.KVPair, err error) *Mock {
	mock := &Mock{
		KVPairs: kvPairs,
	}

	if err != nil {
		mock.Error = KvError{
			Get:  err,
			List: err,
		}
	}
	return mock
}

func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
	return errors.New("put not supported")
}

func (s *Mock) Get(key string, options *store.ReadOptions) (*store.KVPair, error) {
	if err := s.Error.Get; err != nil {
		return nil, err
	}
	for _, kvPair := range s.KVPairs {
		if kvPair.Key == key {
			return kvPair, nil
		}
	}
	return nil, store.ErrKeyNotFound
}

func (s *Mock) Delete(key string) error {
	return errors.New("delete not supported")
}

// Exists mock
func (s *Mock) Exists(key string, options *store.ReadOptions) (bool, error) {
	if err := s.Error.Get; err != nil {
		return false, err
	}
	for _, kvPair := range s.KVPairs {
		if strings.HasPrefix(kvPair.Key, key) {
			return true, nil
		}
	}
	return false, store.ErrKeyNotFound
}

// Watch mock
func (s *Mock) Watch(key string, stopCh <-chan struct{}, options *store.ReadOptions) (<-chan *store.KVPair, error) {
	return nil, errors.New("watch not supported")
}

// WatchTree mock
func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}, options *store.ReadOptions) (<-chan []*store.KVPair, error) {
	return s.WatchTreeMethod(), nil
}

// NewLock mock
func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
	return nil, errors.New("NewLock not supported")
}

// List mock
func (s *Mock) List(prefix string, options *store.ReadOptions) ([]*store.KVPair, error) {
	if err := s.Error.List; err != nil {
		return nil, err
	}
	var kv []*store.KVPair
	for _, kvPair := range s.KVPairs {
		if strings.HasPrefix(kvPair.Key, prefix) && !strings.ContainsAny(strings.TrimPrefix(kvPair.Key, prefix), pathSeparator) {
			kv = append(kv, kvPair)
		}
	}
	return kv, nil
}

// DeleteTree mock
func (s *Mock) DeleteTree(prefix string) error {
	return errors.New("DeleteTree not supported")
}

// AtomicPut mock
func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
	return false, nil, errors.New("AtomicPut not supported")
}

// AtomicDelete mock
func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
	return false, errors.New("AtomicDelete not supported")
}

// Close mock
func (s *Mock) Close() {}
@ -1,53 +0,0 @@
package kv

import (
	"testing"
	"time"

	"github.com/abronan/valkeyrie/store"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/types"
)

func TestKvWatchTree(t *testing.T) {
	returnedChans := make(chan chan []*store.KVPair)
	provider := Provider{
		kvClient: &Mock{
			WatchTreeMethod: func() <-chan []*store.KVPair {
				c := make(chan []*store.KVPair, 10)
				returnedChans <- c
				return c
			},
		},
	}

	configChan := make(chan types.ConfigMessage)
	go func() {
		if err := provider.watchKv(configChan, "prefix", make(chan bool, 1)); err != nil {
			log.Error(err)
		}
	}()

	select {
	case c1 := <-returnedChans:
		c1 <- []*store.KVPair{}
		<-configChan
		close(c1) // WatchTree chans can close due to error
	case <-time.After(1 * time.Second):
		t.Fatalf("Failed to create a new WatchTree chan")
	}

	select {
	case c2 := <-returnedChans:
		c2 <- []*store.KVPair{}
		<-configChan
	case <-time.After(1 * time.Second):
		t.Fatalf("Failed to create a new WatchTree chan")
	}

	select {
	case <-configChan:
		t.Fatalf("configChan should be empty")
	default:
	}
}
@ -1,212 +0,0 @@
package label

import (
	"fmt"
	"net/http"
	"regexp"
	"strconv"
	"strings"

	"github.com/containous/traefik/log"
)

const (
	mapEntrySeparator = "||"
	mapValueSeparator = ":"
)

// Default values
const (
	DefaultWeight                                  = 1
	DefaultProtocol                                = "http"
	DefaultPassHostHeader                          = true
	DefaultPassTLSCert                             = false
	DefaultFrontendPriority                        = 0
	DefaultCircuitBreakerExpression                = "NetworkErrorRatio() > 1"
	DefaultBackendLoadBalancerMethod               = "wrr"
	DefaultBackendMaxconnExtractorFunc             = "request.host"
	DefaultBackendLoadbalancerStickinessCookieName = ""
	DefaultBackendHealthCheckPort                  = 0
)

var (
	// RegexpFrontendErrorPage used to extract error pages from label
	RegexpFrontendErrorPage = regexp.MustCompile(`^traefik\.frontend\.errors\.(?P<name>[^ .]+)\.(?P<field>[^ .]+)$`)

	// RegexpFrontendRateLimit used to extract rate limits from label
	RegexpFrontendRateLimit = regexp.MustCompile(`^traefik\.frontend\.rateLimit\.rateSet\.(?P<name>[^ .]+)\.(?P<field>[^ .]+)$`)
)

// GetStringValue get string value associated to a label
func GetStringValue(labels map[string]string, labelName string, defaultValue string) string {
	if value, ok := labels[labelName]; ok && len(value) > 0 {
		return value
	}
	return defaultValue
}

// GetBoolValue get bool value associated to a label
func GetBoolValue(labels map[string]string, labelName string, defaultValue bool) bool {
	rawValue, ok := labels[labelName]
	if ok {
		v, err := strconv.ParseBool(rawValue)
		if err == nil {
			return v
		}
		log.Errorf("Unable to parse %q: %q, falling back to %v. %v", labelName, rawValue, defaultValue, err)
	}
	return defaultValue
}

// GetIntValue get int value associated to a label
func GetIntValue(labels map[string]string, labelName string, defaultValue int) int {
	if rawValue, ok := labels[labelName]; ok {
		value, err := strconv.Atoi(rawValue)
		if err == nil {
			return value
		}
		log.Errorf("Unable to parse %q: %q, falling back to %v. %v", labelName, rawValue, defaultValue, err)
	}
	return defaultValue
}

// GetInt64Value get int64 value associated to a label
func GetInt64Value(labels map[string]string, labelName string, defaultValue int64) int64 {
	if rawValue, ok := labels[labelName]; ok {
		value, err := strconv.ParseInt(rawValue, 10, 64)
		if err == nil {
			return value
		}
		log.Errorf("Unable to parse %q: %q, falling back to %v. %v", labelName, rawValue, defaultValue, err)
	}
	return defaultValue
}

// GetSliceStringValue get a slice of string associated to a label
func GetSliceStringValue(labels map[string]string, labelName string) []string {
	var value []string

	if values, ok := labels[labelName]; ok {
		value = SplitAndTrimString(values, ",")

		if len(value) == 0 {
			log.Debugf("Could not load %q.", labelName)
		}
	}
	return value
}

// ParseMapValue get Map value for a label value
func ParseMapValue(labelName, values string) map[string]string {
	mapValue := make(map[string]string)

	for _, parts := range strings.Split(values, mapEntrySeparator) {
		pair := strings.SplitN(parts, mapValueSeparator, 2)
		if len(pair) != 2 {
			log.Warnf("Could not load %q: %q, skipping...", labelName, parts)
		} else {
			mapValue[http.CanonicalHeaderKey(strings.TrimSpace(pair[0]))] = strings.TrimSpace(pair[1])
		}
	}

	if len(mapValue) == 0 {
		log.Errorf("Could not load %q, skipping...", labelName)
		return nil
	}
	return mapValue
}

// GetMapValue get Map value associated to a label
func GetMapValue(labels map[string]string, labelName string) map[string]string {
	if values, ok := labels[labelName]; ok {

		if len(values) == 0 {
			log.Errorf("Missing value for %q, skipping...", labelName)
			return nil
		}

		return ParseMapValue(labelName, values)
	}

	return nil
}

// GetStringMultipleStrict get multiple string values associated to several labels
// Fail if one label is missing
func GetStringMultipleStrict(labels map[string]string, labelNames ...string) (map[string]string, error) {
	foundLabels := map[string]string{}
	for _, name := range labelNames {
		value := GetStringValue(labels, name, "")
		// Error out only if one of them is not defined.
		if len(value) == 0 {
			return nil, fmt.Errorf("label not found: %s", name)
		}
		foundLabels[name] = value
	}
	return foundLabels, nil
}

// Has Check if a value is associated to a label
func Has(labels map[string]string, labelName string) bool {
	value, ok := labels[labelName]
	return ok && len(value) > 0
}

// HasPrefix Check if a value is associated to a less one label with a prefix
func HasPrefix(labels map[string]string, prefix string) bool {
	for name, value := range labels {
		if strings.HasPrefix(name, prefix) && len(value) > 0 {
			return true
		}
	}
	return false
}

// IsEnabled Check if a container is enabled in Traefik
func IsEnabled(labels map[string]string, exposedByDefault bool) bool {
	return GetBoolValue(labels, TraefikEnable, exposedByDefault)
}

// SplitAndTrimString splits separatedString at the separator character and trims each
// piece, filtering out empty pieces. Returns the list of pieces or nil if the input
// did not contain a non-empty piece.
func SplitAndTrimString(base string, sep string) []string {
	var trimmedStrings []string

	for _, s := range strings.Split(base, sep) {
		s = strings.TrimSpace(s)
		if len(s) > 0 {
			trimmedStrings = append(trimmedStrings, s)
		}
	}

	return trimmedStrings
}

// GetFuncString a func related to GetStringValue
func GetFuncString(labelName string, defaultValue string) func(map[string]string) string {
	return func(labels map[string]string) string {
		return GetStringValue(labels, labelName, defaultValue)
	}
}

// GetFuncInt a func related to GetIntValue
func GetFuncInt(labelName string, defaultValue int) func(map[string]string) int {
	return func(labels map[string]string) int {
		return GetIntValue(labels, labelName, defaultValue)
	}
}

// GetFuncBool a func related to GetBoolValue
func GetFuncBool(labelName string, defaultValue bool) func(map[string]string) bool {
	return func(labels map[string]string) bool {
		return GetBoolValue(labels, labelName, defaultValue)
	}
}

// GetFuncSliceString a func related to GetSliceStringValue
func GetFuncSliceString(labelName string) func(map[string]string) []string {
	return func(labels map[string]string) []string {
		return GetSliceStringValue(labels, labelName)
	}
}
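ParseMapValue above is what turns a single label value into a header map, splitting entries on "||" and keys from values on ":". A self-contained sketch of the same parsing rules, re-implemented locally so it runs without the label package; the real helper also logs and drops malformed entries:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Simplified re-implementation of the ParseMapValue rules shown above.
func parseMapValue(values string) map[string]string {
	out := make(map[string]string)
	for _, part := range strings.Split(values, "||") {
		pair := strings.SplitN(part, ":", 2)
		if len(pair) == 2 {
			out[http.CanonicalHeaderKey(strings.TrimSpace(pair[0]))] = strings.TrimSpace(pair[1])
		}
	}
	return out
}

func main() {
	// Example label value using the "||" entry separator and ":" key/value separator.
	fmt.Println(parseMapValue("X-Custom-A:foo || X-Custom-B:bar"))
	// map[X-Custom-A:foo X-Custom-B:bar]
}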
@ -1,692 +0,0 @@
package label

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSplitAndTrimString(t *testing.T) {
	testCases := []struct {
		desc     string
		input    string
		expected []string
	}{
		{
			desc:     "empty string",
			input:    "",
			expected: nil,
		}, {
			desc:     "one piece",
			input:    "foo",
			expected: []string{"foo"},
		}, {
			desc:     "two pieces",
			input:    "foo,bar",
			expected: []string{"foo", "bar"},
		}, {
			desc:     "three pieces",
			input:    "foo,bar,zoo",
			expected: []string{"foo", "bar", "zoo"},
		}, {
			desc:     "two pieces with whitespace",
			input:    " foo , bar ",
			expected: []string{"foo", "bar"},
		}, {
			desc:     "consecutive commas",
			input:    " foo ,, bar ",
			expected: []string{"foo", "bar"},
		}, {
			desc:     "consecutive commas with whitespace",
			input:    " foo , , bar ",
			expected: []string{"foo", "bar"},
		}, {
			desc:     "leading and trailing commas",
			input:    ",, foo , , bar,, , ",
			expected: []string{"foo", "bar"},
		}, {
			desc:     "no valid pieces",
			input:    ", , , ,, ,",
			expected: nil,
		},
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()
			actual := SplitAndTrimString(test.input, ",")
			assert.Equal(t, test.expected, actual)
		})
	}
}

func TestGetStringValue(t *testing.T) {
	testCases := []struct {
		desc         string
		labels       map[string]string
		labelName    string
		defaultValue string
		expected     string
	}{
		{
			desc:         "empty labels map",
			labelName:    "foo",
			defaultValue: "default",
			expected:     "default",
		},
		{
			desc: "existing label",
			labels: map[string]string{
				"foo": "bar",
			},
			labelName:    "foo",
			defaultValue: "default",
			expected:     "bar",
		},
		{
			desc: "non existing label",
			labels: map[string]string{
				"foo": "bar",
			},
			labelName:    "fii",
			defaultValue: "default",
			expected:     "default",
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := GetStringValue(test.labels, test.labelName, test.defaultValue)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestGetBoolValue(t *testing.T) {
	testCases := []struct {
		desc         string
		labels       map[string]string
		labelName    string
		defaultValue bool
		expected     bool
	}{
		{
			desc:      "empty map",
			labelName: "foo",
		},
		{
			desc: "invalid boolean value",
			labels: map[string]string{
				"foo": "bar",
			},
			labelName:    "foo",
			defaultValue: true,
			expected:     true,
		},
		{
			desc: "valid boolean value: true",
			labels: map[string]string{
				"foo": "true",
			},
			labelName:    "foo",
			defaultValue: false,
			expected:     true,
		},
		{
			desc: "valid boolean value: false",
			labels: map[string]string{
				"foo": "false",
			},
			labelName:    "foo",
			defaultValue: true,
			expected:     false,
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := GetBoolValue(test.labels, test.labelName, test.defaultValue)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestGetIntValue(t *testing.T) {
	testCases := []struct {
		desc         string
		labels       map[string]string
		labelName    string
		defaultValue int
		expected     int
	}{
		{
			desc:      "empty map",
			labelName: "foo",
		},
		{
			desc:      "invalid int value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "bar",
			},
			defaultValue: 666,
			expected:     666,
		},
		{
			desc:      "negative int value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "-1",
			},
			defaultValue: 666,
			expected:     -1,
		},
		{
			desc:      "positive int value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "1",
			},
			defaultValue: 666,
			expected:     1,
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := GetIntValue(test.labels, test.labelName, test.defaultValue)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestGetInt64Value(t *testing.T) {
	testCases := []struct {
		desc         string
		labels       map[string]string
		labelName    string
		defaultValue int64
		expected     int64
	}{
		{
			desc:      "empty map",
			labelName: "foo",
		},
		{
			desc:      "invalid int value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "bar",
			},
			defaultValue: 666,
			expected:     666,
		},
		{
			desc:      "negative int value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "-1",
			},
			defaultValue: 666,
			expected:     -1,
		},
		{
			desc:      "positive int value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "1",
			},
			defaultValue: 666,
			expected:     1,
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := GetInt64Value(test.labels, test.labelName, test.defaultValue)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestGetSliceStringValue(t *testing.T) {
	testCases := []struct {
		desc      string
		labels    map[string]string
		labelName string
		expected  []string
	}{
		{
			desc:      "empty map",
			labelName: "foo",
		},
		{
			desc: "empty value",
			labels: map[string]string{
				"foo": "",
			},
			labelName: "foo",
			expected:  nil,
		},
		{
			desc: "one value, not split",
			labels: map[string]string{
				"foo": "bar",
			},
			labelName: "foo",
			expected:  []string{"bar"},
		},
		{
			desc: "several values",
			labels: map[string]string{
				"foo": "bar,bir ,bur",
			},
			labelName: "foo",
			expected:  []string{"bar", "bir", "bur"},
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := GetSliceStringValue(test.labels, test.labelName)
			assert.EqualValues(t, test.expected, got)
		})
	}
}

func TestGetMapValue(t *testing.T) {
	testCases := []struct {
		desc      string
		labels    map[string]string
		labelName string
		expected  map[string]string
	}{
		{
			desc:      "empty map",
			labelName: "foo",
		},
		{
			desc:      "existent label with empty entry",
			labelName: "foo",
			labels: map[string]string{
				"foo": "",
			},
			expected: nil,
		},
		{
			desc:      "existent label with invalid entry",
			labelName: "foo",
			labels: map[string]string{
				"foo": "bar",
			},
			expected: nil,
		},
		{
			desc:      "existent label with empty value",
			labelName: "foo",
			labels: map[string]string{
				"foo": "bar:",
			},
			expected: map[string]string{
				"Bar": "",
			},
		},
		{
			desc:      "one entry",
			labelName: "foo",
			labels: map[string]string{
				"foo": " Access-Control-Allow-Methods:POST,GET,OPTIONS ",
			},
			expected: map[string]string{
				"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
			},
		},
		{
			desc:      "several entry",
			labelName: "foo",
			labels: map[string]string{
				"foo": "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
			},
			expected: map[string]string{
				"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
				"Content-Type":                 "application/json; charset=utf-8",
			},
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := GetMapValue(test.labels, test.labelName)
			assert.EqualValues(t, test.expected, got)
		})
	}
}

func TestGetStringMultipleStrict(t *testing.T) {
	testCases := []struct {
		desc        string
		labels      map[string]string
		labelNames  []string
		expected    map[string]string
		expectedErr bool
	}{
		{
			desc:     "empty labels names and empty labels map",
			labels:   map[string]string{},
			expected: map[string]string{},
		},
		{
			desc: "empty labels names",
			labels: map[string]string{
				"foo": "bar",
				"fii": "bir",
			},
			expected: map[string]string{},
		},
		{
			desc: "one label missing",
			labels: map[string]string{
				"foo": "bar",
				"fii": "bir",
				"fyy": "byr",
			},
			labelNames:  []string{"foo", "fii", "fuu"},
			expected:    nil,
			expectedErr: true,
		},
		{
			desc: "all labels are present",
			labels: map[string]string{
				"foo": "bar",
				"fii": "bir",
				"fyy": "byr",
			},
			labelNames: []string{"foo", "fii"},
			expected: map[string]string{
				"foo": "bar",
				"fii": "bir",
			},
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got, err := GetStringMultipleStrict(test.labels, test.labelNames...)
			if (err != nil) != test.expectedErr {
				t.Errorf("error = %v, wantErr %v", err, test.expectedErr)
				return
			}
			assert.EqualValues(t, test.expected, got)
		})
	}
}

func TestHas(t *testing.T) {
	testCases := []struct {
		desc      string
		labels    map[string]string
		labelName string
		expected  bool
	}{
		{
			desc:      "nil labels map",
			labelName: "foo",
		},
		{
			desc: "nonexistent label",
			labels: map[string]string{
				"foo": "bar",
			},
			labelName: "fii",
			expected:  false,
		},
		{
			desc: "existent label",
			labels: map[string]string{
				"foo": "bar",
			},
			labelName: "foo",
			expected:  true,
		},
		{
			desc: "existent label with empty value",
			labels: map[string]string{
				"foo": "",
			},
			labelName: "foo",
			expected:  false,
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := Has(test.labels, test.labelName)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestIsEnabled(t *testing.T) {
	testCases := []struct {
		desc             string
		labels           map[string]string
		exposedByDefault bool
		expected         bool
	}{
		{
			desc:             "empty labels map & exposedByDefault true",
			exposedByDefault: true,
			expected:         true,
		},
		{
			desc:             "empty labels map & exposedByDefault false",
			exposedByDefault: false,
			expected:         false,
		},
		{
			desc: "exposedByDefault false and label enable true",
			labels: map[string]string{
				TraefikEnable: "true",
			},
			exposedByDefault: false,
			expected:         true,
		},
		{
			desc: "exposedByDefault false and label enable false",
			labels: map[string]string{
				TraefikEnable: "false",
			},
			exposedByDefault: false,
			expected:         false,
		},
		{
			desc: "exposedByDefault true and label enable false",
			labels: map[string]string{
				TraefikEnable: "false",
			},
			exposedByDefault: true,
			expected:         false,
		},
		{
			desc: "exposedByDefault true and label enable true",
			labels: map[string]string{
				TraefikEnable: "true",
			},
			exposedByDefault: true,
			expected:         true,
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := IsEnabled(test.labels, test.exposedByDefault)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestHasPrefix(t *testing.T) {
	testCases := []struct {
		desc     string
		labels   map[string]string
		prefix   string
		expected bool
	}{
		{
			desc:     "nil labels map",
			prefix:   "foo",
			expected: false,
		},
		{
			desc: "nonexistent prefix",
			labels: map[string]string{
				"foo.carotte": "bar",
			},
			prefix:   "fii",
			expected: false,
		},
		{
			desc: "existent prefix",
			labels: map[string]string{
				"foo.carotte": "bar",
			},
			prefix:   "foo",
			expected: true,
		},
		{
			desc: "existent prefix with empty value",
			labels: map[string]string{
				"foo.carotte": "",
			},
			prefix:   "foo",
			expected: false,
		},
	}
	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := HasPrefix(test.labels, test.prefix)
			assert.Equal(t, test.expected, got)
		})
	}
}

func TestGetFuncString(t *testing.T) {
	testCases := []struct {
		labels       map[string]string
		labelName    string
		defaultValue string
		expected     string
	}{
		{
			labels:       nil,
			labelName:    TraefikProtocol,
			defaultValue: DefaultProtocol,
			expected:     "http",
		},
		{
			labels: map[string]string{
				TraefikProtocol: "https",
			},
			labelName:    TraefikProtocol,
			defaultValue: DefaultProtocol,
			expected:     "https",
		},
	}

	for containerID, test := range testCases {
		test := test
		t.Run(test.labelName+strconv.Itoa(containerID), func(t *testing.T) {
			t.Parallel()

			actual := GetFuncString(test.labelName, test.defaultValue)(test.labels)
			assert.Equal(t, test.expected, actual)
		})
	}
}

func TestGetSliceString(t *testing.T) {
	testCases := []struct {
		desc      string
		labels    map[string]string
		labelName string
		expected  []string
	}{
		{
			desc:     "no whitelist-label",
			labels:   nil,
			expected: nil,
		},
		{
			desc: "whitelist-label with empty string",
			labels: map[string]string{
				TraefikFrontendWhiteListSourceRange: "",
			},
			labelName: TraefikFrontendWhiteListSourceRange,
			expected:  nil,
		},
		{
			desc: "whitelist-label with IPv4 mask",
			labels: map[string]string{
				TraefikFrontendWhiteListSourceRange: "1.2.3.4/16",
			},
			labelName: TraefikFrontendWhiteListSourceRange,
			expected: []string{
				"1.2.3.4/16",
			},
		},
		{
			desc: "whitelist-label with IPv6 mask",
			labels: map[string]string{
				TraefikFrontendWhiteListSourceRange: "fe80::/16",
			},
			labelName: TraefikFrontendWhiteListSourceRange,
			expected: []string{
				"fe80::/16",
			},
		},
		{
			desc: "whitelist-label with multiple masks",
			labels: map[string]string{
				TraefikFrontendWhiteListSourceRange: "1.1.1.1/24, 1234:abcd::42/32",
			},
			labelName: TraefikFrontendWhiteListSourceRange,
			expected: []string{
				"1.1.1.1/24",
				"1234:abcd::42/32",
			},
		},
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			actual := GetFuncSliceString(test.labelName)(test.labels)
			assert.EqualValues(t, test.expected, actual)
		})
	}
}
@ -1,222 +0,0 @@
|
|||
package label
|
||||
|
||||
// Traefik labels
|
||||
const (
|
||||
Prefix = "traefik."
|
||||
SuffixBackend = "backend"
|
||||
SuffixDomain = "domain"
|
||||
SuffixEnable = "enable"
|
||||
SuffixPort = "port"
|
||||
SuffixPortName = "portName"
|
||||
SuffixPortIndex = "portIndex"
|
||||
SuffixProtocol = "protocol"
|
||||
SuffixTags = "tags"
|
||||
SuffixWeight = "weight"
|
||||
SuffixBackendID = "backend.id"
|
||||
SuffixBackendCircuitBreaker = "backend.circuitbreaker"
|
||||
SuffixBackendCircuitBreakerExpression = "backend.circuitbreaker.expression"
|
||||
SuffixBackendHealthCheckScheme = "backend.healthcheck.scheme"
|
||||
SuffixBackendHealthCheckPath = "backend.healthcheck.path"
|
||||
SuffixBackendHealthCheckPort = "backend.healthcheck.port"
|
||||
SuffixBackendHealthCheckInterval = "backend.healthcheck.interval"
|
||||
SuffixBackendHealthCheckTimeout = "backend.healthcheck.timeout"
|
||||
SuffixBackendHealthCheckHostname = "backend.healthcheck.hostname"
|
||||
SuffixBackendHealthCheckHeaders = "backend.healthcheck.headers"
|
||||
SuffixBackendLoadBalancer = "backend.loadbalancer"
|
||||
SuffixBackendLoadBalancerMethod = SuffixBackendLoadBalancer + ".method"
|
||||
SuffixBackendLoadBalancerStickiness = SuffixBackendLoadBalancer + ".stickiness"
|
||||
SuffixBackendLoadBalancerStickinessCookieName = SuffixBackendLoadBalancer + ".stickiness.cookieName"
|
||||
SuffixBackendMaxConnAmount = "backend.maxconn.amount"
|
||||
SuffixBackendMaxConnExtractorFunc = "backend.maxconn.extractorfunc"
|
||||
SuffixBackendBuffering = "backend.buffering"
|
||||
SuffixBackendResponseForwardingFlushInterval = "backend.responseForwarding.flushInterval"
|
||||
SuffixBackendBufferingMaxRequestBodyBytes = SuffixBackendBuffering + ".maxRequestBodyBytes"
|
||||
SuffixBackendBufferingMemRequestBodyBytes = SuffixBackendBuffering + ".memRequestBodyBytes"
|
||||
SuffixBackendBufferingMaxResponseBodyBytes = SuffixBackendBuffering + ".maxResponseBodyBytes"
|
||||
SuffixBackendBufferingMemResponseBodyBytes = SuffixBackendBuffering + ".memResponseBodyBytes"
|
||||
SuffixBackendBufferingRetryExpression = SuffixBackendBuffering + ".retryExpression"
|
||||
SuffixFrontend = "frontend"
|
||||
SuffixFrontendAuth = SuffixFrontend + ".auth"
|
||||
SuffixFrontendAuthBasic = SuffixFrontendAuth + ".basic"
|
||||
SuffixFrontendAuthBasicRealm = SuffixFrontendAuthBasic + ".realm"
|
||||
SuffixFrontendAuthBasicRemoveHeader = SuffixFrontendAuthBasic + ".removeHeader"
|
||||
SuffixFrontendAuthBasicUsers = SuffixFrontendAuthBasic + ".users"
|
||||
SuffixFrontendAuthBasicUsersFile = SuffixFrontendAuthBasic + ".usersFile"
|
||||
SuffixFrontendAuthDigest = SuffixFrontendAuth + ".digest"
|
||||
SuffixFrontendAuthDigestRemoveHeader = SuffixFrontendAuthDigest + ".removeHeader"
|
||||
SuffixFrontendAuthDigestUsers = SuffixFrontendAuthDigest + ".users"
|
||||
SuffixFrontendAuthDigestUsersFile = SuffixFrontendAuthDigest + ".usersFile"
|
||||
SuffixFrontendAuthForward = SuffixFrontendAuth + ".forward"
|
||||
SuffixFrontendAuthForwardAddress = SuffixFrontendAuthForward + ".address"
|
||||
SuffixFrontendAuthForwardAuthResponseHeaders = SuffixFrontendAuthForward + ".authResponseHeaders"
|
||||
SuffixFrontendAuthForwardTLS = SuffixFrontendAuthForward + ".tls"
|
||||
SuffixFrontendAuthForwardTLSCa = SuffixFrontendAuthForwardTLS + ".ca"
|
||||
SuffixFrontendAuthForwardTLSCaOptional = SuffixFrontendAuthForwardTLS + ".caOptional"
|
||||
SuffixFrontendAuthForwardTLSCert = SuffixFrontendAuthForwardTLS + ".cert"
|
||||
SuffixFrontendAuthForwardTLSInsecureSkipVerify = SuffixFrontendAuthForwardTLS + ".insecureSkipVerify"
|
||||
SuffixFrontendAuthForwardTLSKey = SuffixFrontendAuthForwardTLS + ".key"
|
||||
SuffixFrontendAuthForwardTrustForwardHeader = SuffixFrontendAuthForward + ".trustForwardHeader"
|
||||
SuffixFrontendAuthHeaderField = SuffixFrontendAuth + ".headerField"
|
||||
SuffixFrontendEntryPoints = "frontend.entryPoints"
|
||||
SuffixFrontendHeaders = "frontend.headers."
|
||||
SuffixFrontendRequestHeaders = SuffixFrontendHeaders + "customRequestHeaders"
|
||||
SuffixFrontendResponseHeaders = SuffixFrontendHeaders + "customResponseHeaders"
|
||||
SuffixFrontendHeadersAllowedHosts = SuffixFrontendHeaders + "allowedHosts"
|
||||
SuffixFrontendHeadersHostsProxyHeaders = SuffixFrontendHeaders + "hostsProxyHeaders"
|
||||
SuffixFrontendHeadersSSLForceHost = SuffixFrontendHeaders + "SSLForceHost"
|
||||
SuffixFrontendHeadersSSLRedirect = SuffixFrontendHeaders + "SSLRedirect"
|
||||
SuffixFrontendHeadersSSLTemporaryRedirect = SuffixFrontendHeaders + "SSLTemporaryRedirect"
|
||||
SuffixFrontendHeadersSSLHost = SuffixFrontendHeaders + "SSLHost"
|
||||
SuffixFrontendHeadersSSLProxyHeaders = SuffixFrontendHeaders + "SSLProxyHeaders"
|
||||
SuffixFrontendHeadersSTSSeconds = SuffixFrontendHeaders + "STSSeconds"
|
||||
SuffixFrontendHeadersSTSIncludeSubdomains = SuffixFrontendHeaders + "STSIncludeSubdomains"
|
||||
SuffixFrontendHeadersSTSPreload = SuffixFrontendHeaders + "STSPreload"
|
||||
SuffixFrontendHeadersForceSTSHeader = SuffixFrontendHeaders + "forceSTSHeader"
|
||||
SuffixFrontendHeadersFrameDeny = SuffixFrontendHeaders + "frameDeny"
|
||||
SuffixFrontendHeadersCustomFrameOptionsValue = SuffixFrontendHeaders + "customFrameOptionsValue"
|
||||
SuffixFrontendHeadersContentTypeNosniff = SuffixFrontendHeaders + "contentTypeNosniff"
|
||||
SuffixFrontendHeadersBrowserXSSFilter = SuffixFrontendHeaders + "browserXSSFilter"
|
||||
SuffixFrontendHeadersCustomBrowserXSSValue = SuffixFrontendHeaders + "customBrowserXSSValue"
|
||||
SuffixFrontendHeadersContentSecurityPolicy = SuffixFrontendHeaders + "contentSecurityPolicy"
|
||||
SuffixFrontendHeadersPublicKey = SuffixFrontendHeaders + "publicKey"
|
||||
SuffixFrontendHeadersReferrerPolicy = SuffixFrontendHeaders + "referrerPolicy"
|
||||
SuffixFrontendHeadersIsDevelopment = SuffixFrontendHeaders + "isDevelopment"
|
||||
SuffixFrontendPassHostHeader = "frontend.passHostHeader"
|
||||
SuffixFrontendPassTLSClientCert = "frontend.passTLSClientCert"
|
||||
SuffixFrontendPassTLSClientCertPem = SuffixFrontendPassTLSClientCert + ".pem"
|
||||
SuffixFrontendPassTLSClientCertInfos = SuffixFrontendPassTLSClientCert + ".infos"
|
||||
SuffixFrontendPassTLSClientCertInfosNotAfter = SuffixFrontendPassTLSClientCertInfos + ".notAfter"
|
||||
SuffixFrontendPassTLSClientCertInfosNotBefore = SuffixFrontendPassTLSClientCertInfos + ".notBefore"
|
||||
SuffixFrontendPassTLSClientCertInfosSans = SuffixFrontendPassTLSClientCertInfos + ".sans"
|
||||
SuffixFrontendPassTLSClientCertInfosSubject = SuffixFrontendPassTLSClientCertInfos + ".subject"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectCommonName = SuffixFrontendPassTLSClientCertInfosSubject + ".commonName"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectCountry = SuffixFrontendPassTLSClientCertInfosSubject + ".country"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectLocality = SuffixFrontendPassTLSClientCertInfosSubject + ".locality"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectOrganization = SuffixFrontendPassTLSClientCertInfosSubject + ".organization"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectProvince = SuffixFrontendPassTLSClientCertInfosSubject + ".province"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber = SuffixFrontendPassTLSClientCertInfosSubject + ".serialNumber"
|
||||
SuffixFrontendPassTLSCert = "frontend.passTLSCert" // Deprecated
|
||||
SuffixFrontendPriority = "frontend.priority"
|
||||
SuffixFrontendRateLimitExtractorFunc = "frontend.rateLimit.extractorFunc"
|
||||
SuffixFrontendRedirectEntryPoint = "frontend.redirect.entryPoint"
|
||||
SuffixFrontendRedirectRegex = "frontend.redirect.regex"
|
||||
SuffixFrontendRedirectReplacement = "frontend.redirect.replacement"
|
||||
SuffixFrontendRedirectPermanent = "frontend.redirect.permanent"
|
||||
SuffixFrontendRule = "frontend.rule"
|
||||
SuffixFrontendWhiteList = "frontend.whiteList."
|
||||
SuffixFrontendWhiteListSourceRange = SuffixFrontendWhiteList + "sourceRange"
|
||||
SuffixFrontendWhiteListIPStrategy = SuffixFrontendWhiteList + "ipStrategy"
|
||||
SuffixFrontendWhiteListIPStrategyDepth = SuffixFrontendWhiteListIPStrategy + ".depth"
|
||||
SuffixFrontendWhiteListIPStrategyExcludedIPS = SuffixFrontendWhiteListIPStrategy + ".excludedIPs"
|
||||
TraefikDomain = Prefix + SuffixDomain
|
||||
TraefikEnable = Prefix + SuffixEnable
|
||||
TraefikPort = Prefix + SuffixPort
|
||||
TraefikPortName = Prefix + SuffixPortName
|
||||
TraefikPortIndex = Prefix + SuffixPortIndex
|
||||
TraefikProtocol = Prefix + SuffixProtocol
|
||||
TraefikTags = Prefix + SuffixTags
|
||||
TraefikWeight = Prefix + SuffixWeight
|
||||
TraefikBackend = Prefix + SuffixBackend
|
||||
TraefikBackendID = Prefix + SuffixBackendID
|
||||
TraefikBackendCircuitBreaker = Prefix + SuffixBackendCircuitBreaker
|
||||
TraefikBackendCircuitBreakerExpression = Prefix + SuffixBackendCircuitBreakerExpression
|
||||
TraefikBackendHealthCheckScheme = Prefix + SuffixBackendHealthCheckScheme
|
||||
TraefikBackendHealthCheckPath = Prefix + SuffixBackendHealthCheckPath
|
||||
TraefikBackendHealthCheckPort = Prefix + SuffixBackendHealthCheckPort
|
||||
TraefikBackendHealthCheckInterval = Prefix + SuffixBackendHealthCheckInterval
|
||||
TraefikBackendHealthCheckTimeout = Prefix + SuffixBackendHealthCheckTimeout
|
||||
TraefikBackendHealthCheckHostname = Prefix + SuffixBackendHealthCheckHostname
|
||||
TraefikBackendHealthCheckHeaders = Prefix + SuffixBackendHealthCheckHeaders
|
||||
TraefikBackendLoadBalancer = Prefix + SuffixBackendLoadBalancer
|
||||
TraefikBackendLoadBalancerMethod = Prefix + SuffixBackendLoadBalancerMethod
|
||||
TraefikBackendLoadBalancerStickiness = Prefix + SuffixBackendLoadBalancerStickiness
|
||||
TraefikBackendLoadBalancerStickinessCookieName = Prefix + SuffixBackendLoadBalancerStickinessCookieName
|
||||
TraefikBackendMaxConnAmount = Prefix + SuffixBackendMaxConnAmount
|
||||
TraefikBackendMaxConnExtractorFunc = Prefix + SuffixBackendMaxConnExtractorFunc
|
||||
TraefikBackendBuffering = Prefix + SuffixBackendBuffering
|
||||
TraefikBackendResponseForwardingFlushInterval = Prefix + SuffixBackendResponseForwardingFlushInterval
|
||||
TraefikBackendBufferingMaxRequestBodyBytes = Prefix + SuffixBackendBufferingMaxRequestBodyBytes
|
||||
TraefikBackendBufferingMemRequestBodyBytes = Prefix + SuffixBackendBufferingMemRequestBodyBytes
|
||||
TraefikBackendBufferingMaxResponseBodyBytes = Prefix + SuffixBackendBufferingMaxResponseBodyBytes
|
||||
TraefikBackendBufferingMemResponseBodyBytes = Prefix + SuffixBackendBufferingMemResponseBodyBytes
|
||||
TraefikBackendBufferingRetryExpression = Prefix + SuffixBackendBufferingRetryExpression
|
||||
TraefikFrontend = Prefix + SuffixFrontend
|
||||
TraefikFrontendAuth = Prefix + SuffixFrontendAuth
|
||||
TraefikFrontendAuthBasic = Prefix + SuffixFrontendAuthBasic
|
||||
TraefikFrontendAuthBasicRealm = Prefix + SuffixFrontendAuthBasicRealm
|
||||
TraefikFrontendAuthBasicRemoveHeader = Prefix + SuffixFrontendAuthBasicRemoveHeader
|
||||
TraefikFrontendAuthBasicUsers = Prefix + SuffixFrontendAuthBasicUsers
|
||||
TraefikFrontendAuthBasicUsersFile = Prefix + SuffixFrontendAuthBasicUsersFile
|
||||
TraefikFrontendAuthDigest = Prefix + SuffixFrontendAuthDigest
|
||||
TraefikFrontendAuthDigestRemoveHeader = Prefix + SuffixFrontendAuthDigestRemoveHeader
|
||||
TraefikFrontendAuthDigestUsers = Prefix + SuffixFrontendAuthDigestUsers
|
||||
TraefikFrontendAuthDigestUsersFile = Prefix + SuffixFrontendAuthDigestUsersFile
|
||||
TraefikFrontendAuthForward = Prefix + SuffixFrontendAuthForward
|
||||
TraefikFrontendAuthForwardAddress = Prefix + SuffixFrontendAuthForwardAddress
|
||||
TraefikFrontendAuthForwardAuthResponseHeaders = Prefix + SuffixFrontendAuthForwardAuthResponseHeaders
|
||||
TraefikFrontendAuthForwardTLS = Prefix + SuffixFrontendAuthForwardTLS
|
||||
TraefikFrontendAuthForwardTLSCa = Prefix + SuffixFrontendAuthForwardTLSCa
|
||||
TraefikFrontendAuthForwardTLSCaOptional = Prefix + SuffixFrontendAuthForwardTLSCaOptional
|
||||
TraefikFrontendAuthForwardTLSCert = Prefix + SuffixFrontendAuthForwardTLSCert
|
||||
TraefikFrontendAuthForwardTLSInsecureSkipVerify = Prefix + SuffixFrontendAuthForwardTLSInsecureSkipVerify
|
||||
TraefikFrontendAuthForwardTLSKey = Prefix + SuffixFrontendAuthForwardTLSKey
|
||||
TraefikFrontendAuthForwardTrustForwardHeader = Prefix + SuffixFrontendAuthForwardTrustForwardHeader
|
||||
TraefikFrontendAuthHeaderField = Prefix + SuffixFrontendAuthHeaderField
|
||||
TraefikFrontendEntryPoints = Prefix + SuffixFrontendEntryPoints
|
||||
TraefikFrontendPassHostHeader = Prefix + SuffixFrontendPassHostHeader
|
||||
TraefikFrontendPassTLSClientCert = Prefix + SuffixFrontendPassTLSClientCert
|
||||
TraefikFrontendPassTLSClientCertPem = Prefix + SuffixFrontendPassTLSClientCertPem
|
||||
TraefikFrontendPassTLSClientCertInfos = Prefix + SuffixFrontendPassTLSClientCertInfos
|
||||
TraefikFrontendPassTLSClientCertInfosNotAfter = Prefix + SuffixFrontendPassTLSClientCertInfosNotAfter
|
||||
TraefikFrontendPassTLSClientCertInfosNotBefore = Prefix + SuffixFrontendPassTLSClientCertInfosNotBefore
|
||||
TraefikFrontendPassTLSClientCertInfosSans = Prefix + SuffixFrontendPassTLSClientCertInfosSans
|
||||
TraefikFrontendPassTLSClientCertInfosSubject = Prefix + SuffixFrontendPassTLSClientCertInfosSubject
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectCommonName = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectCommonName
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectCountry = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectCountry
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectLocality = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectLocality
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectOrganization = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectOrganization
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectProvince = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectProvince
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber
|
||||
TraefikFrontendPassTLSCert = Prefix + SuffixFrontendPassTLSCert // Deprecated
|
||||
TraefikFrontendPriority = Prefix + SuffixFrontendPriority
|
||||
TraefikFrontendRateLimitExtractorFunc = Prefix + SuffixFrontendRateLimitExtractorFunc
|
||||
TraefikFrontendRedirectEntryPoint = Prefix + SuffixFrontendRedirectEntryPoint
|
||||
TraefikFrontendRedirectRegex = Prefix + SuffixFrontendRedirectRegex
|
||||
TraefikFrontendRedirectReplacement = Prefix + SuffixFrontendRedirectReplacement
|
||||
TraefikFrontendRedirectPermanent = Prefix + SuffixFrontendRedirectPermanent
|
||||
TraefikFrontendRule = Prefix + SuffixFrontendRule
|
||||
TraefikFrontendWhiteListSourceRange = Prefix + SuffixFrontendWhiteListSourceRange
|
||||
TraefikFrontendWhiteListIPStrategy = Prefix + SuffixFrontendWhiteListIPStrategy
|
||||
TraefikFrontendWhiteListIPStrategyDepth = Prefix + SuffixFrontendWhiteListIPStrategyDepth
|
||||
TraefikFrontendWhiteListIPStrategyExcludedIPS = Prefix + SuffixFrontendWhiteListIPStrategyExcludedIPS
|
||||
TraefikFrontendRequestHeaders = Prefix + SuffixFrontendRequestHeaders
|
||||
TraefikFrontendResponseHeaders = Prefix + SuffixFrontendResponseHeaders
|
||||
TraefikFrontendAllowedHosts = Prefix + SuffixFrontendHeadersAllowedHosts
|
||||
TraefikFrontendHostsProxyHeaders = Prefix + SuffixFrontendHeadersHostsProxyHeaders
|
||||
TraefikFrontendSSLForceHost = Prefix + SuffixFrontendHeadersSSLForceHost
|
||||
TraefikFrontendSSLRedirect = Prefix + SuffixFrontendHeadersSSLRedirect
|
||||
TraefikFrontendSSLTemporaryRedirect = Prefix + SuffixFrontendHeadersSSLTemporaryRedirect
|
||||
TraefikFrontendSSLHost = Prefix + SuffixFrontendHeadersSSLHost
|
||||
TraefikFrontendSSLProxyHeaders = Prefix + SuffixFrontendHeadersSSLProxyHeaders
|
||||
TraefikFrontendSTSSeconds = Prefix + SuffixFrontendHeadersSTSSeconds
|
||||
TraefikFrontendSTSIncludeSubdomains = Prefix + SuffixFrontendHeadersSTSIncludeSubdomains
|
||||
TraefikFrontendSTSPreload = Prefix + SuffixFrontendHeadersSTSPreload
|
||||
TraefikFrontendForceSTSHeader = Prefix + SuffixFrontendHeadersForceSTSHeader
|
||||
TraefikFrontendFrameDeny = Prefix + SuffixFrontendHeadersFrameDeny
|
||||
TraefikFrontendCustomFrameOptionsValue = Prefix + SuffixFrontendHeadersCustomFrameOptionsValue
|
||||
TraefikFrontendContentTypeNosniff = Prefix + SuffixFrontendHeadersContentTypeNosniff
|
||||
TraefikFrontendBrowserXSSFilter = Prefix + SuffixFrontendHeadersBrowserXSSFilter
|
||||
TraefikFrontendCustomBrowserXSSValue = Prefix + SuffixFrontendHeadersCustomBrowserXSSValue
|
||||
TraefikFrontendContentSecurityPolicy = Prefix + SuffixFrontendHeadersContentSecurityPolicy
|
||||
TraefikFrontendPublicKey = Prefix + SuffixFrontendHeadersPublicKey
|
||||
TraefikFrontendReferrerPolicy = Prefix + SuffixFrontendHeadersReferrerPolicy
|
||||
TraefikFrontendIsDevelopment = Prefix + SuffixFrontendHeadersIsDevelopment
|
||||
BaseFrontendErrorPage = "frontend.errors."
|
||||
SuffixErrorPageBackend = "backend"
|
||||
SuffixErrorPageQuery = "query"
|
||||
SuffixErrorPageStatus = "status"
|
||||
BaseFrontendRateLimit = "frontend.rateLimit.rateSet."
|
||||
SuffixRateLimitPeriod = "period"
|
||||
SuffixRateLimitAverage = "average"
|
||||
SuffixRateLimitBurst = "burst"
|
||||
)
|
||||
|
|
@ -1,417 +0,0 @@
|
|||
package label
|
||||
|
||||
import (
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
// GetWhiteList creates a white list from labels.
func GetWhiteList(labels map[string]string) *types.WhiteList {
|
||||
ranges := GetSliceStringValue(labels, TraefikFrontendWhiteListSourceRange)
|
||||
if len(ranges) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &types.WhiteList{
|
||||
SourceRange: ranges,
|
||||
IPStrategy: getIPStrategy(labels),
|
||||
}
|
||||
}
|
||||
|
||||
func getIPStrategy(labels map[string]string) *types.IPStrategy {
|
||||
ipStrategy := GetBoolValue(labels, TraefikFrontendWhiteListIPStrategy, false)
|
||||
depth := GetIntValue(labels, TraefikFrontendWhiteListIPStrategyDepth, 0)
|
||||
excludedIPs := GetSliceStringValue(labels, TraefikFrontendWhiteListIPStrategyExcludedIPS)
|
||||
|
||||
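// No depth, no excluded IPs and no explicit ipStrategy label: nothing to configure.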
if depth == 0 && len(excludedIPs) == 0 && !ipStrategy {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &types.IPStrategy{
|
||||
Depth: depth,
|
||||
ExcludedIPs: excludedIPs,
|
||||
}
|
||||
}
|
||||
|
||||
// GetRedirect creates a redirect from labels.
func GetRedirect(labels map[string]string) *types.Redirect {
|
||||
permanent := GetBoolValue(labels, TraefikFrontendRedirectPermanent, false)
|
||||
|
||||
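// An entry point redirect takes precedence over a regex redirect when both kinds of labels are set.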
if Has(labels, TraefikFrontendRedirectEntryPoint) {
|
||||
return &types.Redirect{
|
||||
EntryPoint: GetStringValue(labels, TraefikFrontendRedirectEntryPoint, ""),
|
||||
Permanent: permanent,
|
||||
}
|
||||
}
|
||||
|
||||
if Has(labels, TraefikFrontendRedirectRegex) &&
|
||||
Has(labels, TraefikFrontendRedirectReplacement) {
|
||||
return &types.Redirect{
|
||||
Regex: GetStringValue(labels, TraefikFrontendRedirectRegex, ""),
|
||||
Replacement: GetStringValue(labels, TraefikFrontendRedirectReplacement, ""),
|
||||
Permanent: permanent,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTLSClientCert creates the TLS client certificate header configuration from labels.
func GetTLSClientCert(labels map[string]string) *types.TLSClientHeaders {
|
||||
if !HasPrefix(labels, TraefikFrontendPassTLSClientCert) {
|
||||
return nil
|
||||
}
|
||||
|
||||
tlsClientHeaders := &types.TLSClientHeaders{
|
||||
PEM: GetBoolValue(labels, TraefikFrontendPassTLSClientCertPem, false),
|
||||
}
|
||||
|
||||
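// Detailed certificate infos, and the nested subject fields, are only filled in when their label prefixes are present.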
if HasPrefix(labels, TraefikFrontendPassTLSClientCertInfos) {
|
||||
infos := &types.TLSClientCertificateInfos{
|
||||
NotAfter: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosNotAfter, false),
|
||||
NotBefore: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosNotBefore, false),
|
||||
Sans: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSans, false),
|
||||
}
|
||||
|
||||
if HasPrefix(labels, TraefikFrontendPassTLSClientCertInfosSubject) {
|
||||
subject := &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectCommonName, false),
|
||||
Country: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectCountry, false),
|
||||
Locality: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectLocality, false),
|
||||
Organization: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectOrganization, false),
|
||||
Province: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectProvince, false),
|
||||
SerialNumber: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, false),
|
||||
}
|
||||
infos.Subject = subject
|
||||
}
|
||||
tlsClientHeaders.Infos = infos
|
||||
}
|
||||
return tlsClientHeaders
|
||||
}
|
||||
|
||||
// GetAuth creates an auth configuration from labels.
func GetAuth(labels map[string]string) *types.Auth {
|
||||
if !HasPrefix(labels, TraefikFrontendAuth) {
|
||||
return nil
|
||||
}
|
||||
|
||||
auth := &types.Auth{
|
||||
HeaderField: GetStringValue(labels, TraefikFrontendAuthHeaderField, ""),
|
||||
}
|
||||
|
||||
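// Only one auth type is kept: basic takes precedence over digest, which takes precedence over forward.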
if HasPrefix(labels, TraefikFrontendAuthBasic) {
|
||||
auth.Basic = getAuthBasic(labels)
|
||||
} else if HasPrefix(labels, TraefikFrontendAuthDigest) {
|
||||
auth.Digest = getAuthDigest(labels)
|
||||
} else if HasPrefix(labels, TraefikFrontendAuthForward) {
|
||||
auth.Forward = getAuthForward(labels)
|
||||
}
|
||||
|
||||
return auth
|
||||
}
|
||||
|
||||
// getAuthBasic creates a Basic Auth configuration from labels.
func getAuthBasic(labels map[string]string) *types.Basic {
|
||||
basicAuth := &types.Basic{
|
||||
Realm: GetStringValue(labels, TraefikFrontendAuthBasicRealm, ""),
|
||||
UsersFile: GetStringValue(labels, TraefikFrontendAuthBasicUsersFile, ""),
|
||||
RemoveHeader: GetBoolValue(labels, TraefikFrontendAuthBasicRemoveHeader, false),
|
||||
}
|
||||
|
||||
// backward compatibility
|
||||
if Has(labels, TraefikFrontendAuthBasic) {
|
||||
basicAuth.Users = GetSliceStringValue(labels, TraefikFrontendAuthBasic)
|
||||
log.Warnf("Deprecated configuration found: %s. Please use %s.", TraefikFrontendAuthBasic, TraefikFrontendAuthBasicUsers)
|
||||
} else {
|
||||
basicAuth.Users = GetSliceStringValue(labels, TraefikFrontendAuthBasicUsers)
|
||||
}
|
||||
|
||||
return basicAuth
|
||||
}
|
||||
|
||||
// getAuthDigest creates a Digest Auth configuration from labels.
func getAuthDigest(labels map[string]string) *types.Digest {
|
||||
return &types.Digest{
|
||||
Users: GetSliceStringValue(labels, TraefikFrontendAuthDigestUsers),
|
||||
UsersFile: GetStringValue(labels, TraefikFrontendAuthDigestUsersFile, ""),
|
||||
RemoveHeader: GetBoolValue(labels, TraefikFrontendAuthDigestRemoveHeader, false),
|
||||
}
|
||||
}
|
||||
|
||||
// getAuthForward creates a Forward Auth configuration from labels.
func getAuthForward(labels map[string]string) *types.Forward {
|
||||
forwardAuth := &types.Forward{
|
||||
Address: GetStringValue(labels, TraefikFrontendAuthForwardAddress, ""),
|
||||
AuthResponseHeaders: GetSliceStringValue(labels, TraefikFrontendAuthForwardAuthResponseHeaders),
|
||||
TrustForwardHeader: GetBoolValue(labels, TraefikFrontendAuthForwardTrustForwardHeader, false),
|
||||
}
|
||||
|
||||
// TLS configuration
|
||||
if HasPrefix(labels, TraefikFrontendAuthForwardTLS) {
|
||||
forwardAuth.TLS = &types.ClientTLS{
|
||||
CA: GetStringValue(labels, TraefikFrontendAuthForwardTLSCa, ""),
|
||||
CAOptional: GetBoolValue(labels, TraefikFrontendAuthForwardTLSCaOptional, false),
|
||||
Cert: GetStringValue(labels, TraefikFrontendAuthForwardTLSCert, ""),
|
||||
InsecureSkipVerify: GetBoolValue(labels, TraefikFrontendAuthForwardTLSInsecureSkipVerify, false),
|
||||
Key: GetStringValue(labels, TraefikFrontendAuthForwardTLSKey, ""),
|
||||
}
|
||||
}
|
||||
|
||||
return forwardAuth
|
||||
}
|
||||
|
||||
// GetErrorPages creates error pages from labels.
func GetErrorPages(labels map[string]string) map[string]*types.ErrorPage {
|
||||
prefix := Prefix + BaseFrontendErrorPage
|
||||
return ParseErrorPages(labels, prefix, RegexpFrontendErrorPage)
|
||||
}
|
||||
|
||||
// ParseErrorPages parses error page labels to create a map of ErrorPage structs.
func ParseErrorPages(labels map[string]string, labelPrefix string, labelRegex *regexp.Regexp) map[string]*types.ErrorPage {
|
||||
var errorPages map[string]*types.ErrorPage
|
||||
|
||||
for lblName, value := range labels {
|
||||
if strings.HasPrefix(lblName, labelPrefix) {
|
||||
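// submatch[1] is the error page name, submatch[2] the field suffix (status, query or backend).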
submatch := labelRegex.FindStringSubmatch(lblName)
|
||||
if len(submatch) != 3 {
|
||||
log.Errorf("Invalid page error label: %s, sub-match: %v", lblName, submatch)
|
||||
continue
|
||||
}
|
||||
|
||||
if errorPages == nil {
|
||||
errorPages = make(map[string]*types.ErrorPage)
|
||||
}
|
||||
|
||||
pageName := submatch[1]
|
||||
|
||||
ep, ok := errorPages[pageName]
|
||||
if !ok {
|
||||
ep = &types.ErrorPage{}
|
||||
errorPages[pageName] = ep
|
||||
}
|
||||
|
||||
switch submatch[2] {
|
||||
case SuffixErrorPageStatus:
|
||||
ep.Status = SplitAndTrimString(value, ",")
|
||||
case SuffixErrorPageQuery:
|
||||
ep.Query = value
|
||||
case SuffixErrorPageBackend:
|
||||
ep.Backend = value
|
||||
default:
|
||||
log.Errorf("Invalid page error label: %s", lblName)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errorPages
|
||||
}
|
||||
|
||||
// GetRateLimit creates a rate limit configuration from labels.
func GetRateLimit(labels map[string]string) *types.RateLimit {
|
||||
extractorFunc := GetStringValue(labels, TraefikFrontendRateLimitExtractorFunc, "")
|
||||
if len(extractorFunc) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
prefix := Prefix + BaseFrontendRateLimit
|
||||
limits := ParseRateSets(labels, prefix, RegexpFrontendRateLimit)
|
||||
|
||||
return &types.RateLimit{
|
||||
ExtractorFunc: extractorFunc,
|
||||
RateSet: limits,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseRateSets parses rate limit labels to create a map of Rate structs.
func ParseRateSets(labels map[string]string, labelPrefix string, labelRegex *regexp.Regexp) map[string]*types.Rate {
|
||||
var rateSets map[string]*types.Rate
|
||||
|
||||
for lblName, rawValue := range labels {
|
||||
if strings.HasPrefix(lblName, labelPrefix) && len(rawValue) > 0 {
|
||||
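// submatch[1] is the rate set name, submatch[2] the field suffix (period, average or burst).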
submatch := labelRegex.FindStringSubmatch(lblName)
|
||||
if len(submatch) != 3 {
|
||||
log.Errorf("Invalid rate limit label: %s, sub-match: %v", lblName, submatch)
|
||||
continue
|
||||
}
|
||||
|
||||
if rateSets == nil {
|
||||
rateSets = make(map[string]*types.Rate)
|
||||
}
|
||||
|
||||
limitName := submatch[1]
|
||||
|
||||
ep, ok := rateSets[limitName]
|
||||
if !ok {
|
||||
ep = &types.Rate{}
|
||||
rateSets[limitName] = ep
|
||||
}
|
||||
|
||||
switch submatch[2] {
|
||||
case "period":
|
||||
var d parse.Duration
|
||||
err := d.Set(rawValue)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to parse %q: %q. %v", lblName, rawValue, err)
|
||||
continue
|
||||
}
|
||||
ep.Period = d
|
||||
case "average":
|
||||
value, err := strconv.ParseInt(rawValue, 10, 64)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to parse %q: %q. %v", lblName, rawValue, err)
|
||||
continue
|
||||
}
|
||||
ep.Average = value
|
||||
case "burst":
|
||||
value, err := strconv.ParseInt(rawValue, 10, 64)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to parse %q: %q. %v", lblName, rawValue, err)
|
||||
continue
|
||||
}
|
||||
ep.Burst = value
|
||||
default:
|
||||
log.Errorf("Invalid rate limit label: %s", lblName)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
return rateSets
|
||||
}
|
||||
|
||||
// GetHeaders creates a headers configuration from labels.
func GetHeaders(labels map[string]string) *types.Headers {
|
||||
headers := &types.Headers{
|
||||
CustomRequestHeaders: GetMapValue(labels, TraefikFrontendRequestHeaders),
|
||||
CustomResponseHeaders: GetMapValue(labels, TraefikFrontendResponseHeaders),
|
||||
SSLProxyHeaders: GetMapValue(labels, TraefikFrontendSSLProxyHeaders),
|
||||
AllowedHosts: GetSliceStringValue(labels, TraefikFrontendAllowedHosts),
|
||||
HostsProxyHeaders: GetSliceStringValue(labels, TraefikFrontendHostsProxyHeaders),
|
||||
STSSeconds: GetInt64Value(labels, TraefikFrontendSTSSeconds, 0),
|
||||
SSLRedirect: GetBoolValue(labels, TraefikFrontendSSLRedirect, false),
|
||||
SSLTemporaryRedirect: GetBoolValue(labels, TraefikFrontendSSLTemporaryRedirect, false),
|
||||
SSLForceHost: GetBoolValue(labels, TraefikFrontendSSLForceHost, false),
|
||||
STSIncludeSubdomains: GetBoolValue(labels, TraefikFrontendSTSIncludeSubdomains, false),
|
||||
STSPreload: GetBoolValue(labels, TraefikFrontendSTSPreload, false),
|
||||
ForceSTSHeader: GetBoolValue(labels, TraefikFrontendForceSTSHeader, false),
|
||||
FrameDeny: GetBoolValue(labels, TraefikFrontendFrameDeny, false),
|
||||
ContentTypeNosniff: GetBoolValue(labels, TraefikFrontendContentTypeNosniff, false),
|
||||
BrowserXSSFilter: GetBoolValue(labels, TraefikFrontendBrowserXSSFilter, false),
|
||||
IsDevelopment: GetBoolValue(labels, TraefikFrontendIsDevelopment, false),
|
||||
SSLHost: GetStringValue(labels, TraefikFrontendSSLHost, ""),
|
||||
CustomFrameOptionsValue: GetStringValue(labels, TraefikFrontendCustomFrameOptionsValue, ""),
|
||||
ContentSecurityPolicy: GetStringValue(labels, TraefikFrontendContentSecurityPolicy, ""),
|
||||
PublicKey: GetStringValue(labels, TraefikFrontendPublicKey, ""),
|
||||
ReferrerPolicy: GetStringValue(labels, TraefikFrontendReferrerPolicy, ""),
|
||||
CustomBrowserXSSValue: GetStringValue(labels, TraefikFrontendCustomBrowserXSSValue, ""),
|
||||
}
|
||||
|
||||
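// Return nil when neither secure headers nor custom headers are actually defined.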
if !headers.HasSecureHeadersDefined() && !headers.HasCustomHeadersDefined() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return headers
|
||||
}
|
||||
|
||||
// GetMaxConn creates a max connection configuration from labels.
func GetMaxConn(labels map[string]string) *types.MaxConn {
|
||||
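// math.MinInt64 acts as a sentinel default: if the amount label is missing, no MaxConn configuration is created.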
amount := GetInt64Value(labels, TraefikBackendMaxConnAmount, math.MinInt64)
|
||||
extractorFunc := GetStringValue(labels, TraefikBackendMaxConnExtractorFunc, DefaultBackendMaxconnExtractorFunc)
|
||||
|
||||
if amount == math.MinInt64 || len(extractorFunc) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &types.MaxConn{
|
||||
Amount: amount,
|
||||
ExtractorFunc: extractorFunc,
|
||||
}
|
||||
}
|
||||
|
||||
// GetHealthCheck creates a health check configuration from labels.
func GetHealthCheck(labels map[string]string) *types.HealthCheck {
|
||||
path := GetStringValue(labels, TraefikBackendHealthCheckPath, "")
|
||||
if len(path) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
scheme := GetStringValue(labels, TraefikBackendHealthCheckScheme, "")
|
||||
port := GetIntValue(labels, TraefikBackendHealthCheckPort, DefaultBackendHealthCheckPort)
|
||||
interval := GetStringValue(labels, TraefikBackendHealthCheckInterval, "")
|
||||
timeout := GetStringValue(labels, TraefikBackendHealthCheckTimeout, "")
|
||||
hostname := GetStringValue(labels, TraefikBackendHealthCheckHostname, "")
|
||||
headers := GetMapValue(labels, TraefikBackendHealthCheckHeaders)
|
||||
|
||||
return &types.HealthCheck{
|
||||
Scheme: scheme,
|
||||
Path: path,
|
||||
Port: port,
|
||||
Interval: interval,
|
||||
Timeout: timeout,
|
||||
Hostname: hostname,
|
||||
Headers: headers,
|
||||
}
|
||||
}
|
||||
|
||||
// GetResponseForwarding creates a ResponseForwarding configuration from labels.
func GetResponseForwarding(labels map[string]string) *types.ResponseForwarding {
|
||||
if !HasPrefix(labels, TraefikBackendResponseForwardingFlushInterval) {
|
||||
return nil
|
||||
}
|
||||
|
||||
value := GetStringValue(labels, TraefikBackendResponseForwardingFlushInterval, "0")
|
||||
|
||||
return &types.ResponseForwarding{
|
||||
FlushInterval: value,
|
||||
}
|
||||
}
|
||||
|
||||
// GetBuffering creates a buffering configuration from labels.
func GetBuffering(labels map[string]string) *types.Buffering {
|
||||
if !HasPrefix(labels, TraefikBackendBuffering) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &types.Buffering{
|
||||
MaxRequestBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMaxRequestBodyBytes, 0),
|
||||
MaxResponseBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMaxResponseBodyBytes, 0),
|
||||
MemRequestBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMemRequestBodyBytes, 0),
|
||||
MemResponseBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMemResponseBodyBytes, 0),
|
||||
RetryExpression: GetStringValue(labels, TraefikBackendBufferingRetryExpression, ""),
|
||||
}
|
||||
}
|
||||
|
||||
// GetCircuitBreaker creates a circuit breaker configuration from labels.
func GetCircuitBreaker(labels map[string]string) *types.CircuitBreaker {
|
||||
circuitBreaker := GetStringValue(labels, TraefikBackendCircuitBreakerExpression, "")
|
||||
if len(circuitBreaker) == 0 {
|
||||
return nil
|
||||
}
|
||||
return &types.CircuitBreaker{Expression: circuitBreaker}
|
||||
}
|
||||
|
||||
// GetLoadBalancer creates a load balancer configuration from labels.
func GetLoadBalancer(labels map[string]string) *types.LoadBalancer {
|
||||
if !HasPrefix(labels, TraefikBackendLoadBalancer) {
|
||||
return nil
|
||||
}
|
||||
|
||||
method := GetStringValue(labels, TraefikBackendLoadBalancerMethod, DefaultBackendLoadBalancerMethod)
|
||||
|
||||
lb := &types.LoadBalancer{
|
||||
Method: method,
|
||||
}
|
||||
|
||||
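// Stickiness is opt-in; when enabled, the cookie name falls back to its default if the label is not set.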
if GetBoolValue(labels, TraefikBackendLoadBalancerStickiness, false) {
|
||||
cookieName := GetStringValue(labels, TraefikBackendLoadBalancerStickinessCookieName, DefaultBackendLoadbalancerStickinessCookieName)
|
||||
lb.Stickiness = &types.Stickiness{CookieName: cookieName}
|
||||
}
|
||||
|
||||
return lb
|
||||
}
|
||||
|
|
@ -1,996 +0,0 @@
|
|||
package label
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseErrorPages(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected map[string]*types.ErrorPage
|
||||
}{
|
||||
{
|
||||
desc: "2 errors pages",
|
||||
labels: map[string]string{
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageStatus: "404",
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageBackend: "foo_backend",
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageQuery: "foo_query",
|
||||
Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageStatus: "500,600",
|
||||
Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageBackend: "bar_backend",
|
||||
Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageQuery: "bar_query",
|
||||
},
|
||||
expected: map[string]*types.ErrorPage{
|
||||
"foo": {
|
||||
Status: []string{"404"},
|
||||
Query: "foo_query",
|
||||
Backend: "foo_backend",
|
||||
},
|
||||
"bar": {
|
||||
Status: []string{"500", "600"},
|
||||
Query: "bar_query",
|
||||
Backend: "bar_backend",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "only status field",
|
||||
labels: map[string]string{
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageStatus: "404",
|
||||
},
|
||||
expected: map[string]*types.ErrorPage{
|
||||
"foo": {
|
||||
Status: []string{"404"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "invalid field",
|
||||
labels: map[string]string{
|
||||
Prefix + BaseFrontendErrorPage + "foo." + "courgette": "404",
|
||||
},
|
||||
expected: map[string]*types.ErrorPage{"foo": {}},
|
||||
},
|
||||
{
|
||||
desc: "no error pages labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pages := ParseErrorPages(test.labels, Prefix+BaseFrontendErrorPage, RegexpFrontendErrorPage)
|
||||
|
||||
assert.EqualValues(t, test.expected, pages)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRateSets(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected map[string]*types.Rate
|
||||
}{
|
||||
{
|
||||
desc: "2 rate limits",
|
||||
labels: map[string]string{
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitPeriod: "6",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitAverage: "12",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitBurst: "18",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitPeriod: "3",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitAverage: "6",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitBurst: "9",
|
||||
},
|
||||
expected: map[string]*types.Rate{
|
||||
"foo": {
|
||||
Period: parse.Duration(6 * time.Second),
|
||||
Average: 12,
|
||||
Burst: 18,
|
||||
},
|
||||
"bar": {
|
||||
Period: parse.Duration(3 * time.Second),
|
||||
Average: 6,
|
||||
Burst: 9,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "no rate limits labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
rateSets := ParseRateSets(test.labels, Prefix+BaseFrontendRateLimit, RegexpFrontendRateLimit)
|
||||
|
||||
assert.EqualValues(t, test.expected, rateSets)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWhiteList(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.WhiteList
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no white list labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when only range",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
},
|
||||
expected: &types.WhiteList{
|
||||
SourceRange: []string{
|
||||
"10.10.10.10",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct with ip strategy depth",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
TraefikFrontendWhiteListIPStrategyDepth: "5",
|
||||
},
|
||||
expected: &types.WhiteList{
|
||||
SourceRange: []string{
|
||||
"10.10.10.10",
|
||||
},
|
||||
IPStrategy: &types.IPStrategy{
|
||||
Depth: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct with ip strategy depth and excluded ips",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
TraefikFrontendWhiteListIPStrategyDepth: "5",
|
||||
TraefikFrontendWhiteListIPStrategyExcludedIPS: "10.10.10.10,10.10.10.11",
|
||||
},
|
||||
expected: &types.WhiteList{
|
||||
SourceRange: []string{
|
||||
"10.10.10.10",
|
||||
},
|
||||
IPStrategy: &types.IPStrategy{
|
||||
Depth: 5,
|
||||
ExcludedIPs: []string{
|
||||
"10.10.10.10",
|
||||
"10.10.10.11",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct with ip strategy (remoteAddr) with no depth and no excludedIPs",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
TraefikFrontendWhiteListIPStrategy: "true",
|
||||
},
|
||||
expected: &types.WhiteList{
|
||||
SourceRange: []string{
|
||||
"10.10.10.10",
|
||||
},
|
||||
IPStrategy: &types.IPStrategy{
|
||||
Depth: 0,
|
||||
ExcludedIPs: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct with ip strategy with depth",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendWhiteListSourceRange: "10.10.10.10",
|
||||
TraefikFrontendWhiteListIPStrategy: "true",
|
||||
TraefikFrontendWhiteListIPStrategyDepth: "5",
|
||||
},
|
||||
expected: &types.WhiteList{
|
||||
SourceRange: []string{
|
||||
"10.10.10.10",
|
||||
},
|
||||
IPStrategy: &types.IPStrategy{
|
||||
Depth: 5,
|
||||
ExcludedIPs: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetWhiteList(test.labels)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCircuitBreaker(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.CircuitBreaker
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no CB label",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when CB label is set",
|
||||
labels: map[string]string{
|
||||
TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5",
|
||||
},
|
||||
expected: &types.CircuitBreaker{
|
||||
Expression: "NetworkErrorRatio() > 0.5",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetCircuitBreaker(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLoadBalancer(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.LoadBalancer
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no LB labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when labels are set",
|
||||
labels: map[string]string{
|
||||
TraefikBackendLoadBalancerMethod: "drr",
|
||||
TraefikBackendLoadBalancerStickiness: "true",
|
||||
TraefikBackendLoadBalancerStickinessCookieName: "foo",
|
||||
},
|
||||
expected: &types.LoadBalancer{
|
||||
Method: "drr",
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a nil Stickiness when Stickiness is not set",
|
||||
labels: map[string]string{
|
||||
TraefikBackendLoadBalancerMethod: "drr",
|
||||
TraefikBackendLoadBalancerStickinessCookieName: "foo",
|
||||
},
|
||||
expected: &types.LoadBalancer{
|
||||
Method: "drr",
|
||||
Stickiness: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetLoadBalancer(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMaxConn(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.MaxConn
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no max conn labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return nil when no amount label",
|
||||
labels: map[string]string{
|
||||
TraefikBackendMaxConnExtractorFunc: "client.ip",
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return default when no empty extractorFunc label",
|
||||
labels: map[string]string{
|
||||
TraefikBackendMaxConnExtractorFunc: "",
|
||||
TraefikBackendMaxConnAmount: "666",
|
||||
},
|
||||
expected: &types.MaxConn{
|
||||
ExtractorFunc: "request.host",
|
||||
Amount: 666,
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when max conn labels are set",
|
||||
labels: map[string]string{
|
||||
TraefikBackendMaxConnExtractorFunc: "client.ip",
|
||||
TraefikBackendMaxConnAmount: "666",
|
||||
},
|
||||
expected: &types.MaxConn{
|
||||
ExtractorFunc: "client.ip",
|
||||
Amount: 666,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetMaxConn(test.labels)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHealthCheck(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.HealthCheck
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no health check labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return nil when no health check Path label",
|
||||
labels: map[string]string{
|
||||
TraefikBackendHealthCheckPort: "80",
|
||||
TraefikBackendHealthCheckInterval: "6",
|
||||
TraefikBackendHealthCheckTimeout: "3",
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when health check labels are set",
|
||||
labels: map[string]string{
|
||||
TraefikBackendHealthCheckPath: "/health",
|
||||
TraefikBackendHealthCheckPort: "80",
|
||||
TraefikBackendHealthCheckInterval: "6",
|
||||
TraefikBackendHealthCheckTimeout: "3",
|
||||
TraefikBackendHealthCheckHeaders: "Foo:bar || Goo:bir",
|
||||
TraefikBackendHealthCheckHostname: "traefik",
|
||||
TraefikBackendHealthCheckScheme: "http",
|
||||
},
|
||||
expected: &types.HealthCheck{
|
||||
Scheme: "http",
|
||||
Path: "/health",
|
||||
Port: 80,
|
||||
Interval: "6",
|
||||
Timeout: "3",
|
||||
Hostname: "traefik",
|
||||
Headers: map[string]string{
|
||||
"Foo": "bar",
|
||||
"Goo": "bir",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetHealthCheck(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBuffering(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.Buffering
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no buffering labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when buffering labels are set",
|
||||
labels: map[string]string{
|
||||
TraefikBackendBufferingMaxResponseBodyBytes: "10485760",
|
||||
TraefikBackendBufferingMemResponseBodyBytes: "2097152",
|
||||
TraefikBackendBufferingMaxRequestBodyBytes: "10485760",
|
||||
TraefikBackendBufferingMemRequestBodyBytes: "2097152",
|
||||
TraefikBackendBufferingRetryExpression: "IsNetworkError() && Attempts() <= 2",
|
||||
},
|
||||
expected: &types.Buffering{
|
||||
MaxResponseBodyBytes: 10485760,
|
||||
MemResponseBodyBytes: 2097152,
|
||||
MaxRequestBodyBytes: 10485760,
|
||||
MemRequestBodyBytes: 2097152,
|
||||
RetryExpression: "IsNetworkError() && Attempts() <= 2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetBuffering(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRedirect(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.Redirect
|
||||
}{
|
||||
|
||||
{
|
||||
desc: "should return nil when no redirect labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should use only entry point tag when mix regex redirect and entry point redirect",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRedirectEntryPoint: "https",
|
||||
TraefikFrontendRedirectRegex: "(.*)",
|
||||
TraefikFrontendRedirectReplacement: "$1",
|
||||
},
|
||||
expected: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when entry point redirect label",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRedirectEntryPoint: "https",
|
||||
},
|
||||
expected: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when entry point redirect label (permanent)",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRedirectEntryPoint: "https",
|
||||
TraefikFrontendRedirectPermanent: "true",
|
||||
},
|
||||
expected: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
Permanent: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when regex redirect labels",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRedirectRegex: "(.*)",
|
||||
TraefikFrontendRedirectReplacement: "$1",
|
||||
},
|
||||
expected: &types.Redirect{
|
||||
Regex: "(.*)",
|
||||
Replacement: "$1",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when regex redirect labels (permanent)",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRedirectRegex: "(.*)",
|
||||
TraefikFrontendRedirectReplacement: "$1",
|
||||
TraefikFrontendRedirectPermanent: "true",
|
||||
},
|
||||
expected: &types.Redirect{
|
||||
Regex: "(.*)",
|
||||
Replacement: "$1",
|
||||
Permanent: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetRedirect(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRateLimit(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.RateLimit
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no rate limit labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when rate limit labels are defined",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRateLimitExtractorFunc: "client.ip",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitPeriod: "6",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitAverage: "12",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitBurst: "18",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitPeriod: "3",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitAverage: "6",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitBurst: "9",
|
||||
},
|
||||
expected: &types.RateLimit{
|
||||
ExtractorFunc: "client.ip",
|
||||
RateSet: map[string]*types.Rate{
|
||||
"foo": {
|
||||
Period: parse.Duration(6 * time.Second),
|
||||
Average: 12,
|
||||
Burst: 18,
|
||||
},
|
||||
"bar": {
|
||||
Period: parse.Duration(3 * time.Second),
|
||||
Average: 6,
|
||||
Burst: 9,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return nil when ExtractorFunc is missing",
|
||||
labels: map[string]string{
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitPeriod: "6",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitAverage: "12",
|
||||
Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitBurst: "18",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitPeriod: "3",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitAverage: "6",
|
||||
Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitBurst: "9",
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetRateLimit(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHeaders(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.Headers
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no custom headers options are set",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a struct when all custom headers options are set",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
TraefikFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
TraefikFrontendSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8",
|
||||
TraefikFrontendAllowedHosts: "foo,bar,bor",
|
||||
TraefikFrontendHostsProxyHeaders: "foo,bar,bor",
|
||||
TraefikFrontendSSLHost: "foo",
|
||||
TraefikFrontendCustomFrameOptionsValue: "foo",
|
||||
TraefikFrontendContentSecurityPolicy: "foo",
|
||||
TraefikFrontendPublicKey: "foo",
|
||||
TraefikFrontendReferrerPolicy: "foo",
|
||||
TraefikFrontendCustomBrowserXSSValue: "foo",
|
||||
TraefikFrontendSTSSeconds: "666",
|
||||
TraefikFrontendSSLRedirect: "true",
|
||||
TraefikFrontendSSLForceHost: "true",
|
||||
TraefikFrontendSSLTemporaryRedirect: "true",
|
||||
TraefikFrontendSTSIncludeSubdomains: "true",
|
||||
TraefikFrontendSTSPreload: "true",
|
||||
TraefikFrontendForceSTSHeader: "true",
|
||||
TraefikFrontendFrameDeny: "true",
|
||||
TraefikFrontendContentTypeNosniff: "true",
|
||||
TraefikFrontendBrowserXSSFilter: "true",
|
||||
TraefikFrontendIsDevelopment: "true",
|
||||
},
|
||||
expected: &types.Headers{
|
||||
CustomRequestHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
CustomResponseHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
SSLProxyHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
AllowedHosts: []string{"foo", "bar", "bor"},
|
||||
HostsProxyHeaders: []string{"foo", "bar", "bor"},
|
||||
SSLHost: "foo",
|
||||
CustomFrameOptionsValue: "foo",
|
||||
ContentSecurityPolicy: "foo",
|
||||
PublicKey: "foo",
|
||||
ReferrerPolicy: "foo",
|
||||
CustomBrowserXSSValue: "foo",
|
||||
STSSeconds: 666,
|
||||
SSLForceHost: true,
|
||||
SSLRedirect: true,
|
||||
SSLTemporaryRedirect: true,
|
||||
STSIncludeSubdomains: true,
|
||||
STSPreload: true,
|
||||
ForceSTSHeader: true,
|
||||
FrameDeny: true,
|
||||
ContentTypeNosniff: true,
|
||||
BrowserXSSFilter: true,
|
||||
IsDevelopment: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := GetHeaders(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
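// Illustrative note (not part of the original file): as exercised above, the
// custom header labels use "||" to separate header entries and ":" to separate
// a header name from its value, e.g.
//
//	"X-Custom-A: foo || X-Custom-B: bar"
//
// which GetHeaders parses into map[string]string{"X-Custom-A": "foo", "X-Custom-B": "bar"}
// (header names are canonicalized, as the Content-Type entries above show).
// The X-Custom-* names here are invented for the example.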
|
||||
|
||||
func TestProviderGetErrorPages(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected map[string]*types.ErrorPage
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no tags",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a map when tags are present",
|
||||
labels: map[string]string{
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageStatus: "404",
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageBackend: "foo_backend",
|
||||
Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageQuery: "foo_query",
|
||||
Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageStatus: "500,600",
|
||||
Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageBackend: "bar_backend",
|
||||
Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageQuery: "bar_query",
|
||||
},
|
||||
expected: map[string]*types.ErrorPage{
|
||||
"foo": {
|
||||
Status: []string{"404"},
|
||||
Query: "foo_query",
|
||||
Backend: "foo_backend",
|
||||
},
|
||||
"bar": {
|
||||
Status: []string{"500", "600"},
|
||||
Query: "bar_query",
|
||||
Backend: "bar_backend",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
result := GetErrorPages(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAuth(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.Auth
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no tags",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return a basic auth",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendAuthHeaderField: "myHeaderField",
|
||||
TraefikFrontendAuthBasicRealm: "myRealm",
|
||||
TraefikFrontendAuthBasicUsers: "user:pwd,user2:pwd2",
|
||||
TraefikFrontendAuthBasicUsersFile: "myUsersFile",
|
||||
TraefikFrontendAuthBasicRemoveHeader: "true",
|
||||
},
|
||||
expected: &types.Auth{
|
||||
HeaderField: "myHeaderField",
|
||||
Basic: &types.Basic{UsersFile: "myUsersFile", Users: []string{"user:pwd", "user2:pwd2"}, RemoveHeader: true, Realm: "myRealm"},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a digest auth",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendAuthDigestRemoveHeader: "true",
|
||||
TraefikFrontendAuthHeaderField: "myHeaderField",
|
||||
TraefikFrontendAuthDigestUsers: "user:pwd,user2:pwd2",
|
||||
TraefikFrontendAuthDigestUsersFile: "myUsersFile",
|
||||
},
|
||||
expected: &types.Auth{
|
||||
HeaderField: "myHeaderField",
|
||||
Digest: &types.Digest{UsersFile: "myUsersFile", Users: []string{"user:pwd", "user2:pwd2"}, RemoveHeader: true},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return a forward auth",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendAuthHeaderField: "myHeaderField",
|
||||
TraefikFrontendAuthForwardAddress: "myAddress",
|
||||
TraefikFrontendAuthForwardTrustForwardHeader: "true",
|
||||
TraefikFrontendAuthForwardTLSCa: "ca.crt",
|
||||
TraefikFrontendAuthForwardTLSCaOptional: "true",
|
||||
TraefikFrontendAuthForwardTLSInsecureSkipVerify: "true",
|
||||
TraefikFrontendAuthForwardTLSKey: "myKey",
|
||||
TraefikFrontendAuthForwardTLSCert: "myCert",
|
||||
},
|
||||
expected: &types.Auth{
|
||||
HeaderField: "myHeaderField",
|
||||
Forward: &types.Forward{
|
||||
TrustForwardHeader: true,
|
||||
Address: "myAddress",
|
||||
TLS: &types.ClientTLS{
|
||||
InsecureSkipVerify: true,
|
||||
CA: "ca.crt",
|
||||
CAOptional: true,
|
||||
Key: "myKey",
|
||||
Cert: "myCert",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
result := GetAuth(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestGetPassTLSClientCert(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
expected *types.TLSClientHeaders
|
||||
}{
|
||||
{
|
||||
desc: "should return nil when no tags",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with true pem flag",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertPem: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and NotAfter true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosNotAfter: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and NotBefore true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosNotBefore: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and sans true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSans: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Sans: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and subject with commonName true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectCommonName: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and subject with country true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectCountry: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
Country: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and subject with locality true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectLocality: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
Locality: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and subject with organization true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectOrganization: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
Organization: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and subject with province true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectProvince: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
Province: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with infos and subject with serialNumber true",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "should return tlsClientHeaders with all infos",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendPassTLSClientCertPem: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosNotAfter: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosNotBefore: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSans: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectCommonName: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectCountry: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectLocality: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectOrganization: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectProvince: "true",
|
||||
TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber: "true",
|
||||
},
|
||||
expected: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Sans: true,
|
||||
NotBefore: true,
|
||||
NotAfter: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
Province: true,
|
||||
Organization: true,
|
||||
Locality: true,
|
||||
Country: true,
|
||||
CommonName: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
result := GetTLSClientCert(test.labels)
|
||||
|
||||
assert.Equal(t, test.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,115 +0,0 @@
|
|||
package label

import (
	"regexp"
	"strings"

	"github.com/containous/traefik/log"
)

var (
	// SegmentPropertiesRegexp is used to extract the segment name and the property name for that segment.
	// All properties follow the format traefik.<segment_name>.frontend.*, except port/portIndex/portName/weight/protocol/backend, which come directly after traefik.<segment_name>.
	SegmentPropertiesRegexp = regexp.MustCompile(`^traefik\.(?P<segment_name>.+?)\.(?P<property_name>port|portIndex|portName|weight|protocol|backend|frontend\.(.+))$`)

	// PortRegexp is used to extract the port label of a segment.
	PortRegexp = regexp.MustCompile(`^traefik\.(?P<segment_name>.+?)\.port$`)
)

// SegmentPropertyValues is a map of segment properties,
// e.g. weight=42.
type SegmentPropertyValues map[string]string

// SegmentProperties is a map of segment properties per segment,
// which we can get with label[segmentName][propertyName].
// It yields a property value.
type SegmentProperties map[string]SegmentPropertyValues

// FindSegmentSubmatch splits a segment label into its submatches.
func FindSegmentSubmatch(name string) []string {
	matches := SegmentPropertiesRegexp.FindStringSubmatch(name)
	if matches == nil ||
		strings.HasPrefix(name, TraefikFrontend+".") ||
		strings.HasPrefix(name, TraefikBackend+".") {
		return nil
	}
	return matches
}

// ExtractTraefikLabels transforms labels into segment labels.
func ExtractTraefikLabels(originLabels map[string]string) SegmentProperties {
	allLabels := make(SegmentProperties)

	if _, ok := allLabels[""]; !ok {
		allLabels[""] = make(SegmentPropertyValues)
	}

	for name, value := range originLabels {
		if !strings.HasPrefix(name, Prefix) {
			continue
		}

		matches := FindSegmentSubmatch(name)
		if matches == nil {
			// Classic labels
			allLabels[""][name] = value
		} else {
			// Segment labels
			var segmentName string
			var propertyName string
			for i, name := range SegmentPropertiesRegexp.SubexpNames() {
				// Group 0 is anonymous because it is always the root expression.
				if i != 0 {
					if name == "segment_name" {
						segmentName = matches[i]
					} else if name == "property_name" {
						propertyName = matches[i]
					}
				}
			}

			if _, ok := allLabels[segmentName]; !ok {
				allLabels[segmentName] = make(SegmentPropertyValues)
			}
			allLabels[segmentName][Prefix+propertyName] = value
		}
	}

	log.Debug("originLabels", originLabels)
	log.Debug("allLabels", allLabels)

	allLabels.mergeDefault()

	return allLabels
}

func (s SegmentProperties) mergeDefault() {
	// If SegmentProperties contains the default segment, merge each segment with the default segment.
	if defaultLabels, okDefault := s[""]; okDefault {

		segmentsNames := s.GetSegmentNames()
		if len(defaultLabels) > 0 {
			for _, name := range segmentsNames {
				segmentLabels := s[name]
				for key, value := range defaultLabels {
					if _, ok := segmentLabels[key]; !ok {
						segmentLabels[key] = value
					}
				}
			}
		}

		if len(segmentsNames) > 1 {
			delete(s, "")
		}
	}
}

// GetSegmentNames returns all segment names.
func (s SegmentProperties) GetSegmentNames() []string {
	var names []string
	for name := range s {
		names = append(names, name)
	}
	return names
}
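// Illustrative sketch, not part of the original file: a minimal example of how
// ExtractTraefikLabels (above) groups labels per segment. The label values are
// invented for the example.
func exampleExtractTraefikLabels() SegmentProperties {
	labels := map[string]string{
		"traefik.port":              "80",                   // classic label: goes to the default segment ""
		"traefik.web.frontend.rule": "Host:web.example.com", // property of segment "web"
	}

	// After mergeDefault, the "web" segment also inherits "traefik.port" = "80",
	// and the default segment is dropped because more than one segment exists.
	return ExtractTraefikLabels(labels)
}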
@ -1,95 +0,0 @@
|
|||
package label
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestExtractTraefikLabels(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
prefix string
|
||||
originLabels map[string]string
|
||||
expected SegmentProperties
|
||||
}{
|
||||
{
|
||||
desc: "nil labels map",
|
||||
prefix: "traefik",
|
||||
originLabels: nil,
|
||||
expected: SegmentProperties{"": {}},
|
||||
},
|
||||
{
|
||||
desc: "container labels",
|
||||
prefix: "traefik",
|
||||
originLabels: map[string]string{
|
||||
"frontend.priority": "foo", // missing prefix: skip
|
||||
"traefik.port": "bar",
|
||||
},
|
||||
expected: SegmentProperties{
|
||||
"": {
|
||||
"traefik.port": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "segment labels: only segment no default",
|
||||
prefix: "traefik",
|
||||
originLabels: map[string]string{
|
||||
"traefik.goo.frontend.priority": "A",
|
||||
"traefik.goo.port": "D",
|
||||
"traefik.port": "C",
|
||||
},
|
||||
expected: SegmentProperties{
|
||||
"goo": {
|
||||
"traefik.frontend.priority": "A",
|
||||
"traefik.port": "D",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "segment labels: use default",
|
||||
prefix: "traefik",
|
||||
originLabels: map[string]string{
|
||||
"traefik.guu.frontend.priority": "B",
|
||||
"traefik.port": "C",
|
||||
},
|
||||
expected: SegmentProperties{
|
||||
"guu": {
|
||||
"traefik.frontend.priority": "B",
|
||||
"traefik.port": "C",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "segment labels: several segments",
|
||||
prefix: "traefik",
|
||||
originLabels: map[string]string{
|
||||
"traefik.goo.frontend.priority": "A",
|
||||
"traefik.goo.port": "D",
|
||||
"traefik.guu.frontend.priority": "B",
|
||||
"traefik.port": "C",
|
||||
},
|
||||
expected: SegmentProperties{
|
||||
"goo": {
|
||||
"traefik.frontend.priority": "A",
|
||||
"traefik.port": "D",
|
||||
},
|
||||
"guu": {
|
||||
"traefik.frontend.priority": "B",
|
||||
"traefik.port": "C",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := ExtractTraefikLabels(test.originLabels)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,226 +0,0 @@
|
|||
package marathon
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/gambol99/go-marathon"
|
||||
)
|
||||
|
||||
const testTaskName = "taskID"
|
||||
|
||||
func withAppData(app marathon.Application, segmentName string) appData {
|
||||
segmentProperties := label.ExtractTraefikLabels(stringValueMap(app.Labels))
|
||||
return appData{
|
||||
Application: app,
|
||||
SegmentLabels: segmentProperties[segmentName],
|
||||
SegmentName: segmentName,
|
||||
LinkedApps: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Functions related to building applications.
|
||||
|
||||
func withApplications(apps ...marathon.Application) *marathon.Applications {
|
||||
return &marathon.Applications{Apps: apps}
|
||||
}
|
||||
|
||||
func application(ops ...func(*marathon.Application)) marathon.Application {
|
||||
app := marathon.Application{}
|
||||
app.EmptyLabels()
|
||||
app.Deployments = []map[string]string{}
|
||||
app.ReadinessChecks = &[]marathon.ReadinessCheck{}
|
||||
app.ReadinessCheckResults = &[]marathon.ReadinessCheckResult{}
|
||||
|
||||
for _, op := range ops {
|
||||
op(&app)
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
func appID(name string) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.Name(name)
|
||||
}
|
||||
}
|
||||
|
||||
func appPorts(ports ...int) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.Ports = append(app.Ports, ports...)
|
||||
}
|
||||
}
|
||||
|
||||
func withLabel(key, value string) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.AddLabel(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
func constraint(value string) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.AddConstraint(strings.Split(value, ":")...)
|
||||
}
|
||||
}
|
||||
|
||||
func withSegmentLabel(key, value string, segmentName string) func(*marathon.Application) {
|
||||
if len(segmentName) == 0 {
|
||||
panic("segmentName can not be empty")
|
||||
}
|
||||
|
||||
property := strings.TrimPrefix(key, label.Prefix)
|
||||
return func(app *marathon.Application) {
|
||||
app.AddLabel(label.Prefix+segmentName+"."+property, value)
|
||||
}
|
||||
}
|
||||
|
||||
func portDefinition(port int) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.AddPortDefinition(marathon.PortDefinition{
|
||||
Port: &port,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func bridgeNetwork() func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.SetNetwork("bridge", marathon.BridgeNetworkMode)
|
||||
}
|
||||
}
|
||||
|
||||
func containerNetwork() func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.SetNetwork("cni", marathon.ContainerNetworkMode)
|
||||
}
|
||||
}
|
||||
|
||||
func ipAddrPerTask(port int) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
p := marathon.Port{
|
||||
Number: port,
|
||||
Name: "port",
|
||||
}
|
||||
disc := marathon.Discovery{}
|
||||
disc.AddPort(p)
|
||||
ipAddr := marathon.IPAddressPerTask{}
|
||||
ipAddr.SetDiscovery(disc)
|
||||
app.SetIPAddressPerTask(ipAddr)
|
||||
}
|
||||
}
|
||||
|
||||
func deployments(ids ...string) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
for _, id := range ids {
|
||||
app.Deployments = append(app.Deployments, map[string]string{
|
||||
"ID": id,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readinessCheck(timeout time.Duration) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
app.ReadinessChecks = &[]marathon.ReadinessCheck{
|
||||
{
|
||||
Path: "/ready",
|
||||
TimeoutSeconds: int(timeout.Seconds()),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readinessCheckResult(taskID string, ready bool) func(*marathon.Application) {
|
||||
return func(app *marathon.Application) {
|
||||
*app.ReadinessCheckResults = append(*app.ReadinessCheckResults, marathon.ReadinessCheckResult{
|
||||
TaskID: taskID,
|
||||
Ready: ready,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func withTasks(tasks ...marathon.Task) func(*marathon.Application) {
|
||||
return func(application *marathon.Application) {
|
||||
for _, task := range tasks {
|
||||
tu := task
|
||||
application.Tasks = append(application.Tasks, &tu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Functions related to building tasks.
|
||||
|
||||
func task(ops ...func(*marathon.Task)) marathon.Task {
|
||||
t := &marathon.Task{
|
||||
ID: testTaskName,
|
||||
// The vast majority of tests expect the task state to be TASK_RUNNING.
|
||||
State: string(taskStateRunning),
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(t)
|
||||
}
|
||||
|
||||
return *t
|
||||
}
|
||||
|
||||
func withTaskID(id string) func(*marathon.Task) {
|
||||
return func(task *marathon.Task) {
|
||||
task.ID = id
|
||||
}
|
||||
}
|
||||
|
||||
func localhostTask(ops ...func(*marathon.Task)) marathon.Task {
|
||||
t := task(
|
||||
host("localhost"),
|
||||
ipAddresses("127.0.0.1"),
|
||||
taskState(taskStateRunning),
|
||||
)
|
||||
|
||||
for _, op := range ops {
|
||||
op(&t)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func taskPorts(ports ...int) func(*marathon.Task) {
|
||||
return func(t *marathon.Task) {
|
||||
t.Ports = append(t.Ports, ports...)
|
||||
}
|
||||
}
|
||||
|
||||
func taskState(state TaskState) func(*marathon.Task) {
|
||||
return func(t *marathon.Task) {
|
||||
t.State = string(state)
|
||||
}
|
||||
}
|
||||
|
||||
func host(h string) func(*marathon.Task) {
|
||||
return func(t *marathon.Task) {
|
||||
t.Host = h
|
||||
}
|
||||
}
|
||||
|
||||
func ipAddresses(addresses ...string) func(*marathon.Task) {
|
||||
return func(t *marathon.Task) {
|
||||
for _, addr := range addresses {
|
||||
t.IPAddresses = append(t.IPAddresses, &marathon.IPAddress{
|
||||
IPAddress: addr,
|
||||
Protocol: "tcp",
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func startedAt(timestamp string) func(*marathon.Task) {
|
||||
return func(t *marathon.Task) {
|
||||
t.StartedAt = timestamp
|
||||
}
|
||||
}
|
||||
|
||||
func startedAtFromNow(offset time.Duration) func(*marathon.Task) {
|
||||
return func(t *marathon.Task) {
|
||||
t.StartedAt = time.Now().Add(-offset).Format(time.RFC3339)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,390 +0,0 @@
|
|||
package marathon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/gambol99/go-marathon"
|
||||
)
|
||||
|
||||
type appData struct {
|
||||
marathon.Application
|
||||
SegmentLabels map[string]string
|
||||
SegmentName string
|
||||
LinkedApps []*appData
|
||||
}
|
||||
|
||||
func (p *Provider) buildConfiguration(applications *marathon.Applications) *types.Configuration {
|
||||
var MarathonFuncMap = template.FuncMap{
|
||||
"getDomain": label.GetFuncString(label.TraefikDomain, p.Domain), // see https://github.com/containous/traefik/pull/1693
|
||||
"getSubDomain": p.getSubDomain, // see https://github.com/containous/traefik/pull/1693
|
||||
"getBackendName": p.getBackendName,
|
||||
|
||||
// Backend functions
|
||||
"getPort": getPort,
|
||||
"getCircuitBreaker": label.GetCircuitBreaker,
|
||||
"getLoadBalancer": label.GetLoadBalancer,
|
||||
"getMaxConn": label.GetMaxConn,
|
||||
"getHealthCheck": label.GetHealthCheck,
|
||||
"getBuffering": label.GetBuffering,
|
||||
"getResponseForwarding": label.GetResponseForwarding,
|
||||
"getServers": p.getServers,
|
||||
|
||||
// Frontend functions
|
||||
"getSegmentNameSuffix": getSegmentNameSuffix,
|
||||
"getFrontendRule": p.getFrontendRule,
|
||||
"getFrontendName": p.getFrontendName,
|
||||
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
|
||||
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
|
||||
"getPassTLSClientCert": label.GetTLSClientCert,
|
||||
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
|
||||
"getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
|
||||
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
|
||||
"getAuth": label.GetAuth,
|
||||
"getRedirect": label.GetRedirect,
|
||||
"getErrorPages": label.GetErrorPages,
|
||||
"getRateLimit": label.GetRateLimit,
|
||||
"getHeaders": label.GetHeaders,
|
||||
"getWhiteList": label.GetWhiteList,
|
||||
}
|
||||
|
||||
apps := make(map[string]*appData)
|
||||
for _, app := range applications.Apps {
|
||||
if p.applicationFilter(app) {
|
||||
// Tasks
|
||||
var filteredTasks []*marathon.Task
|
||||
for _, task := range app.Tasks {
|
||||
if p.taskFilter(*task, app) {
|
||||
filteredTasks = append(filteredTasks, task)
|
||||
logIllegalServices(*task, app)
|
||||
}
|
||||
}
|
||||
|
||||
app.Tasks = filteredTasks
|
||||
|
||||
// segments
|
||||
segmentProperties := label.ExtractTraefikLabels(stringValueMap(app.Labels))
|
||||
for segmentName, labels := range segmentProperties {
|
||||
data := &appData{
|
||||
Application: app,
|
||||
SegmentLabels: labels,
|
||||
SegmentName: segmentName,
|
||||
}
|
||||
|
||||
backendName := p.getBackendName(*data)
|
||||
if baseApp, ok := apps[backendName]; ok {
|
||||
baseApp.LinkedApps = append(baseApp.LinkedApps, data)
|
||||
} else {
|
||||
apps[backendName] = data
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
templateObjects := struct {
|
||||
Applications map[string]*appData
|
||||
Domain string
|
||||
}{
|
||||
Applications: apps,
|
||||
Domain: p.Domain,
|
||||
}
|
||||
|
||||
configuration, err := p.GetConfiguration("templates/marathon.tmpl", MarathonFuncMap, templateObjects)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to render Marathon configuration template: %v", err)
|
||||
}
|
||||
return configuration
|
||||
}
|
||||
|
||||
func (p *Provider) applicationFilter(app marathon.Application) bool {
|
||||
// Filter disabled application.
|
||||
if !label.IsEnabled(stringValueMap(app.Labels), p.ExposedByDefault) {
|
||||
log.Debugf("Filtering disabled Marathon application %s", app.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter by constraints.
|
||||
constraintTags := label.GetSliceStringValue(stringValueMap(app.Labels), label.TraefikTags)
|
||||
if p.MarathonLBCompatibility {
|
||||
if haGroup := label.GetStringValue(stringValueMap(app.Labels), labelLbCompatibilityGroup, ""); len(haGroup) > 0 {
|
||||
constraintTags = append(constraintTags, haGroup)
|
||||
}
|
||||
}
|
||||
if p.FilterMarathonConstraints && app.Constraints != nil {
|
||||
for _, constraintParts := range *app.Constraints {
|
||||
constraintTags = append(constraintTags, strings.Join(constraintParts, ":"))
|
||||
}
|
||||
}
|
||||
if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok {
|
||||
if failingConstraint != nil {
|
||||
log.Debugf("Filtering Marathon application %s pruned by %q constraint", app.ID, failingConstraint.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Provider) taskFilter(task marathon.Task, application marathon.Application) bool {
|
||||
if task.State != string(taskStateRunning) {
|
||||
return false
|
||||
}
|
||||
|
||||
if ready := p.readyChecker.Do(task, application); !ready {
|
||||
log.Infof("Filtering unready task %s from application %s", task.ID, application.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// logIllegalServices logs illegal service configurations.
|
||||
// While we cannot filter on the service level, they will eventually get
|
||||
// rejected once the server configuration is rendered.
|
||||
func logIllegalServices(task marathon.Task, app marathon.Application) {
|
||||
segmentProperties := label.ExtractTraefikLabels(stringValueMap(app.Labels))
|
||||
for segmentName, labels := range segmentProperties {
|
||||
// Check for illegal/missing ports.
|
||||
if _, err := processPorts(app, task, labels); err != nil {
|
||||
log.Warnf("%s has an illegal configuration: no proper port available", identifier(app, task, segmentName))
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for illegal port label combinations.
|
||||
hasPortLabel := label.Has(labels, label.TraefikPort)
|
||||
hasPortIndexLabel := label.Has(labels, label.TraefikPortIndex)
|
||||
if hasPortLabel && hasPortIndexLabel {
|
||||
log.Warnf("%s has both port and port index specified; port will take precedence", identifier(app, task, segmentName))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getSegmentNameSuffix(serviceName string) string {
|
||||
if len(serviceName) > 0 {
|
||||
return "-service-" + provider.Normalize(serviceName)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *Provider) getSubDomain(name string) string {
|
||||
if p.GroupsAsSubDomains {
|
||||
splitedName := strings.Split(strings.TrimPrefix(name, "/"), "/")
|
||||
provider.ReverseStringSlice(&splitedName)
|
||||
reverseName := strings.Join(splitedName, ".")
|
||||
return reverseName
|
||||
}
|
||||
return strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1)
|
||||
}
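// Illustrative note (not part of the original file): for an application ID of
// "/a/b/app", getSubDomain above returns "app.b.a" when GroupsAsSubDomains is
// enabled, and "a-b-app" otherwise.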
|
||||
|
||||
func (p *Provider) getBackendName(app appData) string {
|
||||
value := label.GetStringValue(app.SegmentLabels, label.TraefikBackend, "")
|
||||
if len(value) > 0 {
|
||||
return provider.Normalize("backend" + value)
|
||||
}
|
||||
|
||||
return provider.Normalize("backend" + app.ID + getSegmentNameSuffix(app.SegmentName))
|
||||
}
|
||||
|
||||
func (p *Provider) getFrontendName(app appData) string {
|
||||
return provider.Normalize("frontend" + app.ID + getSegmentNameSuffix(app.SegmentName))
|
||||
}
|
||||
|
||||
// getFrontendRule returns the frontend rule for the given application, based on
// its labels. When a segment is provided, the segment-specific label takes
// precedence over the generic one. It falls back to a default Host rule when no
// rule label is present.
func (p *Provider) getFrontendRule(app appData) string {
|
||||
if value := label.GetStringValue(app.SegmentLabels, label.TraefikFrontendRule, ""); len(value) > 0 {
|
||||
return value
|
||||
}
|
||||
|
||||
if p.MarathonLBCompatibility {
|
||||
if value := label.GetStringValue(stringValueMap(app.Labels), labelLbCompatibility, ""); len(value) > 0 {
|
||||
return "Host:" + value
|
||||
}
|
||||
}
|
||||
|
||||
domain := label.GetStringValue(app.SegmentLabels, label.TraefikDomain, p.Domain)
|
||||
if len(domain) > 0 {
|
||||
domain = "." + domain
|
||||
}
|
||||
|
||||
if len(app.SegmentName) > 0 {
|
||||
return "Host:" + strings.ToLower(provider.Normalize(app.SegmentName)) + "." + p.getSubDomain(app.ID) + domain
|
||||
}
|
||||
return "Host:" + p.getSubDomain(app.ID) + domain
|
||||
}
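// Illustrative note (not part of the original file): with the provider domain
// "marathon.localhost", an application ID of "/app" and a segment named "web"
// (and no explicit rule or marathon-lb label), getFrontendRule above yields
// "Host:web.app.marathon.localhost", matching the segment test expectations
// elsewhere in this commit.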
|
||||
|
||||
func getPort(task marathon.Task, app appData) string {
|
||||
port, err := processPorts(app.Application, task, app.SegmentLabels)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to process ports for %s: %s", identifier(app.Application, task, app.SegmentName), err)
|
||||
return ""
|
||||
}
|
||||
|
||||
return strconv.Itoa(port)
|
||||
}
|
||||
|
||||
// processPorts returns the configured port.
// An explicitly specified port is preferred. If none is specified, it selects
// one of the available ports; the first port found is returned unless an
// optional index is provided.
func processPorts(app marathon.Application, task marathon.Task, labels map[string]string) (int, error) {
|
||||
if label.Has(labels, label.TraefikPort) {
|
||||
port := label.GetIntValue(labels, label.TraefikPort, 0)
|
||||
|
||||
if port <= 0 {
return 0, fmt.Errorf("explicitly specified port %d must be larger than zero", port)
}
return port, nil
|
||||
}
|
||||
|
||||
ports := retrieveAvailablePorts(app, task)
|
||||
if len(ports) == 0 {
|
||||
return 0, errors.New("no port found")
|
||||
}
|
||||
|
||||
portIndex := label.GetIntValue(labels, label.TraefikPortIndex, 0)
|
||||
if portIndex < 0 || portIndex > len(ports)-1 {
|
||||
return 0, fmt.Errorf("index %d must be within range (0, %d)", portIndex, len(ports)-1)
|
||||
}
|
||||
return ports[portIndex], nil
|
||||
}
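// Illustrative sketch, not part of the original file: given the invented labels
//
//	labels := map[string]string{
//		label.TraefikPort:      "8080",
//		label.TraefikPortIndex: "1", // ignored: an explicit port always wins
//	}
//
// processPorts above returns 8080 without looking at the task or application
// ports; without the traefik.port label it would instead return the available
// port at index 1.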
|
||||
|
||||
func retrieveAvailablePorts(app marathon.Application, task marathon.Task) []int {
|
||||
// Using default port configuration
|
||||
if len(task.Ports) > 0 {
|
||||
return task.Ports
|
||||
}
|
||||
|
||||
// Using port definition if available
|
||||
if app.PortDefinitions != nil && len(*app.PortDefinitions) > 0 {
|
||||
var ports []int
|
||||
for _, def := range *app.PortDefinitions {
|
||||
if def.Port != nil {
|
||||
ports = append(ports, *def.Port)
|
||||
}
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
// If IP-per-task is used, use its port discovery definitions.
if app.IPAddressPerTask != nil && app.IPAddressPerTask.Discovery != nil && len(*(app.IPAddressPerTask.Discovery.Ports)) > 0 {
|
||||
var ports []int
|
||||
for _, def := range *(app.IPAddressPerTask.Discovery.Ports) {
|
||||
ports = append(ports, def.Number)
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
return []int{}
|
||||
}
|
||||
|
||||
func identifier(app marathon.Application, task marathon.Task, segmentName string) string {
|
||||
id := fmt.Sprintf("Marathon task %s from application %s", task.ID, app.ID)
|
||||
if segmentName != "" {
|
||||
id += fmt.Sprintf(" (segment: %s)", segmentName)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
func (p *Provider) getServers(app appData) map[string]types.Server {
|
||||
var servers map[string]types.Server
|
||||
|
||||
for _, task := range app.Tasks {
|
||||
name, server, err := p.getServer(app, *task)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if servers == nil {
|
||||
servers = make(map[string]types.Server)
|
||||
}
|
||||
|
||||
servers[name] = *server
|
||||
}
|
||||
|
||||
for _, linkedApp := range app.LinkedApps {
|
||||
for _, task := range linkedApp.Tasks {
|
||||
name, server, err := p.getServer(*linkedApp, *task)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if servers == nil {
|
||||
servers = make(map[string]types.Server)
|
||||
}
|
||||
|
||||
servers[name] = *server
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
func (p *Provider) getServer(app appData, task marathon.Task) (string, *types.Server, error) {
|
||||
host, err := p.getServerHost(task, app)
|
||||
if len(host) == 0 {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
port := getPort(task, app)
|
||||
protocol := label.GetStringValue(app.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
|
||||
|
||||
serverName := provider.Normalize("server-" + app.ID + "-" + task.ID + getSegmentNameSuffix(app.SegmentName))
|
||||
|
||||
return serverName, &types.Server{
|
||||
URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(host, port)),
|
||||
Weight: label.GetIntValue(app.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Provider) getServerHost(task marathon.Task, app appData) (string, error) {
|
||||
networks := app.Networks
|
||||
var hostFlag bool
|
||||
|
||||
if networks == nil {
|
||||
hostFlag = app.IPAddressPerTask == nil
|
||||
} else {
|
||||
hostFlag = (*networks)[0].Mode != marathon.ContainerNetworkMode
|
||||
}
|
||||
|
||||
if hostFlag || p.ForceTaskHostname {
|
||||
if len(task.Host) == 0 {
|
||||
return "", fmt.Errorf("host is undefined for task %q app %q", task.ID, app.ID)
|
||||
}
|
||||
return task.Host, nil
|
||||
}
|
||||
|
||||
numTaskIPAddresses := len(task.IPAddresses)
|
||||
switch numTaskIPAddresses {
|
||||
case 0:
|
||||
return "", fmt.Errorf("missing IP address for Marathon application %s on task %s", app.ID, task.ID)
|
||||
case 1:
|
||||
return task.IPAddresses[0].IPAddress, nil
|
||||
default:
|
||||
ipAddressIdx := label.GetIntValue(stringValueMap(app.Labels), labelIPAddressIdx, math.MinInt32)
|
||||
|
||||
if ipAddressIdx == math.MinInt32 {
|
||||
return "", fmt.Errorf("found %d task IP addresses but missing IP address index for Marathon application %s on task %s",
|
||||
numTaskIPAddresses, app.ID, task.ID)
|
||||
}
|
||||
if ipAddressIdx < 0 || ipAddressIdx >= numTaskIPAddresses {
|
||||
return "", fmt.Errorf("cannot use IP address index to select from %d task IP addresses for Marathon application %s on task %s",
|
||||
numTaskIPAddresses, app.ID, task.ID)
|
||||
}
|
||||
|
||||
return task.IPAddresses[ipAddressIdx].IPAddress, nil
|
||||
}
|
||||
}
|
||||
|
|
@ -1,370 +0,0 @@
|
|||
package marathon
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/gambol99/go-marathon"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestBuildConfigurationSegments(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
applications *marathon.Applications
|
||||
expectedFrontends map[string]*types.Frontend
|
||||
expectedBackends map[string]*types.Backend
|
||||
}{
|
||||
{
|
||||
desc: "multiple ports with segments",
|
||||
applications: withApplications(
|
||||
application(
|
||||
appID("/app"),
|
||||
appPorts(80, 81),
|
||||
withTasks(localhostTask(taskPorts(80, 81))),
|
||||
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "1000"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withSegmentLabel(label.TraefikPort, "80", "web"),
|
||||
withSegmentLabel(label.TraefikPort, "81", "admin"),
|
||||
withLabel("traefik..port", "82"), // This should be ignored, as it fails to match the segmentPropertiesRegexp regex.
|
||||
withSegmentLabel(label.TraefikFrontendRule, "Host:web.app.marathon.localhost", "web"),
|
||||
withSegmentLabel(label.TraefikFrontendRule, "Host:admin.app.marathon.localhost", "admin"),
|
||||
)),
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-app-service-web": {
|
||||
Backend: "backend-app-service-web",
|
||||
Routes: map[string]types.Route{
|
||||
`route-host-app-service-web`: {
|
||||
Rule: "Host:web.app.marathon.localhost",
|
||||
},
|
||||
},
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
},
|
||||
"frontend-app-service-admin": {
|
||||
Backend: "backend-app-service-admin",
|
||||
Routes: map[string]types.Route{
|
||||
`route-host-app-service-admin`: {
|
||||
Rule: "Host:admin.app.marathon.localhost",
|
||||
},
|
||||
},
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-app-service-web": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-app-taskID-service-web": {
|
||||
URL: "http://localhost:80",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
Amount: 1000,
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
},
|
||||
"backend-app-service-admin": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-app-taskID-service-admin": {
|
||||
URL: "http://localhost:81",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
Amount: 1000,
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "when all labels are set",
|
||||
applications: withApplications(
|
||||
application(
|
||||
appID("/app"),
|
||||
appPorts(80, 81),
|
||||
withTasks(localhostTask(taskPorts(80, 81))),
|
||||
|
||||
// withLabel(label.TraefikBackend, "foobar"),
|
||||
|
||||
withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"),
|
||||
withLabel(label.TraefikBackendHealthCheckPath, "/health"),
|
||||
withLabel(label.TraefikBackendHealthCheckPort, "880"),
|
||||
withLabel(label.TraefikBackendHealthCheckInterval, "6"),
|
||||
withLabel(label.TraefikBackendHealthCheckTimeout, "3"),
|
||||
withLabel(label.TraefikBackendLoadBalancerMethod, "drr"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "666"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
|
||||
withLabel(label.TraefikBackendBufferingMemResponseBodyBytes, "2097152"),
|
||||
withLabel(label.TraefikBackendBufferingMaxRequestBodyBytes, "10485760"),
|
||||
withLabel(label.TraefikBackendBufferingMemRequestBodyBytes, "2097152"),
|
||||
withLabel(label.TraefikBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"),
|
||||
|
||||
withSegmentLabel(label.TraefikPort, "80", "containous"),
|
||||
withSegmentLabel(label.TraefikProtocol, "https", "containous"),
|
||||
withSegmentLabel(label.TraefikWeight, "12", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertPem, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotBefore, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotAfter, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSans, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCountry, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectLocality, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectProvince, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, "true", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasic, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasicRemoveHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasicUsersFile, ".htpasswd", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthDigestRemoveHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthDigestUsersFile, ".htpasswd", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardAddress, "auth.server", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTrustForwardHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSCa, "ca.crt", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSCaOptional, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSCert, "server.crt", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSKey, "server.key", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSInsecureSkipVerify, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendEntryPoints, "http,https", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassHostHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSCert, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPriority, "666", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectEntryPoint, "https", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectRegex, "nope", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectReplacement, "nope", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectPermanent, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRule, "Host:traefik.io", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendWhiteListSourceRange, "10.10.10.10", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendRequestHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendResponseHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLProxyHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAllowedHosts, "foo,bar,bor", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendHostsProxyHeaders, "foo,bar,bor", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLForceHost, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLHost, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendCustomFrameOptionsValue, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendContentSecurityPolicy, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPublicKey, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendReferrerPolicy, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendCustomBrowserXSSValue, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSTSSeconds, "666", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLRedirect, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLTemporaryRedirect, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSTSIncludeSubdomains, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSTSPreload, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendForceSTSHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendFrameDeny, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendContentTypeNosniff, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendBrowserXSSFilter, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendIsDevelopment, "true", "containous"),
|
||||
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageStatus, "404"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageBackend, "foobar"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageQuery, "foo_query"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageStatus, "500,600"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageBackend, "foobar"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageQuery, "bar_query"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendRateLimitExtractorFunc, "client.ip", "containous"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitPeriod, "6"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitAverage, "12"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitBurst, "18"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitPeriod, "3"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitAverage, "6"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitBurst, "9"),
|
||||
)),
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-app-service-containous": {
|
||||
EntryPoints: []string{
|
||||
"http",
|
||||
"https",
|
||||
},
|
||||
Backend: "backend-app-service-containous",
|
||||
Routes: map[string]types.Route{
|
||||
"route-host-app-service-containous": {
|
||||
Rule: "Host:traefik.io",
|
||||
},
|
||||
},
|
||||
PassHostHeader: true,
|
||||
PassTLSCert: true,
|
||||
Priority: 666,
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
NotAfter: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Basic: &types.Basic{
|
||||
RemoveHeader: true,
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
WhiteList: &types.WhiteList{
|
||||
SourceRange: []string{"10.10.10.10"},
|
||||
},
|
||||
Headers: &types.Headers{
|
||||
CustomRequestHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
CustomResponseHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
AllowedHosts: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
HostsProxyHeaders: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
SSLRedirect: true,
|
||||
SSLTemporaryRedirect: true,
|
||||
SSLForceHost: true,
|
||||
SSLHost: "foo",
|
||||
SSLProxyHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
STSSeconds: 666,
|
||||
STSIncludeSubdomains: true,
|
||||
STSPreload: true,
|
||||
ForceSTSHeader: true,
|
||||
FrameDeny: true,
|
||||
CustomFrameOptionsValue: "foo",
|
||||
ContentTypeNosniff: true,
|
||||
BrowserXSSFilter: true,
|
||||
CustomBrowserXSSValue: "foo",
|
||||
ContentSecurityPolicy: "foo",
|
||||
PublicKey: "foo",
|
||||
ReferrerPolicy: "foo",
|
||||
IsDevelopment: true,
|
||||
},
|
||||
Errors: map[string]*types.ErrorPage{
|
||||
"bar": {
|
||||
Status: []string{
|
||||
"500",
|
||||
"600",
|
||||
},
|
||||
Backend: "backendfoobar",
|
||||
Query: "bar_query",
|
||||
},
|
||||
"foo": {
|
||||
Status: []string{
|
||||
"404",
|
||||
},
|
||||
Backend: "backendfoobar",
|
||||
Query: "foo_query",
|
||||
},
|
||||
},
|
||||
RateLimit: &types.RateLimit{
|
||||
RateSet: map[string]*types.Rate{
|
||||
"bar": {
|
||||
Period: parse.Duration(3 * time.Second),
|
||||
Average: 6,
|
||||
Burst: 9,
|
||||
},
|
||||
"foo": {
|
||||
Period: parse.Duration(6 * time.Second),
|
||||
Average: 12,
|
||||
Burst: 18,
|
||||
},
|
||||
},
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
Permanent: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-app-service-containous": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-app-taskID-service-containous": {
|
||||
URL: "https://localhost:80",
|
||||
Weight: 12,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: &types.CircuitBreaker{
|
||||
Expression: "NetworkErrorRatio() > 0.5",
|
||||
},
|
||||
LoadBalancer: &types.LoadBalancer{
|
||||
Method: "drr",
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
Amount: 666,
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
HealthCheck: &types.HealthCheck{
|
||||
Path: "/health",
|
||||
Port: 880,
|
||||
Interval: "6",
|
||||
Timeout: "3",
|
||||
},
|
||||
Buffering: &types.Buffering{
|
||||
MaxResponseBodyBytes: 10485760,
|
||||
MemResponseBodyBytes: 2097152,
|
||||
MaxRequestBodyBytes: 10485760,
|
||||
MemRequestBodyBytes: 2097152,
|
||||
RetryExpression: "IsNetworkError() && Attempts() <= 2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := &Provider{
|
||||
Domain: "marathon.localhost",
|
||||
ExposedByDefault: true,
|
||||
}
|
||||
|
||||
actualConfig := p.buildConfiguration(test.applications)
|
||||
|
||||
assert.NotNil(t, actualConfig)
|
||||
assert.Equal(t, test.expectedBackends, actualConfig.Backends)
|
||||
assert.Equal(t, test.expectedFrontends, actualConfig.Frontends)
|
||||
})
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@ -1,8 +0,0 @@
|
|||
package marathon

// stringValueMap dereferences the given label map, returning an empty map when it is nil.
func stringValueMap(mp *map[string]string) map[string]string {
	if mp != nil {
		return *mp
	}
	return make(map[string]string)
}
@ -1,24 +0,0 @@
|
|||
package marathon

import (
	"errors"

	"github.com/containous/traefik/provider/marathon/mocks"
	"github.com/gambol99/go-marathon"
	"github.com/stretchr/testify/mock"
)

type fakeClient struct {
	mocks.Marathon
}

func newFakeClient(applicationsError bool, applications marathon.Applications) *fakeClient {
	// create an instance of our test object
	fakeClient := new(fakeClient)
	if applicationsError {
		fakeClient.On("Applications", mock.Anything).Return(nil, errors.New("fake Marathon server error"))
	} else {
		fakeClient.On("Applications", mock.Anything).Return(&applications, nil)
	}
	return fakeClient
}
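// Illustrative sketch, not part of the original file: how a test can combine the
// application builders from this commit with the fake client above.
//
//	apps := withApplications(application(appID("/app"), appPorts(80)))
//	client := newFakeClient(false, *apps)
//	// client.Applications(...) now returns the canned application list.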
|
@ -1,188 +0,0 @@
package marathon

import (
	"net"
	"net/http"
	"net/url"
	"time"

	"github.com/cenk/backoff"
	"github.com/containous/flaeg/parse"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/provider"
	"github.com/containous/traefik/safe"
	"github.com/containous/traefik/types"
	"github.com/gambol99/go-marathon"
	"github.com/sirupsen/logrus"
)

const (
	traceMaxScanTokenSize = 1024 * 1024
	marathonEventIDs      = marathon.EventIDApplications |
		marathon.EventIDAddHealthCheck |
		marathon.EventIDDeploymentSuccess |
		marathon.EventIDDeploymentFailed |
		marathon.EventIDDeploymentInfo |
		marathon.EventIDDeploymentStepSuccess |
		marathon.EventIDDeploymentStepFailed
)

// TaskState denotes the Mesos state a task can have.
type TaskState string

const (
	taskStateRunning TaskState = "TASK_RUNNING"
	taskStateStaging TaskState = "TASK_STAGING"
)

const (
	labelIPAddressIdx         = "traefik.ipAddressIdx"
	labelLbCompatibilityGroup = "HAPROXY_GROUP"
	labelLbCompatibility      = "HAPROXY_0_VHOST"
)

var _ provider.Provider = (*Provider)(nil)

// Provider holds configuration of the provider.
type Provider struct {
	provider.BaseProvider
	Endpoint                  string           `description:"Marathon server endpoint. You can also specify multiple endpoint for Marathon" export:"true"`
	Domain                    string           `description:"Default domain used" export:"true"`
	ExposedByDefault          bool             `description:"Expose Marathon apps by default" export:"true"`
	GroupsAsSubDomains        bool             `description:"Convert Marathon groups to subdomains" export:"true"`
	DCOSToken                 string           `description:"DCOSToken for DCOS environment, This will override the Authorization header" export:"true"`
	MarathonLBCompatibility   bool             `description:"Add compatibility with marathon-lb labels" export:"true"`
	FilterMarathonConstraints bool             `description:"Enable use of Marathon constraints in constraint filtering" export:"true"`
	TLS                       *types.ClientTLS `description:"Enable TLS support" export:"true"`
	DialerTimeout             parse.Duration   `description:"Set a dialer timeout for Marathon" export:"true"`
	ResponseHeaderTimeout     parse.Duration   `description:"Set a response header timeout for Marathon" export:"true"`
	TLSHandshakeTimeout       parse.Duration   `description:"Set a TLS handhsake timeout for Marathon" export:"true"`
	KeepAlive                 parse.Duration   `description:"Set a TCP Keep Alive time in seconds" export:"true"`
	ForceTaskHostname         bool             `description:"Force to use the task's hostname." export:"true"`
	Basic                     *Basic           `description:"Enable basic authentication" export:"true"`
	RespectReadinessChecks    bool             `description:"Filter out tasks with non-successful readiness checks during deployments" export:"true"`
	readyChecker              *readinessChecker
	marathonClient            marathon.Marathon
}

// Basic holds basic authentication specific configurations
type Basic struct {
	HTTPBasicAuthUser string `description:"Basic authentication User"`
	HTTPBasicPassword string `description:"Basic authentication Password"`
}

// Init the provider
func (p *Provider) Init(constraints types.Constraints) error {
	return p.BaseProvider.Init(constraints)
}

// Provide allows the marathon provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	operation := func() error {
		config := marathon.NewDefaultConfig()
		config.URL = p.Endpoint
		config.EventsTransport = marathon.EventsTransportSSE
		if p.Trace {
			config.LogOutput = log.CustomWriterLevel(logrus.DebugLevel, traceMaxScanTokenSize)
		}
		if p.Basic != nil {
			config.HTTPBasicAuthUser = p.Basic.HTTPBasicAuthUser
			config.HTTPBasicPassword = p.Basic.HTTPBasicPassword
		}
		var rc *readinessChecker
		if p.RespectReadinessChecks {
			log.Debug("Enabling Marathon readiness checker")
			rc = defaultReadinessChecker(p.Trace)
		}
		p.readyChecker = rc

		if len(p.DCOSToken) > 0 {
			config.DCOSToken = p.DCOSToken
		}
		TLSConfig, err := p.TLS.CreateTLSConfig()
		if err != nil {
			return err
		}
		config.HTTPClient = &http.Client{
			Transport: &http.Transport{
				DialContext: (&net.Dialer{
					KeepAlive: time.Duration(p.KeepAlive),
					Timeout:   time.Duration(p.DialerTimeout),
				}).DialContext,
				ResponseHeaderTimeout: time.Duration(p.ResponseHeaderTimeout),
				TLSHandshakeTimeout:   time.Duration(p.TLSHandshakeTimeout),
				TLSClientConfig:       TLSConfig,
			},
		}
		client, err := marathon.NewClient(config)
		if err != nil {
			log.Errorf("Failed to create a client for marathon, error: %s", err)
			return err
		}
		p.marathonClient = client

		if p.Watch {
			update, err := client.AddEventsListener(marathonEventIDs)
			if err != nil {
				log.Errorf("Failed to register for events, %s", err)
				return err
			}
			pool.Go(func(stop chan bool) {
				defer close(update)
				for {
					select {
					case <-stop:
						return
					case event := <-update:
						log.Debugf("Received provider event %s", event)

						configuration := p.getConfiguration()
						if configuration != nil {
							configurationChan <- types.ConfigMessage{
								ProviderName:  "marathon",
								Configuration: configuration,
							}
						}
					}
				}
			})
		}

		configuration := p.getConfiguration()
		configurationChan <- types.ConfigMessage{
			ProviderName:  "marathon",
			Configuration: configuration,
		}
		return nil
	}

	notify := func(err error, time time.Duration) {
		log.Errorf("Provider connection error %+v, retrying in %s", err, time)
	}
	err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
	if err != nil {
		log.Errorf("Cannot connect to Provider server %+v", err)
	}
	return nil
}

func (p *Provider) getConfiguration() *types.Configuration {
	applications, err := p.getApplications()
	if err != nil {
		log.Errorf("Failed to retrieve Marathon applications: %v", err)
		return nil
	}

	return p.buildConfiguration(applications)
}

func (p *Provider) getApplications() (*marathon.Applications, error) {
	v := url.Values{}
	v.Add("embed", "apps.tasks")
	v.Add("embed", "apps.deployments")
	v.Add("embed", "apps.readiness")

	return p.marathonClient.Applications(v)
}
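For context on the retry wrapper used in Provide above, here is a small standalone sketch of backoff.RetryNotify with an exponential policy; the flaky operation and the messages are invented for illustration and are not taken from the deleted file.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenk/backoff"
)

func main() {
	attempts := 0

	// operation fails twice, then succeeds, mimicking a provider whose server
	// only becomes reachable after a couple of tries.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("server not reachable yet")
		}
		return nil
	}

	// notify runs after each failed attempt with the wait before the retry,
	// mirroring the log-only notify function used in Provide.
	notify := func(err error, wait time.Duration) {
		fmt.Printf("connection error %v, retrying in %s\n", err, wait)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		fmt.Println("giving up:", err)
	}
}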
File diff suppressed because it is too large
@ -1,122 +0,0 @@
package marathon

import (
	"time"

	"github.com/containous/traefik/log"
	"github.com/gambol99/go-marathon"
)

const (
	// readinessCheckDefaultTimeout is the default timeout for a readiness
	// check if no check timeout is specified on the application spec. This
	// should really never be the case, but better be safe than sorry.
	readinessCheckDefaultTimeout = 10 * time.Second
	// readinessCheckSafetyMargin is some buffer duration to account for
	// small offsets in readiness check execution.
	readinessCheckSafetyMargin = 5 * time.Second
	readinessLogHeader         = "Marathon readiness check: "
)

type readinessChecker struct {
	checkDefaultTimeout time.Duration
	checkSafetyMargin   time.Duration
	traceLogging        bool
}

func defaultReadinessChecker(isTraceLogging bool) *readinessChecker {
	return &readinessChecker{
		checkDefaultTimeout: readinessCheckDefaultTimeout,
		checkSafetyMargin:   readinessCheckSafetyMargin,
		traceLogging:        isTraceLogging,
	}
}

func (rc *readinessChecker) Do(task marathon.Task, app marathon.Application) bool {
	if rc == nil {
		// Readiness checker disabled.
		return true
	}

	switch {
	case len(app.Deployments) == 0:
		// We only care about readiness during deployments; post-deployment readiness
		// can be covered by a periodic post-deployment probe (i.e., Traefik health checks).
		rc.tracef("task %s app %s: ready = true [no deployment ongoing]", task.ID, app.ID)
		return true

	case app.ReadinessChecks == nil || len(*app.ReadinessChecks) == 0:
		// Applications without configured readiness checks are always considered
		// ready.
		rc.tracef("task %s app %s: ready = true [no readiness checks on app]", task.ID, app.ID)
		return true
	}

	// Loop through all readiness check results and return the results for
	// matching task IDs.
	if app.ReadinessCheckResults != nil {
		for _, readinessCheckResult := range *app.ReadinessCheckResults {
			if readinessCheckResult.TaskID == task.ID {
				rc.tracef("task %s app %s: ready = %t [evaluating readiness check ready state]", task.ID, app.ID, readinessCheckResult.Ready)
				return readinessCheckResult.Ready
			}
		}
	}

	// There's a corner case sometimes hit where the first new task of a
	// deployment goes from TASK_STAGING to TASK_RUNNING without a corresponding
	// readiness check result being included in the API response. This only happens
	// in a very short (yet unlucky) time frame and does not repeat for subsequent
	// tasks of the same deployment.
	// Complicating matters, the situation may occur for both initially deploying
	// applications as well as rolling-upgraded ones where one or more tasks from
	// a previous deployment exist already and are joined by new tasks from a
	// subsequent deployment. We must always make sure that pre-existing tasks
	// maintain their ready state while newly launched tasks must be considered
	// unready until a check result appears.
	// We distinguish the two cases by comparing the current time with the start
	// time of the task: It should take Marathon at most one readiness check timeout
	// interval (plus some safety margin to account for the delayed nature of
	// distributed systems) for readiness check results to be returned along the API
	// response. Once the task turns old enough, we assume it to be part of a
	// pre-existing deployment and mark it as ready. Note that it is okay to err
	// on the side of caution and consider a task unready until the safety time
	// window has elapsed because a newly created task should be readiness-checked
	// and be given a result fairly shortly after its creation (i.e., on the scale
	// of seconds).
	readinessCheckTimeoutSecs := (*app.ReadinessChecks)[0].TimeoutSeconds
	readinessCheckTimeout := time.Duration(readinessCheckTimeoutSecs) * time.Second
	if readinessCheckTimeout == 0 {
		rc.tracef("task %s app %s: readiness check timeout not set, using default value %s", task.ID, app.ID, rc.checkDefaultTimeout)
		readinessCheckTimeout = rc.checkDefaultTimeout
	} else {
		readinessCheckTimeout += rc.checkSafetyMargin
	}

	startTime, err := time.Parse(time.RFC3339, task.StartedAt)
	if err != nil {
		// An unparseable start time should never occur; if it does, we assume the
		// problem should be surfaced as quickly as possible, which is easiest if
		// we shun the task from rotation.
		log.Warnf("Failed to parse start-time %s of task %s from application %s: %s (assuming unready)", task.StartedAt, task.ID, app.ID, err)
		return false
	}

	since := time.Since(startTime)
	if since < readinessCheckTimeout {
		rc.tracef("task %s app %s: ready = false [task with start-time %s not within assumed check timeout window of %s (elapsed time since task start: %s)]", task.ID, app.ID, startTime.Format(time.RFC3339), readinessCheckTimeout, since)
		return false
	}

	// Finally, we can be certain this task is not part of the deployment (i.e.,
	// it's an old task that's going to transition into the TASK_KILLING and/or
	// TASK_KILLED state as new tasks' readiness checks gradually turn green.)
	rc.tracef("task %s app %s: ready = true [task with start-time %s not involved in deployment (elapsed time since task start: %s)]", task.ID, app.ID, startTime.Format(time.RFC3339), since)
	return true
}

func (rc *readinessChecker) tracef(format string, args ...interface{}) {
	if rc.traceLogging {
		log.Debugf(readinessLogHeader+format, args...)
	}
}
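A minimal sketch of the time-window fallback described in the comments above, assuming only that a task without a readiness check result is treated as unready until its age exceeds the check timeout plus the safety margin; the helper name and the durations are illustrative, not taken from the file.

package main

import (
	"fmt"
	"time"
)

// readyByAge applies the fallback rule: without a readiness check result, a
// task is assumed ready only once it has been running longer than the check
// timeout plus a safety margin.
func readyByAge(startedAt time.Time, checkTimeout, safetyMargin time.Duration) bool {
	window := checkTimeout + safetyMargin
	return time.Since(startedAt) >= window
}

func main() {
	timeout := 10 * time.Second
	margin := 5 * time.Second

	fresh := time.Now().Add(-5 * time.Second) // just launched: still inside the window
	old := time.Now().Add(-10 * time.Minute)  // pre-existing task: well past the window

	fmt.Println(readyByAge(fresh, timeout, margin)) // false (assumed unready)
	fmt.Println(readyByAge(old, timeout, margin))   // true  (assumed part of an older deployment)
}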
@ -1,134 +0,0 @@
package marathon

import (
	"testing"
	"time"

	"github.com/gambol99/go-marathon"
)

func testReadinessChecker() *readinessChecker {
	return defaultReadinessChecker(false)
}

func TestDisabledReadinessChecker(t *testing.T) {
	var rc *readinessChecker
	tsk := task()
	app := application(
		deployments("deploymentId"),
		readinessCheck(0),
		readinessCheckResult(testTaskName, false),
	)

	if ready := rc.Do(tsk, app); !ready {
		t.Error("expected ready = true")
	}
}

func TestEnabledReadinessChecker(t *testing.T) {
	tests := []struct {
		desc          string
		task          marathon.Task
		app           marathon.Application
		rc            readinessChecker
		expectedReady bool
	}{
		{
			desc:          "no deployment running",
			task:          task(),
			app:           application(),
			expectedReady: true,
		},
		{
			desc:          "no readiness checks defined",
			task:          task(),
			app:           application(deployments("deploymentId")),
			expectedReady: true,
		},
		{
			desc: "readiness check result negative",
			task: task(),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
				readinessCheckResult("otherTaskID", true),
				readinessCheckResult(testTaskName, false),
			),
			expectedReady: false,
		},
		{
			desc: "readiness check result positive",
			task: task(),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
				readinessCheckResult("otherTaskID", false),
				readinessCheckResult(testTaskName, true),
			),
			expectedReady: true,
		},
		{
			desc: "no readiness check result with default timeout",
			task: task(startedAtFromNow(3 * time.Minute)),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
			),
			rc: readinessChecker{
				checkDefaultTimeout: 5 * time.Minute,
			},
			expectedReady: false,
		},
		{
			desc: "no readiness check result with readiness check timeout",
			task: task(startedAtFromNow(4 * time.Minute)),
			app: application(
				deployments("deploymentId"),
				readinessCheck(3*time.Minute),
			),
			rc: readinessChecker{
				checkSafetyMargin: 3 * time.Minute,
			},
			expectedReady: false,
		},
		{
			desc: "invalid task start time",
			task: task(startedAt("invalid")),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
			),
			expectedReady: false,
		},
		{
			desc: "task not involved in deployment",
			task: task(startedAtFromNow(1 * time.Hour)),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
			),
			rc: readinessChecker{
				checkDefaultTimeout: 10 * time.Second,
			},
			expectedReady: true,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()
			rc := testReadinessChecker()
			if test.rc.checkDefaultTimeout > 0 {
				rc.checkDefaultTimeout = test.rc.checkDefaultTimeout
			}
			if test.rc.checkSafetyMargin > 0 {
				rc.checkSafetyMargin = test.rc.checkSafetyMargin
			}
			actualReady := test.rc.Do(test.task, test.app)
			if actualReady != test.expectedReady {
				t.Errorf("actual ready = %t, expected ready = %t", actualReady, test.expectedReady)
			}
		})
	}
}
@ -1,310 +0,0 @@
|
|||
package mesos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/mesosphere/mesos-dns/records/state"
|
||||
)
|
||||
|
||||
type taskData struct {
|
||||
state.Task
|
||||
TraefikLabels map[string]string
|
||||
SegmentName string
|
||||
}
|
||||
|
||||
func (p *Provider) buildConfiguration(tasks []state.Task) *types.Configuration {
|
||||
var mesosFuncMap = template.FuncMap{
|
||||
"getDomain": label.GetFuncString(label.TraefikDomain, p.Domain),
|
||||
"getSubDomain": p.getSubDomain,
|
||||
"getSegmentSubDomain": p.getSegmentSubDomain,
|
||||
"getID": getID,
|
||||
|
||||
// Backend functions
|
||||
"getBackendName": getBackendName,
|
||||
"getCircuitBreaker": label.GetCircuitBreaker,
|
||||
"getLoadBalancer": label.GetLoadBalancer,
|
||||
"getMaxConn": label.GetMaxConn,
|
||||
"getHealthCheck": label.GetHealthCheck,
|
||||
"getBuffering": label.GetBuffering,
|
||||
"getResponseForwarding": label.GetResponseForwarding,
|
||||
"getServers": p.getServers,
|
||||
"getHost": p.getHost,
|
||||
"getServerPort": p.getServerPort,
|
||||
|
||||
// Frontend functions
|
||||
"getSegmentNameSuffix": getSegmentNameSuffix,
|
||||
"getFrontEndName": getFrontendName,
|
||||
"getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
|
||||
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
|
||||
"getAuth": label.GetAuth,
|
||||
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
|
||||
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
|
||||
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
|
||||
"getPassTLSClientCert": label.GetTLSClientCert,
|
||||
"getFrontendRule": p.getFrontendRule,
|
||||
"getRedirect": label.GetRedirect,
|
||||
"getErrorPages": label.GetErrorPages,
|
||||
"getRateLimit": label.GetRateLimit,
|
||||
"getHeaders": label.GetHeaders,
|
||||
"getWhiteList": label.GetWhiteList,
|
||||
}
|
||||
|
||||
appsTasks := p.filterTasks(tasks)
|
||||
|
||||
templateObjects := struct {
|
||||
ApplicationsTasks map[string][]taskData
|
||||
Domain string
|
||||
}{
|
||||
ApplicationsTasks: appsTasks,
|
||||
Domain: p.Domain,
|
||||
}
|
||||
|
||||
configuration, err := p.GetConfiguration("templates/mesos.tmpl", mesosFuncMap, templateObjects)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
return configuration
|
||||
}
|
||||
|
||||
func (p *Provider) filterTasks(tasks []state.Task) map[string][]taskData {
|
||||
appsTasks := make(map[string][]taskData)
|
||||
|
||||
for _, task := range tasks {
|
||||
taskLabels := label.ExtractTraefikLabels(extractLabels(task))
|
||||
for segmentName, traefikLabels := range taskLabels {
|
||||
data := taskData{
|
||||
Task: task,
|
||||
TraefikLabels: traefikLabels,
|
||||
SegmentName: segmentName,
|
||||
}
|
||||
|
||||
if taskFilter(data, p.ExposedByDefault) {
|
||||
name := getName(data)
|
||||
if _, ok := appsTasks[name]; !ok {
|
||||
appsTasks[name] = []taskData{data}
|
||||
} else {
|
||||
appsTasks[name] = append(appsTasks[name], data)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appsTasks
|
||||
}
|
||||
|
||||
func taskFilter(task taskData, exposedByDefaultFlag bool) bool {
|
||||
name := getName(task)
|
||||
|
||||
if len(task.DiscoveryInfo.Ports.DiscoveryPorts) == 0 {
|
||||
log.Debugf("Filtering Mesos task without port %s", name)
|
||||
return false
|
||||
}
|
||||
if !isEnabled(task, exposedByDefaultFlag) {
|
||||
log.Debugf("Filtering disabled Mesos task %s", name)
|
||||
return false
|
||||
}
|
||||
|
||||
// filter indeterminable task port
|
||||
portIndexLabel := label.GetStringValue(task.TraefikLabels, label.TraefikPortIndex, "")
|
||||
portNameLabel := label.GetStringValue(task.TraefikLabels, label.TraefikPortName, "")
|
||||
portValueLabel := label.GetStringValue(task.TraefikLabels, label.TraefikPort, "")
|
||||
if portIndexLabel != "" && portValueLabel != "" {
|
||||
log.Debugf("Filtering Mesos task %s specifying both %q' and %q labels", task.Name, label.TraefikPortIndex, label.TraefikPort)
|
||||
return false
|
||||
}
|
||||
if portIndexLabel != "" {
|
||||
index, err := strconv.Atoi(portIndexLabel)
|
||||
if err != nil || index < 0 || index > len(task.DiscoveryInfo.Ports.DiscoveryPorts)-1 {
|
||||
log.Debugf("Filtering Mesos task %s with unexpected value for %q label", task.Name, label.TraefikPortIndex)
|
||||
return false
|
||||
}
|
||||
}
|
||||
if portValueLabel != "" {
|
||||
port, err := strconv.Atoi(portValueLabel)
|
||||
if err != nil {
|
||||
log.Debugf("Filtering Mesos task %s with unexpected value for %q label", task.Name, label.TraefikPort)
|
||||
return false
|
||||
}
|
||||
|
||||
var foundPort bool
|
||||
for _, exposedPort := range task.DiscoveryInfo.Ports.DiscoveryPorts {
|
||||
if port == exposedPort.Number {
|
||||
foundPort = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundPort {
|
||||
log.Debugf("Filtering Mesos task %s without a matching port for %q label", task.Name, label.TraefikPort)
|
||||
return false
|
||||
}
|
||||
}
|
||||
if portNameLabel != "" {
|
||||
var foundPort bool
|
||||
for _, exposedPort := range task.DiscoveryInfo.Ports.DiscoveryPorts {
|
||||
if portNameLabel == exposedPort.Name {
|
||||
foundPort = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundPort {
|
||||
log.Debugf("Filtering Mesos task %s without a matching port for %q label", task.Name, label.TraefikPortName)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// filter healthChecks
|
||||
if task.Statuses != nil && len(task.Statuses) > 0 && task.Statuses[0].Healthy != nil && !*task.Statuses[0].Healthy {
|
||||
log.Debugf("Filtering Mesos task %s with bad healthCheck", name)
|
||||
return false
|
||||
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getID(task taskData) string {
|
||||
return provider.Normalize(task.ID + getSegmentNameSuffix(task.SegmentName))
|
||||
}
|
||||
|
||||
func getName(task taskData) string {
|
||||
return provider.Normalize(task.DiscoveryInfo.Name + getSegmentNameSuffix(task.SegmentName))
|
||||
}
|
||||
|
||||
func getBackendName(task taskData) string {
|
||||
return label.GetStringValue(task.TraefikLabels, label.TraefikBackend, getName(task))
|
||||
}
|
||||
|
||||
func getFrontendName(task taskData) string {
|
||||
// TODO task.ID -> task.Name + task.ID
|
||||
return provider.Normalize(task.ID + getSegmentNameSuffix(task.SegmentName))
|
||||
}
|
||||
|
||||
func getSegmentNameSuffix(serviceName string) string {
|
||||
if len(serviceName) > 0 {
|
||||
return "-service-" + provider.Normalize(serviceName)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *Provider) getSubDomain(name string) string {
|
||||
if p.GroupsAsSubDomains {
|
||||
splitedName := strings.Split(strings.TrimPrefix(name, "/"), "/")
|
||||
provider.ReverseStringSlice(&splitedName)
|
||||
reverseName := strings.Join(splitedName, ".")
|
||||
return reverseName
|
||||
}
|
||||
return strings.Replace(strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1), "_", "-", -1)
|
||||
}
|
||||
|
||||
func (p *Provider) getSegmentSubDomain(task taskData) string {
|
||||
subDomain := strings.ToLower(p.getSubDomain(task.DiscoveryInfo.Name))
|
||||
if len(task.SegmentName) > 0 {
|
||||
subDomain = strings.ToLower(provider.Normalize(task.SegmentName)) + "." + subDomain
|
||||
}
|
||||
return subDomain
|
||||
}
|
||||
|
||||
// getFrontendRule returns the frontend rule for the specified application, using it's label.
|
||||
// It returns a default one (Host) if the label is not present.
|
||||
func (p *Provider) getFrontendRule(task taskData) string {
|
||||
if v := label.GetStringValue(task.TraefikLabels, label.TraefikFrontendRule, ""); len(v) > 0 {
|
||||
return v
|
||||
}
|
||||
|
||||
domain := label.GetStringValue(task.TraefikLabels, label.TraefikDomain, p.Domain)
|
||||
if len(domain) > 0 {
|
||||
domain = "." + domain
|
||||
}
|
||||
|
||||
return "Host:" + p.getSegmentSubDomain(task) + domain
|
||||
}
|
||||
|
||||
func (p *Provider) getServers(tasks []taskData) map[string]types.Server {
|
||||
var servers map[string]types.Server
|
||||
|
||||
for _, task := range tasks {
|
||||
if servers == nil {
|
||||
servers = make(map[string]types.Server)
|
||||
}
|
||||
|
||||
protocol := label.GetStringValue(task.TraefikLabels, label.TraefikProtocol, label.DefaultProtocol)
|
||||
host := p.getHost(task)
|
||||
port := p.getServerPort(task)
|
||||
|
||||
serverName := "server-" + getID(task)
|
||||
servers[serverName] = types.Server{
|
||||
URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(host, port)),
|
||||
Weight: getIntValue(task.TraefikLabels, label.TraefikWeight, label.DefaultWeight, math.MaxInt32),
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
func (p *Provider) getHost(task taskData) string {
|
||||
return task.IP(strings.Split(p.IPSources, ",")...)
|
||||
}
|
||||
|
||||
func (p *Provider) getServerPort(task taskData) string {
|
||||
if label.Has(task.TraefikLabels, label.TraefikPort) {
|
||||
pv := label.GetIntValue(task.TraefikLabels, label.TraefikPort, 0)
|
||||
if pv <= 0 {
|
||||
log.Errorf("explicitly specified port %d must be larger than zero", pv)
|
||||
return ""
|
||||
}
|
||||
return strconv.Itoa(pv)
|
||||
}
|
||||
|
||||
plv := getIntValue(task.TraefikLabels, label.TraefikPortIndex, math.MinInt32, len(task.DiscoveryInfo.Ports.DiscoveryPorts)-1)
|
||||
if plv >= 0 {
|
||||
return strconv.Itoa(task.DiscoveryInfo.Ports.DiscoveryPorts[plv].Number)
|
||||
}
|
||||
|
||||
// Find named port using traefik.portName or the segment name
|
||||
if pn := label.GetStringValue(task.TraefikLabels, label.TraefikPortName, task.SegmentName); len(pn) > 0 {
|
||||
for _, port := range task.DiscoveryInfo.Ports.DiscoveryPorts {
|
||||
if pn == port.Name {
|
||||
return strconv.Itoa(port.Number)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, port := range task.DiscoveryInfo.Ports.DiscoveryPorts {
|
||||
return strconv.Itoa(port.Number)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isEnabled(task taskData, exposedByDefault bool) bool {
|
||||
return label.GetBoolValue(task.TraefikLabels, label.TraefikEnable, exposedByDefault)
|
||||
}
|
||||
|
||||
// Label functions
|
||||
|
||||
func getIntValue(labels map[string]string, labelName string, defaultValue int, maxValue int) int {
|
||||
value := label.GetIntValue(labels, labelName, defaultValue)
|
||||
if value <= maxValue {
|
||||
return value
|
||||
}
|
||||
log.Warnf("The value %d for %s exceed the max authorized value %d, falling back to %d.", value, labelName, maxValue, defaultValue)
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
func extractLabels(task state.Task) map[string]string {
|
||||
labels := make(map[string]string)
|
||||
for _, lbl := range task.Labels {
|
||||
labels[lbl.Key] = lbl.Value
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
|
@ -1,392 +0,0 @@
|
|||
package mesos
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/mesosphere/mesos-dns/records/state"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBuildConfigurationSegments(t *testing.T) {
|
||||
p := &Provider{
|
||||
Domain: "mesos.localhost",
|
||||
ExposedByDefault: true,
|
||||
IPSources: "host",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
tasks []state.Task
|
||||
expectedFrontends map[string]*types.Frontend
|
||||
expectedBackends map[string]*types.Backend
|
||||
}{
|
||||
{
|
||||
desc: "multiple ports with segments",
|
||||
tasks: []state.Task{
|
||||
aTask("app-taskID",
|
||||
withIP("127.0.0.1"),
|
||||
withInfo("/app",
|
||||
withPorts(
|
||||
withPort("TCP", 80, "web"),
|
||||
withPort("TCP", 81, "admin"),
|
||||
),
|
||||
),
|
||||
withStatus(withHealthy(true), withState("TASK_RUNNING")),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "1000"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withSegmentLabel(label.TraefikPort, "80", "web"),
|
||||
withSegmentLabel(label.TraefikPort, "81", "admin"),
|
||||
withLabel("traefik..port", "82"), // This should be ignored, as it fails to match the segmentPropertiesRegexp regex.
|
||||
withSegmentLabel(label.TraefikFrontendRule, "Host:web.app.mesos.localhost", "web"),
|
||||
withSegmentLabel(label.TraefikFrontendRule, "Host:admin.app.mesos.localhost", "admin"),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-app-taskID-service-web": {
|
||||
Backend: "backend-app-service-web",
|
||||
Routes: map[string]types.Route{
|
||||
`route-host-app-taskID-service-web`: {
|
||||
Rule: "Host:web.app.mesos.localhost",
|
||||
},
|
||||
},
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
},
|
||||
"frontend-app-taskID-service-admin": {
|
||||
Backend: "backend-app-service-admin",
|
||||
Routes: map[string]types.Route{
|
||||
`route-host-app-taskID-service-admin`: {
|
||||
Rule: "Host:admin.app.mesos.localhost",
|
||||
},
|
||||
},
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-app-service-web": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-app-taskID-service-web": {
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
Amount: 1000,
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
},
|
||||
"backend-app-service-admin": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-app-taskID-service-admin": {
|
||||
URL: "http://127.0.0.1:81",
|
||||
Weight: label.DefaultWeight,
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
Amount: 1000,
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "when all labels are set",
|
||||
tasks: []state.Task{
|
||||
aTask("app-taskID",
|
||||
withIP("127.0.0.1"),
|
||||
withInfo("/app",
|
||||
withPorts(
|
||||
withPort("TCP", 80, "web"),
|
||||
withPort("TCP", 81, "admin"),
|
||||
),
|
||||
),
|
||||
withStatus(withHealthy(true), withState("TASK_RUNNING")),
|
||||
|
||||
withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"),
|
||||
withLabel(label.TraefikBackendHealthCheckScheme, "http"),
|
||||
withLabel(label.TraefikBackendHealthCheckPath, "/health"),
|
||||
withLabel(label.TraefikBackendHealthCheckPort, "880"),
|
||||
withLabel(label.TraefikBackendHealthCheckInterval, "6"),
|
||||
withLabel(label.TraefikBackendHealthCheckTimeout, "3"),
|
||||
withLabel(label.TraefikBackendHealthCheckHostname, "foo.com"),
|
||||
withLabel(label.TraefikBackendHealthCheckHeaders, "Foo:bar || Bar:foo"),
|
||||
withLabel(label.TraefikBackendLoadBalancerMethod, "drr"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "666"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
|
||||
withLabel(label.TraefikBackendBufferingMemResponseBodyBytes, "2097152"),
|
||||
withLabel(label.TraefikBackendBufferingMaxRequestBodyBytes, "10485760"),
|
||||
withLabel(label.TraefikBackendBufferingMemRequestBodyBytes, "2097152"),
|
||||
withLabel(label.TraefikBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"),
|
||||
|
||||
withSegmentLabel(label.TraefikPort, "80", "containous"),
|
||||
withSegmentLabel(label.TraefikPortName, "web", "containous"),
|
||||
withSegmentLabel(label.TraefikProtocol, "https", "containous"),
|
||||
withSegmentLabel(label.TraefikWeight, "12", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertPem, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotBefore, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotAfter, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSans, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCountry, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectLocality, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectProvince, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, "true", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasic, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasicRemoveHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthBasicUsersFile, ".htpasswd", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthDigestRemoveHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthDigestUsersFile, ".htpasswd", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardAddress, "auth.server", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTrustForwardHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSCa, "ca.crt", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSCaOptional, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSCert, "server.crt", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSKey, "server.key", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthForwardTLSInsecureSkipVerify, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendEntryPoints, "http,https", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassHostHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPassTLSCert, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPriority, "666", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectEntryPoint, "https", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectRegex, "nope", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectReplacement, "nope", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRedirectPermanent, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendRule, "Host:traefik.io", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendWhiteListSourceRange, "10.10.10.10", "containous"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendRequestHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendResponseHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLProxyHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendAllowedHosts, "foo,bar,bor", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendHostsProxyHeaders, "foo,bar,bor", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLForceHost, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLHost, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendCustomFrameOptionsValue, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendContentSecurityPolicy, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendPublicKey, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendReferrerPolicy, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendCustomBrowserXSSValue, "foo", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSTSSeconds, "666", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLRedirect, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSSLTemporaryRedirect, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSTSIncludeSubdomains, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendSTSPreload, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendForceSTSHeader, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendFrameDeny, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendContentTypeNosniff, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendBrowserXSSFilter, "true", "containous"),
|
||||
withSegmentLabel(label.TraefikFrontendIsDevelopment, "true", "containous"),
|
||||
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageStatus, "404"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageBackend, "foobar"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageQuery, "foo_query"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageStatus, "500,600"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageBackend, "foobar"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageQuery, "bar_query"),
|
||||
|
||||
withSegmentLabel(label.TraefikFrontendRateLimitExtractorFunc, "client.ip", "containous"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitPeriod, "6"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitAverage, "12"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitBurst, "18"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitPeriod, "3"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitAverage, "6"),
|
||||
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitBurst, "9"),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-app-taskID-service-containous": {
|
||||
EntryPoints: []string{
|
||||
"http",
|
||||
"https",
|
||||
},
|
||||
Backend: "backend-app-service-containous",
|
||||
Routes: map[string]types.Route{
|
||||
"route-host-app-taskID-service-containous": {
|
||||
Rule: "Host:traefik.io",
|
||||
},
|
||||
},
|
||||
PassHostHeader: true,
|
||||
PassTLSCert: true,
|
||||
Priority: 666,
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
NotAfter: true,
|
||||
Subject: &types.TLSCLientCertificateSubjectInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Auth: &types.Auth{
|
||||
HeaderField: "X-WebAuth-User",
|
||||
Basic: &types.Basic{
|
||||
RemoveHeader: true,
|
||||
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
|
||||
UsersFile: ".htpasswd",
|
||||
},
|
||||
},
|
||||
|
||||
WhiteList: &types.WhiteList{
|
||||
SourceRange: []string{"10.10.10.10"},
|
||||
},
|
||||
Headers: &types.Headers{
|
||||
CustomRequestHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
CustomResponseHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
AllowedHosts: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
HostsProxyHeaders: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"bor",
|
||||
},
|
||||
SSLRedirect: true,
|
||||
SSLTemporaryRedirect: true,
|
||||
SSLForceHost: true,
|
||||
SSLHost: "foo",
|
||||
SSLProxyHeaders: map[string]string{
|
||||
"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
},
|
||||
STSSeconds: 666,
|
||||
STSIncludeSubdomains: true,
|
||||
STSPreload: true,
|
||||
ForceSTSHeader: true,
|
||||
FrameDeny: true,
|
||||
CustomFrameOptionsValue: "foo",
|
||||
ContentTypeNosniff: true,
|
||||
BrowserXSSFilter: true,
|
||||
CustomBrowserXSSValue: "foo",
|
||||
ContentSecurityPolicy: "foo",
|
||||
PublicKey: "foo",
|
||||
ReferrerPolicy: "foo",
|
||||
IsDevelopment: true,
|
||||
},
|
||||
Errors: map[string]*types.ErrorPage{
|
||||
"bar": {
|
||||
Status: []string{
|
||||
"500",
|
||||
"600",
|
||||
},
|
||||
Backend: "backend-foobar",
|
||||
Query: "bar_query",
|
||||
},
|
||||
"foo": {
|
||||
Status: []string{
|
||||
"404",
|
||||
},
|
||||
Backend: "backend-foobar",
|
||||
Query: "foo_query",
|
||||
},
|
||||
},
|
||||
RateLimit: &types.RateLimit{
|
||||
RateSet: map[string]*types.Rate{
|
||||
"bar": {
|
||||
Period: parse.Duration(3 * time.Second),
|
||||
Average: 6,
|
||||
Burst: 9,
|
||||
},
|
||||
"foo": {
|
||||
Period: parse.Duration(6 * time.Second),
|
||||
Average: 12,
|
||||
Burst: 18,
|
||||
},
|
||||
},
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
Redirect: &types.Redirect{
|
||||
EntryPoint: "https",
|
||||
Permanent: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-app-service-containous": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-app-taskID-service-containous": {
|
||||
URL: "https://127.0.0.1:80",
|
||||
Weight: 12,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: &types.CircuitBreaker{
|
||||
Expression: "NetworkErrorRatio() > 0.5",
|
||||
},
|
||||
LoadBalancer: &types.LoadBalancer{
|
||||
Method: "drr",
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
Amount: 666,
|
||||
ExtractorFunc: "client.ip",
|
||||
},
|
||||
HealthCheck: &types.HealthCheck{
|
||||
Scheme: "http",
|
||||
Path: "/health",
|
||||
Port: 880,
|
||||
Interval: "6",
|
||||
Timeout: "3",
|
||||
Hostname: "foo.com",
|
||||
Headers: map[string]string{
|
||||
"Bar": "foo",
|
||||
"Foo": "bar",
|
||||
},
|
||||
},
|
||||
Buffering: &types.Buffering{
|
||||
MaxResponseBodyBytes: 10485760,
|
||||
MemResponseBodyBytes: 2097152,
|
||||
MaxRequestBodyBytes: 10485760,
|
||||
MemRequestBodyBytes: 2097152,
|
||||
RetryExpression: "IsNetworkError() && Attempts() <= 2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actualConfig := p.buildConfiguration(test.tasks)
|
||||
|
||||
require.NotNil(t, actualConfig)
|
||||
assert.Equal(t, test.expectedBackends, actualConfig.Backends)
|
||||
assert.Equal(t, test.expectedFrontends, actualConfig.Frontends)
|
||||
})
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
|
|
@ -1,173 +0,0 @@
|
|||
package mesos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/mesos/mesos-go/detector"
|
||||
"github.com/mesosphere/mesos-dns/records"
|
||||
"github.com/mesosphere/mesos-dns/records/state"
|
||||
|
||||
// Register mesos zoo the detector
|
||||
_ "github.com/mesos/mesos-go/detector/zoo"
|
||||
"github.com/mesosphere/mesos-dns/detect"
|
||||
"github.com/mesosphere/mesos-dns/logging"
|
||||
"github.com/mesosphere/mesos-dns/util"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configuration of the provider.
|
||||
type Provider struct {
|
||||
provider.BaseProvider
|
||||
Endpoint string `description:"Mesos server endpoint. You can also specify multiple endpoint for Mesos"`
|
||||
Domain string `description:"Default domain used"`
|
||||
ExposedByDefault bool `description:"Expose Mesos apps by default" export:"true"`
|
||||
GroupsAsSubDomains bool `description:"Convert Mesos groups to subdomains" export:"true"`
|
||||
ZkDetectionTimeout int `description:"Zookeeper timeout (in seconds)" export:"true"`
|
||||
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||
IPSources string `description:"IPSources (e.g. host, docker, mesos, netinfo)" export:"true"`
|
||||
StateTimeoutSecond int `description:"HTTP Timeout (in seconds)" export:"true"`
|
||||
Masters []string
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
return p.BaseProvider.Init(constraints)
|
||||
}
|
||||
|
||||
// Provide allows the mesos provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
operation := func() error {
|
||||
|
||||
// initialize logging
|
||||
logging.SetupLogs()
|
||||
|
||||
log.Debugf("%s", p.IPSources)
|
||||
|
||||
var zk string
|
||||
var masters []string
|
||||
|
||||
if strings.HasPrefix(p.Endpoint, "zk://") {
|
||||
zk = p.Endpoint
|
||||
} else {
|
||||
masters = strings.Split(p.Endpoint, ",")
|
||||
}
|
||||
|
||||
errch := make(chan error)
|
||||
|
||||
changed := detectMasters(zk, masters)
|
||||
reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
|
||||
zkTimeout := time.Second * time.Duration(p.ZkDetectionTimeout)
|
||||
timeout := time.AfterFunc(zkTimeout, func() {
|
||||
if zkTimeout > 0 {
|
||||
errch <- fmt.Errorf("master detection timed out after %s", zkTimeout)
|
||||
}
|
||||
})
|
||||
|
||||
defer reload.Stop()
|
||||
defer util.HandleCrash()
|
||||
|
||||
if !p.Watch {
|
||||
reload.Stop()
|
||||
timeout.Stop()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-reload.C:
|
||||
tasks := p.getTasks()
|
||||
configuration := p.buildConfiguration(tasks)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "mesos",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
case masters := <-changed:
|
||||
if len(masters) == 0 || masters[0] == "" {
|
||||
// no leader
|
||||
timeout.Reset(zkTimeout)
|
||||
} else {
|
||||
timeout.Stop()
|
||||
}
|
||||
log.Debugf("new masters detected: %v", masters)
|
||||
p.Masters = masters
|
||||
tasks := p.getTasks()
|
||||
configuration := p.buildConfiguration(tasks)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "mesos",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
case err := <-errch:
|
||||
log.Errorf("%s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Mesos connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot connect to Mesos server %+v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func detectMasters(zk string, masters []string) <-chan []string {
|
||||
changed := make(chan []string, 1)
|
||||
if zk != "" {
|
||||
log.Debugf("Starting master detector for ZK %s", zk)
|
||||
if md, err := detector.New(zk); err != nil {
|
||||
log.Errorf("Failed to create master detector: %v", err)
|
||||
} else if err := md.Detect(detect.NewMasters(masters, changed)); err != nil {
|
||||
log.Errorf("Failed to initialize master detector: %v", err)
|
||||
}
|
||||
} else {
|
||||
changed <- masters
|
||||
}
|
||||
return changed
|
||||
}
|
||||
|
||||
func (p *Provider) getTasks() []state.Task {
|
||||
rg := records.NewRecordGenerator(time.Duration(p.StateTimeoutSecond) * time.Second)
|
||||
|
||||
st, err := rg.FindMaster(p.Masters...)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create a client for Mesos, error: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return taskRecords(st)
|
||||
}
|
||||
|
||||
func taskRecords(st state.State) []state.Task {
|
||||
var tasks []state.Task
|
||||
for _, f := range st.Frameworks {
|
||||
for _, task := range f.Tasks {
|
||||
for _, slave := range st.Slaves {
|
||||
if task.SlaveID == slave.ID {
|
||||
task.SlaveIP = slave.PID.Host
|
||||
}
|
||||
}
|
||||
|
||||
// only do running and discoverable tasks
|
||||
if task.State == "TASK_RUNNING" {
|
||||
tasks = append(tasks, task)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return tasks
|
||||
}
|
||||
|
|
@ -1,184 +0,0 @@
|
|||
package mesos
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/mesosphere/mesos-dns/records/state"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// test helpers
|
||||
|
||||
func TestBuilder(t *testing.T) {
|
||||
result := aTask("ID1",
|
||||
withIP("10.10.10.10"),
|
||||
withLabel("foo", "bar"),
|
||||
withLabel("fii", "bar"),
|
||||
withLabel("fuu", "bar"),
|
||||
withInfo("name1",
|
||||
withPorts(withPort("TCP", 80, "p"),
|
||||
withPortTCP(81, "n"))),
|
||||
withStatus(withHealthy(true), withState("a")))
|
||||
|
||||
expected := state.Task{
|
||||
FrameworkID: "",
|
||||
ID: "ID1",
|
||||
SlaveIP: "10.10.10.10",
|
||||
Name: "",
|
||||
SlaveID: "",
|
||||
State: "",
|
||||
Statuses: []state.Status{{
|
||||
State: "a",
|
||||
Healthy: Bool(true),
|
||||
ContainerStatus: state.ContainerStatus{},
|
||||
}},
|
||||
DiscoveryInfo: state.DiscoveryInfo{
|
||||
Name: "name1",
|
||||
Labels: struct {
|
||||
Labels []state.Label `json:"labels"`
|
||||
}{},
|
||||
Ports: state.Ports{DiscoveryPorts: []state.DiscoveryPort{
|
||||
{Protocol: "TCP", Number: 80, Name: "p"},
|
||||
{Protocol: "TCP", Number: 81, Name: "n"}}}},
|
||||
Labels: []state.Label{
|
||||
{Key: "foo", Value: "bar"},
|
||||
{Key: "fii", Value: "bar"},
|
||||
{Key: "fuu", Value: "bar"}}}
|
||||
|
||||
assert.Equal(t, expected, result)
|
||||
}
|
||||
|
||||
func aTaskData(id, segment string, ops ...func(*state.Task)) taskData {
|
||||
ts := &state.Task{ID: id}
|
||||
for _, op := range ops {
|
||||
op(ts)
|
||||
}
|
||||
lbls := label.ExtractTraefikLabels(extractLabels(*ts))
|
||||
if len(lbls[segment]) > 0 {
|
||||
return taskData{Task: *ts, TraefikLabels: lbls[segment], SegmentName: segment}
|
||||
}
|
||||
return taskData{Task: *ts, TraefikLabels: lbls[""], SegmentName: segment}
|
||||
}
|
||||
|
||||
func segmentedTaskData(segments []string, ts state.Task) []taskData {
|
||||
var td []taskData
|
||||
lbls := label.ExtractTraefikLabels(extractLabels(ts))
|
||||
for _, s := range segments {
|
||||
if l, ok := lbls[s]; !ok {
|
||||
td = append(td, taskData{Task: ts, TraefikLabels: lbls[""], SegmentName: s})
|
||||
} else {
|
||||
td = append(td, taskData{Task: ts, TraefikLabels: l, SegmentName: s})
|
||||
}
|
||||
}
|
||||
return td
|
||||
}
|
||||
|
||||
func aTask(id string, ops ...func(*state.Task)) state.Task {
|
||||
ts := &state.Task{ID: id}
|
||||
for _, op := range ops {
|
||||
op(ts)
|
||||
}
|
||||
return *ts
|
||||
}
|
||||
|
||||
func withIP(ip string) func(*state.Task) {
|
||||
return func(task *state.Task) {
|
||||
task.SlaveIP = ip
|
||||
}
|
||||
}
|
||||
|
||||
func withInfo(name string, ops ...func(*state.DiscoveryInfo)) func(*state.Task) {
|
||||
return func(task *state.Task) {
|
||||
info := &state.DiscoveryInfo{Name: name}
|
||||
for _, op := range ops {
|
||||
op(info)
|
||||
}
|
||||
task.DiscoveryInfo = *info
|
||||
}
|
||||
}
|
||||
|
||||
func withPorts(ops ...func(port *state.DiscoveryPort)) func(*state.DiscoveryInfo) {
|
||||
return func(info *state.DiscoveryInfo) {
|
||||
var ports []state.DiscoveryPort
|
||||
for _, op := range ops {
|
||||
pt := &state.DiscoveryPort{}
|
||||
op(pt)
|
||||
ports = append(ports, *pt)
|
||||
}
|
||||
|
||||
info.Ports = state.Ports{
|
||||
DiscoveryPorts: ports,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withPort(proto string, port int, name string) func(port *state.DiscoveryPort) {
|
||||
return func(p *state.DiscoveryPort) {
|
||||
p.Protocol = proto
|
||||
p.Number = port
|
||||
p.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
func withPortTCP(port int, name string) func(port *state.DiscoveryPort) {
|
||||
return withPort("TCP", port, name)
|
||||
}
|
||||
|
||||
func withStatus(ops ...func(*state.Status)) func(*state.Task) {
|
||||
return func(task *state.Task) {
|
||||
st := &state.Status{}
|
||||
for _, op := range ops {
|
||||
op(st)
|
||||
}
|
||||
task.Statuses = append(task.Statuses, *st)
|
||||
}
|
||||
}
|
||||
func withDefaultStatus(ops ...func(*state.Status)) func(*state.Task) {
|
||||
return func(task *state.Task) {
|
||||
for _, op := range ops {
|
||||
st := &state.Status{
|
||||
State: "TASK_RUNNING",
|
||||
Healthy: Bool(true),
|
||||
}
|
||||
op(st)
|
||||
task.Statuses = append(task.Statuses, *st)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withHealthy(st bool) func(*state.Status) {
|
||||
return func(status *state.Status) {
|
||||
status.Healthy = Bool(st)
|
||||
}
|
||||
}
|
||||
|
||||
func withState(st string) func(*state.Status) {
|
||||
return func(status *state.Status) {
|
||||
status.State = st
|
||||
}
|
||||
}
|
||||
|
||||
func withLabel(key, value string) func(*state.Task) {
|
||||
return func(task *state.Task) {
|
||||
lbl := state.Label{Key: key, Value: value}
|
||||
task.Labels = append(task.Labels, lbl)
|
||||
}
|
||||
}
|
||||
|
||||
func withSegmentLabel(key, value, segmentName string) func(*state.Task) {
|
||||
if len(segmentName) == 0 {
|
||||
panic("segmentName can not be empty")
|
||||
}
|
||||
|
||||
property := strings.TrimPrefix(key, label.Prefix)
|
||||
return func(task *state.Task) {
|
||||
lbl := state.Label{Key: label.Prefix + segmentName + "." + property, Value: value}
|
||||
task.Labels = append(task.Labels, lbl)
|
||||
}
|
||||
}
|
||||
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
package mesos
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/mesos/mesos-go/upid"
|
||||
"github.com/mesosphere/mesos-dns/records/state"
|
||||
)
|
||||
|
||||
func TestTaskRecords(t *testing.T) {
|
||||
var task = state.Task{
|
||||
SlaveID: "s_id",
|
||||
State: "TASK_RUNNING",
|
||||
}
|
||||
var framework = state.Framework{
|
||||
Tasks: []state.Task{task},
|
||||
}
|
||||
var slave = state.Slave{
|
||||
ID: "s_id",
|
||||
Hostname: "127.0.0.1",
|
||||
}
|
||||
slave.PID.UPID = &upid.UPID{}
|
||||
slave.PID.Host = slave.Hostname
|
||||
|
||||
var taskState = state.State{
|
||||
Slaves: []state.Slave{slave},
|
||||
Frameworks: []state.Framework{framework},
|
||||
}
|
||||
|
||||
var p = taskRecords(taskState)
|
||||
if len(p) == 0 {
|
||||
t.Fatal("No task")
|
||||
}
|
||||
if p[0].SlaveIP != slave.Hostname {
|
||||
t.Fatalf("The SlaveIP (%s) should be set with the slave hostname (%s)", p[0].SlaveID, slave.Hostname)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,16 +1,7 @@
|
|||
package provider
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"text/template"
|
||||
"unicode"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/Masterminds/sprig"
|
||||
"github.com/containous/traefik/autogen/gentemplates"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/config"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
|
@ -19,131 +10,6 @@ import (
|
|||
type Provider interface {
|
||||
// Provide allows the provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error
|
||||
Provide(configurationChan chan<- config.Message, pool *safe.Pool) error
|
||||
Init(constraints types.Constraints) error
|
||||
}
|
||||
|
||||
// BaseProvider should be embedded in provider implementations
|
||||
type BaseProvider struct {
|
||||
Watch bool `description:"Watch provider" export:"true"`
|
||||
Filename string `description:"Override default configuration template. For advanced users :)" export:"true"`
|
||||
Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"`
|
||||
Trace bool `description:"Display additional provider logs (if available)." export:"true"`
|
||||
DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"`
|
||||
}
|
||||
|
||||
// Init registers the given constraints; BaseProvider keeps this method for compatibility reasons
|
||||
func (p *BaseProvider) Init(constraints types.Constraints) error {
|
||||
p.Constraints = append(p.Constraints, constraints...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MatchConstraints checks that the given tags match EVERY single constraint;
|
||||
// it returns the first constraint that does not match, or nil.
|
||||
func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) {
|
||||
// if there is no tags and no constraints, filtering is disabled
|
||||
if len(tags) == 0 && len(p.Constraints) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
for _, constraint := range p.Constraints {
|
||||
// xor: the constraint fails when the match result and constraint.MustMatch differ
|
||||
if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch {
|
||||
return false, constraint
|
||||
}
|
||||
}
|
||||
|
||||
// No constraints, or every constraint matched
|
||||
return true, nil
|
||||
}
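
// exampleMatchConstraints sketches the matching semantics: a MustMatch
// constraint keeps only tag sets containing a matching tag, while a
// MustMatch=false constraint rejects tag sets that do match. The key and
// glob pattern below are arbitrary examples.
func exampleMatchConstraints() bool {
	p := BaseProvider{
		Constraints: types.Constraints{
			{Key: "tag", MustMatch: true, Regex: "us-east-*"},
		},
	}
	ok, _ := p.MatchConstraints([]string{"us-east-1"}) // true: one tag matches
	return ok
}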
|
||||
|
||||
// GetConfiguration returns the provider configuration built from the default template (file or content) or from an overriding template file
|
||||
func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) {
|
||||
tmplContent, err := p.getTemplateContent(defaultTemplate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.CreateConfiguration(tmplContent, funcMap, templateObjects)
|
||||
}
|
||||
|
||||
// CreateConfiguration creates a provider configuration from the given content using templating
|
||||
func (p *BaseProvider) CreateConfiguration(tmplContent string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) {
|
||||
var defaultFuncMap = sprig.TxtFuncMap()
|
||||
// tolower is deprecated in favor of sprig's lower function
|
||||
defaultFuncMap["tolower"] = strings.ToLower
|
||||
defaultFuncMap["normalize"] = Normalize
|
||||
defaultFuncMap["split"] = split
|
||||
for funcID, funcElement := range funcMap {
|
||||
defaultFuncMap[funcID] = funcElement
|
||||
}
|
||||
|
||||
tmpl := template.New(p.Filename).Funcs(defaultFuncMap)
|
||||
|
||||
_, err := tmpl.Parse(tmplContent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buffer bytes.Buffer
|
||||
err = tmpl.Execute(&buffer, templateObjects)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var renderedTemplate = buffer.String()
|
||||
if p.DebugLogGeneratedTemplate {
|
||||
log.Debugf("Template content: %s", tmplContent)
|
||||
log.Debugf("Rendering results: %s", renderedTemplate)
|
||||
}
|
||||
return p.DecodeConfiguration(renderedTemplate)
|
||||
}
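
// exampleInlineTemplate sketches a GetConfiguration call with inline template
// content and a custom function map; it assumes p.Filename is empty so that
// getTemplateContent falls through to the raw content. The template text and
// the backendName function are illustrative only.
func exampleInlineTemplate(p *BaseProvider) (*types.Configuration, error) {
	tmplContent := `[backends]
  [backends.{{ backendName }}.servers.server0]
  url = "http://127.0.0.1:8080"`
	funcMap := template.FuncMap{
		"backendName": func() string { return "backend1" },
	}
	return p.GetConfiguration(tmplContent, funcMap, nil)
}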
|
||||
|
||||
// DecodeConfiguration decodes a *types.Configuration from the given content
|
||||
func (p *BaseProvider) DecodeConfiguration(content string) (*types.Configuration, error) {
|
||||
configuration := new(types.Configuration)
|
||||
if _, err := toml.Decode(content, configuration); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return configuration, nil
|
||||
}
|
||||
|
||||
func (p *BaseProvider) getTemplateContent(defaultTemplateFile string) (string, error) {
|
||||
if len(p.Filename) > 0 {
|
||||
buf, err := ioutil.ReadFile(p.Filename)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
if strings.HasSuffix(defaultTemplateFile, ".tmpl") {
|
||||
buf, err := gentemplates.Asset(defaultTemplateFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
return defaultTemplateFile, nil
|
||||
}
|
||||
|
||||
func split(sep, s string) []string {
|
||||
return strings.Split(s, sep)
|
||||
}
|
||||
|
||||
// Normalize transforms a string so that it works with the rest of Traefik:
|
||||
// Replace '.' with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78
|
||||
func Normalize(name string) string {
|
||||
fargs := func(c rune) bool {
|
||||
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
|
||||
}
|
||||
// split on the separator runes above and join the fields back with '-'
|
||||
return strings.Join(strings.FieldsFunc(name, fargs), "-")
|
||||
}
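
// exampleNormalize sketches the effect of Normalize: every run of
// non-alphanumeric runes collapses into a single "-" separator, and leading
// or trailing separators disappear.
func exampleNormalize() []string {
	return []string{
		Normalize("frontend/1"), // "frontend-1"
		Normalize(".foo.foo."),  // "foo-foo"
	}
}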
|
||||
|
||||
// ReverseStringSlice inverts the order of the given string slice in place
|
||||
func ReverseStringSlice(slice *[]string) {
|
||||
for i, j := 0, len(*slice)-1; i < j; i, j = i+1, j-1 {
|
||||
(*slice)[i], (*slice)[j] = (*slice)[j], (*slice)[i]
|
||||
}
|
||||
}
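
// exampleReverse sketches ReverseStringSlice, which reverses through the
// pointer it receives rather than returning a new slice.
func exampleReverse() []string {
	s := []string{"a", "b", "c"}
	ReverseStringSlice(&s)
	return s // []string{"c", "b", "a"}
}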
|
||||
|
|
|
|||
|
|
@ -1,475 +0,0 @@
|
|||
package provider
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/template"
|
||||
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type myProvider struct {
|
||||
BaseProvider
|
||||
TLS *types.ClientTLS
|
||||
}
|
||||
|
||||
func (p *myProvider) Foo() string {
|
||||
return "bar"
|
||||
}
|
||||
|
||||
func TestConfigurationErrors(t *testing.T) {
|
||||
templateErrorFile, err := ioutil.TempFile("", "provider-configuration-error")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer os.RemoveAll(templateErrorFile.Name())
|
||||
|
||||
data := []byte("Not a valid template {{ Bar }}")
|
||||
|
||||
err = ioutil.WriteFile(templateErrorFile.Name(), data, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
templateInvalidTOMLFile, err := ioutil.TempFile("", "provider-configuration-error")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer os.RemoveAll(templateInvalidTOMLFile.Name())
|
||||
|
||||
data = []byte(`Hello {{ .Name }}
|
||||
{{ Foo }}`)
|
||||
|
||||
err = ioutil.WriteFile(templateInvalidTOMLFile.Name(), data, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
invalids := []struct {
|
||||
provider *myProvider
|
||||
defaultTemplate string
|
||||
expectedError string
|
||||
funcMap template.FuncMap
|
||||
templateObjects interface{}
|
||||
}{
|
||||
{
|
||||
provider: &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: "/non/existent/template.tmpl",
|
||||
},
|
||||
},
|
||||
expectedError: "open /non/existent/template.tmpl: no such file or directory",
|
||||
},
|
||||
{
|
||||
provider: &myProvider{},
|
||||
defaultTemplate: "non/existent/template.tmpl",
|
||||
expectedError: "Asset non/existent/template.tmpl not found",
|
||||
},
|
||||
{
|
||||
provider: &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: templateErrorFile.Name(),
|
||||
},
|
||||
},
|
||||
expectedError: `function "Bar" not defined`,
|
||||
},
|
||||
{
|
||||
provider: &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: templateInvalidTOMLFile.Name(),
|
||||
},
|
||||
},
|
||||
expectedError: "Near line 1 (last key parsed 'Hello'): expected key separator '=', but got '<' instead",
|
||||
funcMap: template.FuncMap{
|
||||
"Foo": func() string {
|
||||
return "bar"
|
||||
},
|
||||
},
|
||||
templateObjects: struct{ Name string }{Name: "bar"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, invalid := range invalids {
|
||||
configuration, err := invalid.provider.GetConfiguration(invalid.defaultTemplate, invalid.funcMap, nil)
|
||||
if err == nil || !strings.Contains(err.Error(), invalid.expectedError) {
|
||||
t.Fatalf("should have generate an error with %q, got %v", invalid.expectedError, err)
|
||||
}
|
||||
|
||||
assert.Nil(t, configuration)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetConfiguration(t *testing.T) {
|
||||
templateFile, err := ioutil.TempFile("", "provider-configuration")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer os.RemoveAll(templateFile.Name())
|
||||
|
||||
data := []byte(`[backends]
|
||||
[backends.backend1]
|
||||
[backends.backend1.circuitbreaker]
|
||||
expression = "NetworkErrorRatio() > 0.5"
|
||||
[backends.backend1.servers.server1]
|
||||
url = "http://172.17.0.2:80"
|
||||
weight = 10
|
||||
[backends.backend1.servers.server2]
|
||||
url = "http://172.17.0.3:80"
|
||||
weight = 1
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend1]
|
||||
backend = "backend1"
|
||||
passHostHeader = true
|
||||
[frontends.frontend11.routes.test_2]
|
||||
rule = "Path"
|
||||
value = "/test"`)
|
||||
|
||||
err = ioutil.WriteFile(templateFile.Name(), data, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
provider := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: templateFile.Name(),
|
||||
},
|
||||
}
|
||||
|
||||
configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, configuration)
|
||||
}
|
||||
|
||||
func TestGetConfigurationReturnsCorrectMaxConnConfiguration(t *testing.T) {
|
||||
templateFile, err := ioutil.TempFile("", "provider-configuration")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer os.RemoveAll(templateFile.Name())
|
||||
|
||||
data := []byte(`[backends]
|
||||
[backends.backend1]
|
||||
[backends.backend1.maxconn]
|
||||
amount = 10
|
||||
extractorFunc = "request.host"`)
|
||||
|
||||
err = ioutil.WriteFile(templateFile.Name(), data, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
provider := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: templateFile.Name(),
|
||||
},
|
||||
}
|
||||
|
||||
configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, configuration)
|
||||
require.Contains(t, configuration.Backends, "backend1")
|
||||
assert.EqualValues(t, 10, configuration.Backends["backend1"].MaxConn.Amount)
|
||||
assert.Equal(t, "request.host", configuration.Backends["backend1"].MaxConn.ExtractorFunc)
|
||||
}
|
||||
|
||||
func TestNilClientTLS(t *testing.T) {
|
||||
p := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: "",
|
||||
},
|
||||
}
|
||||
|
||||
_, err := p.TLS.CreateTLSConfig()
|
||||
require.NoError(t, err, "CreateTLSConfig should assume that consumer does not want a TLS configuration if input is nil")
|
||||
}
|
||||
|
||||
func TestInsecureSkipVerifyClientTLS(t *testing.T) {
|
||||
p := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: "",
|
||||
},
|
||||
TLS: &types.ClientTLS{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
}
|
||||
|
||||
config, err := p.TLS.CreateTLSConfig()
|
||||
require.NoError(t, err, "CreateTLSConfig should assume that consumer does not want a TLS configuration if input is nil")
|
||||
|
||||
assert.True(t, config.InsecureSkipVerify, "CreateTLSConfig should support setting only InsecureSkipVerify property")
|
||||
}
|
||||
|
||||
func TestInsecureSkipVerifyFalseClientTLS(t *testing.T) {
|
||||
p := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: "",
|
||||
},
|
||||
TLS: &types.ClientTLS{
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
}
|
||||
|
||||
_, err := p.TLS.CreateTLSConfig()
|
||||
assert.Errorf(t, err, "CreateTLSConfig should error if consumer does not set a TLS cert or key configuration and not chooses InsecureSkipVerify to be true")
|
||||
}
|
||||
|
||||
func TestMatchingConstraints(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
constraints types.Constraints
|
||||
tags []string
|
||||
expected bool
|
||||
}{
|
||||
// simple test: must match
|
||||
{
|
||||
desc: "tag==us-east-1 with us-east-1",
|
||||
constraints: types.Constraints{
|
||||
{
|
||||
Key: "tag",
|
||||
MustMatch: true,
|
||||
Regex: "us-east-1",
|
||||
},
|
||||
},
|
||||
tags: []string{
|
||||
"us-east-1",
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
// simple test: must match but does not match
|
||||
{
|
||||
desc: "tag==us-east-1 with us-east-2",
|
||||
constraints: types.Constraints{
|
||||
{
|
||||
Key: "tag",
|
||||
MustMatch: true,
|
||||
Regex: "us-east-1",
|
||||
},
|
||||
},
|
||||
tags: []string{
|
||||
"us-east-2",
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
// simple test: must not match
|
||||
{
|
||||
desc: "tag!=us-east-1 with us-east-1",
|
||||
constraints: types.Constraints{
|
||||
{
|
||||
Key: "tag",
|
||||
MustMatch: false,
|
||||
Regex: "us-east-1",
|
||||
},
|
||||
},
|
||||
tags: []string{
|
||||
"us-east-1",
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
// complex test: globbing
|
||||
{
|
||||
desc: "tag!=us-east-* with us-east-1",
|
||||
constraints: types.Constraints{
|
||||
{
|
||||
Key: "tag",
|
||||
MustMatch: true,
|
||||
Regex: "us-east-*",
|
||||
},
|
||||
},
|
||||
tags: []string{
|
||||
"us-east-1",
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
// complex test: multiple constraints
|
||||
{
|
||||
desc: "tag==us-east-* & tag!=api with us-east-1 & api",
|
||||
constraints: types.Constraints{
|
||||
{
|
||||
Key: "tag",
|
||||
MustMatch: true,
|
||||
Regex: "us-east-*",
|
||||
},
|
||||
{
|
||||
Key: "tag",
|
||||
MustMatch: false,
|
||||
Regex: "api",
|
||||
},
|
||||
},
|
||||
tags: []string{
|
||||
"api",
|
||||
"us-east-1",
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
p := myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Constraints: test.constraints,
|
||||
},
|
||||
}
|
||||
|
||||
actual, _ := p.MatchConstraints(test.tags)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultFuncMap(t *testing.T) {
|
||||
templateFile, err := ioutil.TempFile("", "provider-configuration")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(templateFile.Name())
|
||||
|
||||
data := []byte(`
|
||||
[backends]
|
||||
[backends.{{ "backend-1" | replace "-" "" }}]
|
||||
[backends.{{ "BACKEND1" | tolower }}.circuitbreaker]
|
||||
expression = "NetworkErrorRatio() > 0.5"
|
||||
[backends.servers.server1]
|
||||
url = "http://172.17.0.2:80"
|
||||
weight = 10
|
||||
[backends.backend1.servers.server2]
|
||||
url = "http://172.17.0.3:80"
|
||||
weight = 1
|
||||
|
||||
[frontends]
|
||||
[frontends.{{normalize "frontend/1"}}]
|
||||
{{ $backend := "backend1/test/value" | split "/" }}
|
||||
{{ $backendid := index $backend 1 }}
|
||||
{{ if "backend1" | contains "backend" }}
|
||||
backend = "backend1"
|
||||
{{end}}
|
||||
passHostHeader = true
|
||||
[frontends.frontend-1.routes.test_2]
|
||||
rule = "Path"
|
||||
value = "/test"`)
|
||||
|
||||
err = ioutil.WriteFile(templateFile.Name(), data, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
provider := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: templateFile.Name(),
|
||||
},
|
||||
}
|
||||
|
||||
configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, configuration)
|
||||
assert.Contains(t, configuration.Backends, "backend1")
|
||||
assert.Contains(t, configuration.Frontends, "frontend-1")
|
||||
}
|
||||
|
||||
func TestSprigFunctions(t *testing.T) {
|
||||
templateFile, err := ioutil.TempFile("", "provider-configuration")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer os.RemoveAll(templateFile.Name())
|
||||
|
||||
data := []byte(`
|
||||
{{$backend_name := trimAll "-" uuidv4}}
|
||||
[backends]
|
||||
[backends.{{$backend_name}}]
|
||||
[backends.{{$backend_name}}.circuitbreaker]
|
||||
[backends.{{$backend_name}}.servers.server2]
|
||||
url = "http://172.17.0.3:80"
|
||||
weight = 1
|
||||
|
||||
[frontends]
|
||||
[frontends.{{normalize "frontend/1"}}]
|
||||
backend = "{{$backend_name}}"
|
||||
passHostHeader = true
|
||||
[frontends.frontend-1.routes.test_2]
|
||||
rule = "Path"
|
||||
value = "/test"`)
|
||||
|
||||
err = ioutil.WriteFile(templateFile.Name(), data, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
provider := &myProvider{
|
||||
BaseProvider: BaseProvider{
|
||||
Filename: templateFile.Name(),
|
||||
},
|
||||
}
|
||||
|
||||
configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, configuration)
|
||||
assert.Len(t, configuration.Backends, 1)
|
||||
assert.Contains(t, configuration.Frontends, "frontend-1")
|
||||
}
|
||||
|
||||
func TestBaseProvider_GetConfiguration(t *testing.T) {
|
||||
baseProvider := BaseProvider{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
defaultTemplateFile string
|
||||
expectedContent string
|
||||
}{
|
||||
{
|
||||
defaultTemplateFile: "templates/docker.tmpl",
|
||||
expectedContent: readTemplateFile(t, "./../templates/docker.tmpl"),
|
||||
},
|
||||
{
|
||||
defaultTemplateFile: `template content`,
|
||||
expectedContent: `template content`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
|
||||
content, err := baseProvider.getTemplateContent(test.defaultTemplateFile)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, test.expectedContent, content)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
desc: "without special chars",
|
||||
name: "foobar",
|
||||
expected: "foobar",
|
||||
},
|
||||
{
|
||||
desc: "with special chars",
|
||||
name: "foo.foo.foo;foo:foo!foo/foo\\foo)foo_123-ç_àéè",
|
||||
expected: "foo-foo-foo-foo-foo-foo-foo-foo-foo-123-ç-àéè",
|
||||
},
|
||||
{
|
||||
desc: "starts with special chars",
|
||||
name: ".foo.foo",
|
||||
expected: "foo-foo",
|
||||
},
|
||||
{
|
||||
desc: "ends with special chars",
|
||||
name: "foo.foo.",
|
||||
expected: "foo-foo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := Normalize(test.name)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func readTemplateFile(t *testing.T, path string) string {
|
||||
t.Helper()
|
||||
expectedContent, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return string(expectedContent)
|
||||
}
|
||||
|
|
@ -1,254 +0,0 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
rancher "github.com/rancher/go-rancher/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
labelRancherStackServiceName = "io.rancher.stack_service.name"
|
||||
hostNetwork = "host"
|
||||
)
|
||||
|
||||
var withoutPagination *rancher.ListOpts
|
||||
|
||||
// APIConfiguration contains configuration properties specific to the Rancher
|
||||
// API provider.
|
||||
type APIConfiguration struct {
|
||||
Endpoint string `description:"Rancher server API HTTP(S) endpoint"`
|
||||
AccessKey string `description:"Rancher server API access key"`
|
||||
SecretKey string `description:"Rancher server API secret key"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
withoutPagination = &rancher.ListOpts{
|
||||
Filters: map[string]interface{}{"limit": 0},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) createClient() (*rancher.RancherClient, error) {
|
||||
rancherURL := getenv("CATTLE_URL", p.API.Endpoint)
|
||||
accessKey := getenv("CATTLE_ACCESS_KEY", p.API.AccessKey)
|
||||
secretKey := getenv("CATTLE_SECRET_KEY", p.API.SecretKey)
|
||||
|
||||
return rancher.NewRancherClient(&rancher.ClientOpts{
|
||||
Url: rancherURL,
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
})
|
||||
}
|
||||
|
||||
func getenv(key, fallback string) string {
|
||||
value := os.Getenv(key)
|
||||
if len(value) == 0 {
|
||||
return fallback
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
|
||||
if p.API == nil {
|
||||
p.API = &APIConfiguration{}
|
||||
}
|
||||
|
||||
safe.Go(func() {
|
||||
operation := func() error {
|
||||
rancherClient, err := p.createClient()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create a client for rancher, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var stacks = listRancherStacks(rancherClient)
|
||||
var services = listRancherServices(rancherClient)
|
||||
var container = listRancherContainer(rancherClient)
|
||||
|
||||
var rancherData = parseAPISourcedRancherData(stacks, services, container)
|
||||
|
||||
configuration := p.buildConfiguration(rancherData)
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
|
||||
if p.Watch {
|
||||
_, cancel := context.WithCancel(ctx)
|
||||
ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
|
||||
pool.Go(func(stop chan bool) {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
checkAPI, errAPI := rancherClient.ApiKey.List(withoutPagination)
|
||||
|
||||
if errAPI != nil {
|
||||
log.Errorf("Cannot establish connection: %+v, Rancher API return: %+v; Skipping refresh Data from Rancher API.", errAPI, checkAPI)
|
||||
} else {
|
||||
log.Debugf("Refreshing new Data from Rancher API")
|
||||
stacks := listRancherStacks(rancherClient)
|
||||
services := listRancherServices(rancherClient)
|
||||
container := listRancherContainer(rancherClient)
|
||||
|
||||
rancherData := parseAPISourcedRancherData(stacks, services, container)
|
||||
|
||||
configuration := p.buildConfiguration(rancherData)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-stop:
|
||||
ticker.Stop()
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Provider connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot connect to Provider Endpoint %+v", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func listRancherStacks(client *rancher.RancherClient) []*rancher.Stack {
|
||||
|
||||
var stackList []*rancher.Stack
|
||||
|
||||
stacks, err := client.Stack.List(withoutPagination)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Cannot get Provider Stacks %+v", err)
|
||||
}
|
||||
|
||||
for k := range stacks.Data {
|
||||
stackList = append(stackList, &stacks.Data[k])
|
||||
}
|
||||
|
||||
return stackList
|
||||
}
|
||||
|
||||
func listRancherServices(client *rancher.RancherClient) []*rancher.Service {
|
||||
|
||||
var servicesList []*rancher.Service
|
||||
|
||||
services, err := client.Service.List(withoutPagination)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Cannot get Provider Services %+v", err)
|
||||
}
|
||||
|
||||
for k := range services.Data {
|
||||
servicesList = append(servicesList, &services.Data[k])
|
||||
}
|
||||
|
||||
return servicesList
|
||||
}
|
||||
|
||||
func listRancherContainer(client *rancher.RancherClient) []*rancher.Container {
|
||||
|
||||
var containerList []*rancher.Container
|
||||
|
||||
container, err := client.Container.List(withoutPagination)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Cannot get Provider Services %+v", err)
|
||||
}
|
||||
|
||||
valid := true
|
||||
|
||||
for valid {
|
||||
for k := range container.Data {
|
||||
containerList = append(containerList, &container.Data[k])
|
||||
}
|
||||
|
||||
container, err = container.Next()
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if container == nil || len(container.Data) == 0 {
|
||||
valid = false
|
||||
}
|
||||
}
|
||||
|
||||
return containerList
|
||||
}
|
||||
|
||||
func parseAPISourcedRancherData(stacks []*rancher.Stack, services []*rancher.Service, containers []*rancher.Container) []rancherData {
|
||||
var rancherDataList []rancherData
|
||||
|
||||
for _, stack := range stacks {
|
||||
|
||||
for _, service := range services {
|
||||
|
||||
if service.StackId != stack.Id {
|
||||
continue
|
||||
}
|
||||
|
||||
rData := rancherData{
|
||||
Name: service.Name + "/" + stack.Name,
|
||||
Health: service.HealthState,
|
||||
State: service.State,
|
||||
Labels: make(map[string]string),
|
||||
Containers: []string{},
|
||||
}
|
||||
|
||||
if service.LaunchConfig == nil || service.LaunchConfig.Labels == nil {
|
||||
log.Warnf("Rancher Service Labels are missing. Stack: %s, service: %s", stack.Name, service.Name)
|
||||
} else {
|
||||
for key, value := range service.LaunchConfig.Labels {
|
||||
rData.Labels[key] = value.(string)
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range containers {
|
||||
if container.Labels[labelRancherStackServiceName] == stack.Name+"/"+service.Name &&
|
||||
containerFilter(container.Name, container.HealthState, container.State) {
|
||||
|
||||
if container.NetworkMode == hostNetwork {
|
||||
var endpoints []*rancher.PublicEndpoint
|
||||
err := mapstructure.Decode(service.PublicEndpoints, &endpoints)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Failed to decode PublicEndpoint: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(endpoints) > 0 {
|
||||
rData.Containers = append(rData.Containers, endpoints[0].IpAddress)
|
||||
}
|
||||
} else {
|
||||
rData.Containers = append(rData.Containers, container.PrimaryIpAddress)
|
||||
}
|
||||
}
|
||||
}
|
||||
rancherDataList = append(rancherDataList, rData)
|
||||
}
|
||||
}
|
||||
|
||||
return rancherDataList
|
||||
}
|
||||
|
|
@ -1,210 +0,0 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/BurntSushi/ty/fun"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
func (p *Provider) buildConfiguration(services []rancherData) *types.Configuration {
|
||||
var RancherFuncMap = template.FuncMap{
|
||||
"getLabelValue": label.GetStringValue,
|
||||
"getDomain": label.GetFuncString(label.TraefikDomain, p.Domain),
|
||||
|
||||
// Backend functions
|
||||
"getCircuitBreaker": label.GetCircuitBreaker,
|
||||
"getLoadBalancer": label.GetLoadBalancer,
|
||||
"getMaxConn": label.GetMaxConn,
|
||||
"getHealthCheck": label.GetHealthCheck,
|
||||
"getBuffering": label.GetBuffering,
|
||||
"getResponseForwarding": label.GetResponseForwarding,
|
||||
"getServers": getServers,
|
||||
|
||||
// Frontend functions
|
||||
"getBackendName": getBackendName,
|
||||
"getFrontendRule": p.getFrontendRule,
|
||||
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
|
||||
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
|
||||
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
|
||||
"getPassTLSClientCert": label.GetTLSClientCert,
|
||||
"getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
|
||||
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
|
||||
"getAuth": label.GetAuth,
|
||||
"getErrorPages": label.GetErrorPages,
|
||||
"getRateLimit": label.GetRateLimit,
|
||||
"getRedirect": label.GetRedirect,
|
||||
"getHeaders": label.GetHeaders,
|
||||
"getWhiteList": label.GetWhiteList,
|
||||
}
|
||||
|
||||
// filter services
|
||||
filteredServices := fun.Filter(p.serviceFilter, services).([]rancherData)
|
||||
|
||||
frontends := map[string]rancherData{}
|
||||
backends := map[string]rancherData{}
|
||||
|
||||
for _, service := range filteredServices {
|
||||
segmentProperties := label.ExtractTraefikLabels(service.Labels)
|
||||
for segmentName, labels := range segmentProperties {
|
||||
service.SegmentLabels = labels
|
||||
service.SegmentName = segmentName
|
||||
|
||||
frontendName := p.getFrontendName(service)
|
||||
frontends[frontendName] = service
|
||||
backendName := getBackendName(service)
|
||||
backends[backendName] = service
|
||||
}
|
||||
}
|
||||
|
||||
templateObjects := struct {
|
||||
Frontends map[string]rancherData
|
||||
Backends map[string]rancherData
|
||||
Domain string
|
||||
}{
|
||||
Frontends: frontends,
|
||||
Backends: backends,
|
||||
Domain: p.Domain,
|
||||
}
|
||||
|
||||
configuration, err := p.GetConfiguration("templates/rancher.tmpl", RancherFuncMap, templateObjects)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
return configuration
|
||||
}
|
||||
|
||||
func (p *Provider) serviceFilter(service rancherData) bool {
|
||||
segmentProperties := label.ExtractTraefikLabels(service.Labels)
|
||||
|
||||
for segmentName, labels := range segmentProperties {
|
||||
_, err := checkSegmentPort(labels, segmentName)
|
||||
if err != nil {
|
||||
log.Debugf("Filtering service %s %s without traefik.port label", service.Name, segmentName)
|
||||
return false
|
||||
}
|
||||
|
||||
if len(p.getFrontendRule(service.Name, labels)) == 0 {
|
||||
log.Debugf("Filtering container with empty frontend rule %s %s", service.Name, segmentName)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if !label.IsEnabled(service.Labels, p.ExposedByDefault) {
|
||||
log.Debugf("Filtering disabled service %s", service.Name)
|
||||
return false
|
||||
}
|
||||
|
||||
constraintTags := label.GetSliceStringValue(service.Labels, label.TraefikTags)
|
||||
if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok {
|
||||
if failingConstraint != nil {
|
||||
log.Debugf("Filtering service %s with constraint %s", service.Name, failingConstraint.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Only filter services by Health (HealthState) and State if EnableServiceHealthFilter is true
|
||||
if p.EnableServiceHealthFilter {
|
||||
|
||||
if service.Health != "" && service.Health != healthy && service.Health != updatingHealthy {
|
||||
log.Debugf("Filtering service %s with healthState of %s", service.Name, service.Health)
|
||||
return false
|
||||
}
|
||||
if service.State != "" && service.State != active && service.State != updatingActive && service.State != upgraded && service.State != upgrading {
|
||||
log.Debugf("Filtering service %s with state of %s", service.Name, service.State)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
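
// exampleFilterableService sketches a minimal rancherData value that passes
// serviceFilter when ExposedByDefault is true: a healthy, active service with
// a port label and a frontend rule. The label keys are the standard Traefik
// v1 keys; the name, rule and IP values are only examples.
func exampleFilterableService() rancherData {
	return rancherData{
		Name:   "web/mystack",
		Health: healthy,
		State:  active,
		Labels: map[string]string{
			"traefik.port":          "80",
			"traefik.frontend.rule": "Host:web.example.com",
		},
		Containers: []string{"10.0.0.2"},
	}
}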
|
||||
|
||||
func (p *Provider) getFrontendRule(serviceName string, labels map[string]string) string {
|
||||
domain := label.GetStringValue(labels, label.TraefikDomain, p.Domain)
|
||||
if len(domain) > 0 {
|
||||
domain = "." + domain
|
||||
}
|
||||
|
||||
defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + domain
|
||||
|
||||
return label.GetStringValue(labels, label.TraefikFrontendRule, defaultRule)
|
||||
}
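
// exampleDefaultFrontendRule sketches the default rule: assuming p.Domain is
// "rancher.localhost" and no traefik.frontend.rule label is set, the service
// "web/mystack" yields "Host:web.mystack.rancher.localhost".
func exampleDefaultFrontendRule(p *Provider) string {
	return p.getFrontendRule("web/mystack", map[string]string{})
}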
|
||||
|
||||
func (p *Provider) getFrontendName(service rancherData) string {
|
||||
var name string
|
||||
if len(service.SegmentName) > 0 {
|
||||
name = getBackendName(service)
|
||||
} else {
|
||||
name = p.getFrontendRule(service.Name, service.SegmentLabels)
|
||||
}
|
||||
|
||||
return provider.Normalize(name)
|
||||
}
|
||||
|
||||
func getBackendName(service rancherData) string {
|
||||
if len(service.SegmentName) > 0 {
|
||||
return getSegmentBackendName(service)
|
||||
}
|
||||
|
||||
return getDefaultBackendName(service)
|
||||
}
|
||||
|
||||
func getSegmentBackendName(service rancherData) string {
|
||||
if value := label.GetStringValue(service.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 {
|
||||
return provider.Normalize(service.Name + "-" + value)
|
||||
}
|
||||
|
||||
return provider.Normalize(service.Name + "-" + getDefaultBackendName(service) + "-" + service.SegmentName)
|
||||
}
|
||||
|
||||
func getDefaultBackendName(service rancherData) string {
|
||||
backend := label.GetStringValue(service.SegmentLabels, label.TraefikBackend, service.Name)
|
||||
return provider.Normalize(backend)
|
||||
}
|
||||
|
||||
func getServers(service rancherData) map[string]types.Server {
|
||||
var servers map[string]types.Server
|
||||
|
||||
for index, ip := range service.Containers {
|
||||
if len(ip) == 0 {
|
||||
log.Warnf("Unable to find the IP address for a container in the service %q: this container is ignored.", service.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
if servers == nil {
|
||||
servers = make(map[string]types.Server)
|
||||
}
|
||||
|
||||
protocol := label.GetStringValue(service.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
|
||||
port := label.GetStringValue(service.SegmentLabels, label.TraefikPort, "")
|
||||
weight := label.GetIntValue(service.SegmentLabels, label.TraefikWeight, label.DefaultWeight)
|
||||
|
||||
serverName := "server-" + strconv.Itoa(index)
|
||||
servers[serverName] = types.Server{
|
||||
URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)),
|
||||
Weight: weight,
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
}
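
// exampleServers sketches the output of getServers: with two container IPs, a
// traefik.port label of "80" and the default protocol, it yields entries such
// as "server-0" pointing to "http://10.0.0.2:80" with the default weight.
// The IPs and port value are illustrative only.
func exampleServers() map[string]types.Server {
	return getServers(rancherData{
		Name:          "web/mystack",
		Containers:    []string{"10.0.0.2", "10.0.0.3"},
		SegmentLabels: map[string]string{"traefik.port": "80"},
	})
}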
|
||||
|
||||
func checkSegmentPort(labels map[string]string, segmentName string) (int, error) {
|
||||
if rawPort, ok := labels[label.TraefikPort]; ok {
|
||||
port, err := strconv.Atoi(rawPort)
|
||||
if err != nil {
|
||||
return port, fmt.Errorf("invalid port value %q for the segment %q: %v", rawPort, segmentName, err)
|
||||
}
|
||||
} else {
|
||||
return 0, fmt.Errorf("port label is missing, please use %s as default value or define port label for all segments ('traefik.<segment_name>.port')", label.TraefikPort)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
File diff suppressed because it is too large
|
|
@ -1,138 +0,0 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/traefik/job"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
rancher "github.com/rancher/go-rancher-metadata/metadata"
|
||||
)
|
||||
|
||||
// MetadataConfiguration contains configuration properties specific to
|
||||
// the Rancher metadata service provider.
|
||||
type MetadataConfiguration struct {
|
||||
IntervalPoll bool `description:"Poll the Rancher metadata service every 'rancher.refreshseconds' (less accurate)"`
|
||||
Prefix string `description:"Prefix used for accessing the Rancher metadata service"`
|
||||
}
|
||||
|
||||
func (p *Provider) metadataProvide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
metadataServiceURL := fmt.Sprintf("http://rancher-metadata.rancher.internal/%s", p.Metadata.Prefix)
|
||||
|
||||
safe.Go(func() {
|
||||
operation := func() error {
|
||||
client, err := rancher.NewClientAndWait(metadataServiceURL)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create Rancher metadata service client: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
updateConfiguration := func(version string) {
|
||||
log.WithField("metadata_version", version).Debugln("Refreshing configuration from Rancher metadata service")
|
||||
|
||||
stacks, err := client.GetStacks()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to query Rancher metadata service: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
rancherData := parseMetadataSourcedRancherData(stacks)
|
||||
configuration := p.buildConfiguration(rancherData)
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
updateConfiguration("init")
|
||||
|
||||
if p.Watch {
|
||||
pool.Go(func(stop chan bool) {
|
||||
switch {
|
||||
case p.Metadata.IntervalPoll:
|
||||
p.intervalPoll(client, updateConfiguration, stop)
|
||||
default:
|
||||
p.longPoll(client, updateConfiguration, stop)
|
||||
}
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": err,
|
||||
"retry_in": time,
|
||||
}).Errorln("Rancher metadata service connection error")
|
||||
}
|
||||
|
||||
if err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify); err != nil {
|
||||
log.WithField("endpoint", metadataServiceURL).Errorln("Cannot connect to Rancher metadata service")
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) intervalPoll(client rancher.Client, updateConfiguration func(string), stop chan bool) {
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
|
||||
defer ticker.Stop()
|
||||
|
||||
var version string
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
newVersion, err := client.GetVersion()
|
||||
if err != nil {
|
||||
log.WithField("error", err).Errorln("Failed to read Rancher metadata service version")
|
||||
} else if version != newVersion {
|
||||
version = newVersion
|
||||
updateConfiguration(version)
|
||||
}
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) longPoll(client rancher.Client, updateConfiguration func(string), stop chan bool) {
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Holds the connection until there is either a change in the metadata
|
||||
// repository or `p.RefreshSeconds` has elapsed. Long polling should be
|
||||
// favored for the most accurate configuration updates.
|
||||
safe.Go(func() {
|
||||
client.OnChange(p.RefreshSeconds, updateConfiguration)
|
||||
})
|
||||
<-stop
|
||||
}
|
||||
|
||||
func parseMetadataSourcedRancherData(stacks []rancher.Stack) (rancherDataList []rancherData) {
|
||||
for _, stack := range stacks {
|
||||
for _, service := range stack.Services {
|
||||
var containerIPAddresses []string
|
||||
for _, container := range service.Containers {
|
||||
if containerFilter(container.Name, container.HealthState, container.State) {
|
||||
containerIPAddresses = append(containerIPAddresses, container.PrimaryIp)
|
||||
}
|
||||
}
|
||||
|
||||
rancherDataList = append(rancherDataList, rancherData{
|
||||
Name: service.Name + "/" + stack.Name,
|
||||
State: service.State,
|
||||
Labels: service.Labels,
|
||||
Containers: containerIPAddresses,
|
||||
})
|
||||
}
|
||||
}
|
||||
return rancherDataList
|
||||
}
|
||||
|
|
@ -1,80 +0,0 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// Health
|
||||
healthy = "healthy"
|
||||
updatingHealthy = "updating-healthy"
|
||||
|
||||
// State
|
||||
active = "active"
|
||||
running = "running"
|
||||
upgraded = "upgraded"
|
||||
upgrading = "upgrading"
|
||||
updatingActive = "updating-active"
|
||||
updatingRunning = "updating-running"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||
APIConfiguration `mapstructure:",squash" export:"true"` // Provide backwards compatibility
|
||||
API *APIConfiguration `description:"Enable the Rancher API provider" export:"true"`
|
||||
Metadata *MetadataConfiguration `description:"Enable the Rancher metadata service provider" export:"true"`
|
||||
Domain string `description:"Default domain used"`
|
||||
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||
ExposedByDefault bool `description:"Expose services by default" export:"true"`
|
||||
EnableServiceHealthFilter bool `description:"Filter services with unhealthy states and inactive states" export:"true"`
|
||||
}
|
||||
|
||||
type rancherData struct {
|
||||
Name string
|
||||
Labels map[string]string // List of labels set to container or service
|
||||
Containers []string
|
||||
Health string
|
||||
State string
|
||||
SegmentLabels map[string]string
|
||||
SegmentName string
|
||||
}
|
||||
|
||||
func (r rancherData) String() string {
|
||||
return fmt.Sprintf("{name:%s, labels:%v, containers: %v, health: %s, state: %s}", r.Name, r.Labels, r.Containers, r.Health, r.State)
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
return p.BaseProvider.Init(constraints)
|
||||
}
|
||||
|
||||
// Provide allows either the Rancher API or metadata service provider to
|
||||
// seed configuration into Traefik using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
if p.Metadata == nil {
|
||||
return p.apiProvide(configurationChan, pool)
|
||||
}
|
||||
return p.metadataProvide(configurationChan, pool)
|
||||
}
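
// exampleProvide sketches how a consumer wires the provider: it hands Provide
// a configuration channel plus a worker pool, then reads messages as the
// provider emits them. The pool would normally be created by the caller with
// safe.NewPool; provider settings and full error handling are omitted here.
func exampleProvide(p *Provider, pool *safe.Pool) {
	messages := make(chan types.ConfigMessage, 1)
	if err := p.Provide(messages, pool); err != nil {
		log.Errorf("rancher provider error: %v", err)
		return
	}
	msg := <-messages // blocks until the first configuration is published
	log.Debugf("provider %q sent %d backend(s)", msg.ProviderName, len(msg.Configuration.Backends))
}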
|
||||
|
||||
func containerFilter(name, healthState, state string) bool {
|
||||
if healthState != "" && healthState != healthy && healthState != updatingHealthy {
|
||||
log.Debugf("Filtering container %s with healthState of %s", name, healthState)
|
||||
return false
|
||||
}
|
||||
|
||||
if state != "" && state != running && state != updatingRunning && state != upgraded {
|
||||
log.Debugf("Filtering container %s with state of %s", name, state)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
package rest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/containous/mux"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/unrolled/render"
|
||||
)
|
||||
|
||||
// Provider is a provider.Provider implementation that exposes a REST API
|
||||
type Provider struct {
|
||||
configurationChan chan<- types.ConfigMessage
|
||||
EntryPoint string `description:"EntryPoint" export:"true"`
|
||||
}
|
||||
|
||||
var templatesRenderer = render.New(render.Options{Directory: "nowhere"})
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(_ types.Constraints) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddRoutes adds the REST provider routes to the given router
|
||||
func (p *Provider) AddRoutes(systemRouter *mux.Router) {
|
||||
systemRouter.
|
||||
Methods(http.MethodPut).
|
||||
Path("/api/providers/{provider}").
|
||||
HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
|
||||
|
||||
vars := mux.Vars(request)
|
||||
// TODO: Deprecated configuration - Need to be removed in the future
|
||||
if vars["provider"] != "web" && vars["provider"] != "rest" {
|
||||
response.WriteHeader(http.StatusBadRequest)
|
||||
fmt.Fprint(response, "Only 'rest' provider can be updated through the REST API")
|
||||
return
|
||||
} else if vars["provider"] == "web" {
|
||||
log.Warn("The provider web is deprecated. Please use /rest instead")
|
||||
}
|
||||
|
||||
configuration := new(types.Configuration)
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
err := json.Unmarshal(body, configuration)
|
||||
if err == nil {
|
||||
// TODO: Deprecated configuration - Change to `rest` in the future
|
||||
p.configurationChan <- types.ConfigMessage{ProviderName: "web", Configuration: configuration}
|
||||
err := templatesRenderer.JSON(response, http.StatusOK, configuration)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Error parsing configuration %+v", err)
|
||||
http.Error(response, fmt.Sprintf("%+v", err), http.StatusBadRequest)
|
||||
}
|
||||
})
|
||||
}
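
// exampleUpdateConfiguration sketches how a client pushes a dynamic
// configuration to the route registered above. It assumes the standard
// "bytes" import and that the API entry point listens on localhost:8080;
// both are illustrative assumptions, not part of this provider.
func exampleUpdateConfiguration(cfg *types.Configuration) error {
	body, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPut, "http://localhost:8080/api/providers/rest", bytes.NewReader(body))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}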
|
||||
|
||||
// Provide allows the provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
p.configurationChan = configurationChan
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
package zk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/abronan/valkeyrie/store"
|
||||
"github.com/abronan/valkeyrie/store/zookeeper"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/kv"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
kv.Provider `mapstructure:",squash" export:"true"`
|
||||
}
|
||||
|
||||
// Init the provider
|
||||
func (p *Provider) Init(constraints types.Constraints) error {
|
||||
err := p.Provider.Init(constraints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := p.CreateStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to Connect to KV store: %v", err)
|
||||
}
|
||||
|
||||
p.SetKVClient(store)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provide allows the zk provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
|
||||
return p.Provider.Provide(configurationChan, pool)
|
||||
}
|
||||
|
||||
// CreateStore creates the KV store
|
||||
func (p *Provider) CreateStore() (store.Store, error) {
|
||||
p.SetStoreType(store.ZK)
|
||||
zookeeper.Register()
|
||||
return p.Provider.CreateStore()
|
||||
}
|
||||