Dynamic Configuration Refactoring
parent: d3ae88f108
commit: a09dfa3ce1
452 changed files with 21023 additions and 9419 deletions
old/provider/rancher/api.go (new file, 254 lines)
@@ -0,0 +1,254 @@
package rancher

import (
	"context"
	"os"
	"time"

	"github.com/cenk/backoff"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/old/log"
	"github.com/containous/traefik/old/types"
	"github.com/containous/traefik/safe"
	"github.com/mitchellh/mapstructure"
	rancher "github.com/rancher/go-rancher/v2"
)

const (
	labelRancherStackServiceName = "io.rancher.stack_service.name"
	hostNetwork                  = "host"
)

var withoutPagination *rancher.ListOpts

// APIConfiguration contains configuration properties specific to the Rancher
// API provider.
type APIConfiguration struct {
	Endpoint  string `description:"Rancher server API HTTP(S) endpoint"`
	AccessKey string `description:"Rancher server API access key"`
	SecretKey string `description:"Rancher server API secret key"`
}

func init() {
	withoutPagination = &rancher.ListOpts{
		Filters: map[string]interface{}{"limit": 0},
	}
}

func (p *Provider) createClient() (*rancher.RancherClient, error) {
	rancherURL := getenv("CATTLE_URL", p.API.Endpoint)
	accessKey := getenv("CATTLE_ACCESS_KEY", p.API.AccessKey)
	secretKey := getenv("CATTLE_SECRET_KEY", p.API.SecretKey)

	return rancher.NewRancherClient(&rancher.ClientOpts{
		Url:       rancherURL,
		AccessKey: accessKey,
		SecretKey: secretKey,
	})
}

func getenv(key, fallback string) string {
	value := os.Getenv(key)
	if len(value) == 0 {
		return fallback
	}
	return value
}

func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	if p.API == nil {
		p.API = &APIConfiguration{}
	}

	safe.Go(func() {
		operation := func() error {
			rancherClient, err := p.createClient()
			if err != nil {
				log.Errorf("Failed to create a client for rancher, error: %s", err)
				return err
			}

			ctx := context.Background()
			var stacks = listRancherStacks(rancherClient)
			var services = listRancherServices(rancherClient)
			var container = listRancherContainer(rancherClient)

			var rancherData = parseAPISourcedRancherData(stacks, services, container)

			configuration := p.buildConfiguration(rancherData)
			configurationChan <- types.ConfigMessage{
				ProviderName:  "rancher",
				Configuration: configuration,
			}

			if p.Watch {
				_, cancel := context.WithCancel(ctx)
				ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
				pool.Go(func(stop chan bool) {
					for {
						select {
						case <-ticker.C:
							checkAPI, errAPI := rancherClient.ApiKey.List(withoutPagination)

							if errAPI != nil {
								log.Errorf("Cannot establish connection: %+v, Rancher API return: %+v; Skipping refresh Data from Rancher API.", errAPI, checkAPI)
							} else {
								log.Debugf("Refreshing new Data from Rancher API")
								stacks := listRancherStacks(rancherClient)
								services := listRancherServices(rancherClient)
								container := listRancherContainer(rancherClient)

								rancherData := parseAPISourcedRancherData(stacks, services, container)

								configuration := p.buildConfiguration(rancherData)
								if configuration != nil {
									configurationChan <- types.ConfigMessage{
										ProviderName:  "rancher",
										Configuration: configuration,
									}
								}
							}
						case <-stop:
							ticker.Stop()
							cancel()
							return
						}
					}
				})
			}

			return nil
		}
		notify := func(err error, time time.Duration) {
			log.Errorf("Provider connection error %+v, retrying in %s", err, time)
		}
		err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
		if err != nil {
			log.Errorf("Cannot connect to Provider Endpoint %+v", err)
		}
	})

	return nil
}

func listRancherStacks(client *rancher.RancherClient) []*rancher.Stack {
	var stackList []*rancher.Stack

	stacks, err := client.Stack.List(withoutPagination)
	if err != nil {
		log.Errorf("Cannot get Provider Stacks %+v", err)
	}

	for k := range stacks.Data {
		stackList = append(stackList, &stacks.Data[k])
	}

	return stackList
}

func listRancherServices(client *rancher.RancherClient) []*rancher.Service {
	var servicesList []*rancher.Service

	services, err := client.Service.List(withoutPagination)
	if err != nil {
		log.Errorf("Cannot get Provider Services %+v", err)
	}

	for k := range services.Data {
		servicesList = append(servicesList, &services.Data[k])
	}

	return servicesList
}

func listRancherContainer(client *rancher.RancherClient) []*rancher.Container {
	var containerList []*rancher.Container

	container, err := client.Container.List(withoutPagination)
	if err != nil {
		log.Errorf("Cannot get Provider Services %+v", err)
	}

	valid := true

	for valid {
		for k := range container.Data {
			containerList = append(containerList, &container.Data[k])
		}

		container, err = container.Next()
		if err != nil {
			break
		}

		if container == nil || len(container.Data) == 0 {
			valid = false
		}
	}

	return containerList
}

func parseAPISourcedRancherData(stacks []*rancher.Stack, services []*rancher.Service, containers []*rancher.Container) []rancherData {
	var rancherDataList []rancherData

	for _, stack := range stacks {
		for _, service := range services {
			if service.StackId != stack.Id {
				continue
			}

			rData := rancherData{
				Name:       service.Name + "/" + stack.Name,
				Health:     service.HealthState,
				State:      service.State,
				Labels:     make(map[string]string),
				Containers: []string{},
			}

			if service.LaunchConfig == nil || service.LaunchConfig.Labels == nil {
				log.Warnf("Rancher Service Labels are missing. Stack: %s, service: %s", stack.Name, service.Name)
			} else {
				for key, value := range service.LaunchConfig.Labels {
					rData.Labels[key] = value.(string)
				}
			}

			for _, container := range containers {
				if container.Labels[labelRancherStackServiceName] == stack.Name+"/"+service.Name &&
					containerFilter(container.Name, container.HealthState, container.State) {

					if container.NetworkMode == hostNetwork {
						var endpoints []*rancher.PublicEndpoint
						err := mapstructure.Decode(service.PublicEndpoints, &endpoints)
						if err != nil {
							log.Errorf("Failed to decode PublicEndpoint: %v", err)
							continue
						}

						if len(endpoints) > 0 {
							rData.Containers = append(rData.Containers, endpoints[0].IpAddress)
						}
					} else {
						rData.Containers = append(rData.Containers, container.PrimaryIpAddress)
					}
				}
			}
			rancherDataList = append(rancherDataList, rData)
		}
	}

	return rancherDataList
}
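
For reference, a minimal self-contained sketch (not part of the commit) of the CATTLE_* precedence implemented by getenv and createClient above; the endpoint string is a placeholder, and only the override behaviour is illustrated:

package main

import (
	"fmt"
	"os"
)

// getenv mirrors the helper above: an environment variable, if set, wins over
// the value taken from APIConfiguration.
func getenv(key, fallback string) string {
	value := os.Getenv(key)
	if len(value) == 0 {
		return fallback
	}
	return value
}

func main() {
	configuredEndpoint := "http://rancher.example.com/v2-beta" // placeholder for APIConfiguration.Endpoint

	fmt.Println(getenv("CATTLE_URL", configuredEndpoint)) // env unset: configured value is used

	os.Setenv("CATTLE_URL", "http://rancher.internal:8080/v2-beta")
	fmt.Println(getenv("CATTLE_URL", configuredEndpoint)) // env set: CATTLE_URL wins
}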

old/provider/rancher/config.go (new file, 210 lines)
@@ -0,0 +1,210 @@
package rancher

import (
	"fmt"
	"net"
	"strconv"
	"strings"
	"text/template"

	"github.com/BurntSushi/ty/fun"
	"github.com/containous/traefik/old/log"
	"github.com/containous/traefik/old/provider"
	"github.com/containous/traefik/old/provider/label"
	"github.com/containous/traefik/old/types"
)

func (p *Provider) buildConfiguration(services []rancherData) *types.Configuration {
	var RancherFuncMap = template.FuncMap{
		"getLabelValue": label.GetStringValue,
		"getDomain":     label.GetFuncString(label.TraefikDomain, p.Domain),

		// Backend functions
		"getCircuitBreaker":     label.GetCircuitBreaker,
		"getLoadBalancer":       label.GetLoadBalancer,
		"getMaxConn":            label.GetMaxConn,
		"getHealthCheck":        label.GetHealthCheck,
		"getBuffering":          label.GetBuffering,
		"getResponseForwarding": label.GetResponseForwarding,
		"getServers":            getServers,

		// Frontend functions
		"getBackendName":       getBackendName,
		"getFrontendRule":      p.getFrontendRule,
		"getPriority":          label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
		"getPassHostHeader":    label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
		"getPassTLSCert":       label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
		"getPassTLSClientCert": label.GetTLSClientCert,
		"getEntryPoints":       label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
		"getBasicAuth":         label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
		"getAuth":              label.GetAuth,
		"getErrorPages":        label.GetErrorPages,
		"getRateLimit":         label.GetRateLimit,
		"getRedirect":          label.GetRedirect,
		"getHeaders":           label.GetHeaders,
		"getWhiteList":         label.GetWhiteList,
	}

	// filter services
	filteredServices := fun.Filter(p.serviceFilter, services).([]rancherData)

	frontends := map[string]rancherData{}
	backends := map[string]rancherData{}

	for _, service := range filteredServices {
		segmentProperties := label.ExtractTraefikLabels(service.Labels)
		for segmentName, labels := range segmentProperties {
			service.SegmentLabels = labels
			service.SegmentName = segmentName

			frontendName := p.getFrontendName(service)
			frontends[frontendName] = service
			backendName := getBackendName(service)
			backends[backendName] = service
		}
	}

	templateObjects := struct {
		Frontends map[string]rancherData
		Backends  map[string]rancherData
		Domain    string
	}{
		Frontends: frontends,
		Backends:  backends,
		Domain:    p.Domain,
	}

	configuration, err := p.GetConfiguration("templates/rancher.tmpl", RancherFuncMap, templateObjects)
	if err != nil {
		log.Error(err)
	}

	return configuration
}

func (p *Provider) serviceFilter(service rancherData) bool {
	segmentProperties := label.ExtractTraefikLabels(service.Labels)

	for segmentName, labels := range segmentProperties {
		_, err := checkSegmentPort(labels, segmentName)
		if err != nil {
			log.Debugf("Filtering service %s %s without traefik.port label", service.Name, segmentName)
			return false
		}

		if len(p.getFrontendRule(service.Name, labels)) == 0 {
			log.Debugf("Filtering container with empty frontend rule %s %s", service.Name, segmentName)
			return false
		}
	}

	if !label.IsEnabled(service.Labels, p.ExposedByDefault) {
		log.Debugf("Filtering disabled service %s", service.Name)
		return false
	}

	constraintTags := label.GetSliceStringValue(service.Labels, label.TraefikTags)
	if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok {
		if failingConstraint != nil {
			log.Debugf("Filtering service %s with constraint %s", service.Name, failingConstraint.String())
		}
		return false
	}

	// Only filter services by Health (HealthState) and State if EnableServiceHealthFilter is true
	if p.EnableServiceHealthFilter {
		if service.Health != "" && service.Health != healthy && service.Health != updatingHealthy {
			log.Debugf("Filtering service %s with healthState of %s", service.Name, service.Health)
			return false
		}
		if service.State != "" && service.State != active && service.State != updatingActive && service.State != upgraded && service.State != upgrading {
			log.Debugf("Filtering service %s with state of %s", service.Name, service.State)
			return false
		}
	}

	return true
}

func (p *Provider) getFrontendRule(serviceName string, labels map[string]string) string {
	domain := label.GetStringValue(labels, label.TraefikDomain, p.Domain)
	if len(domain) > 0 {
		domain = "." + domain
	}

	defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + domain

	return label.GetStringValue(labels, label.TraefikFrontendRule, defaultRule)
}

func (p *Provider) getFrontendName(service rancherData) string {
	var name string
	if len(service.SegmentName) > 0 {
		name = getBackendName(service)
	} else {
		name = p.getFrontendRule(service.Name, service.SegmentLabels)
	}

	return provider.Normalize(name)
}

func getBackendName(service rancherData) string {
	if len(service.SegmentName) > 0 {
		return getSegmentBackendName(service)
	}

	return getDefaultBackendName(service)
}

func getSegmentBackendName(service rancherData) string {
	if value := label.GetStringValue(service.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 {
		return provider.Normalize(service.Name + "-" + value)
	}

	return provider.Normalize(service.Name + "-" + getDefaultBackendName(service) + "-" + service.SegmentName)
}

func getDefaultBackendName(service rancherData) string {
	backend := label.GetStringValue(service.SegmentLabels, label.TraefikBackend, service.Name)
	return provider.Normalize(backend)
}

func getServers(service rancherData) map[string]types.Server {
	var servers map[string]types.Server

	for index, ip := range service.Containers {
		if len(ip) == 0 {
			log.Warnf("Unable to find the IP address for a container in the service %q: this container is ignored.", service.Name)
			continue
		}

		if servers == nil {
			servers = make(map[string]types.Server)
		}

		protocol := label.GetStringValue(service.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
		port := label.GetStringValue(service.SegmentLabels, label.TraefikPort, "")
		weight := label.GetIntValue(service.SegmentLabels, label.TraefikWeight, label.DefaultWeight)

		serverName := "server-" + strconv.Itoa(index)
		servers[serverName] = types.Server{
			URL:    fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)),
			Weight: weight,
		}
	}

	return servers
}

func checkSegmentPort(labels map[string]string, segmentName string) (int, error) {
	if rawPort, ok := labels[label.TraefikPort]; ok {
		port, err := strconv.Atoi(rawPort)
		if err != nil {
			return port, fmt.Errorf("invalid port value %q for the segment %q: %v", rawPort, segmentName, err)
		}
	} else {
		return 0, fmt.Errorf("port label is missing, please use %s as default value or define port label for all segments ('traefik.<segment_name>.port')", label.TraefikPort)
	}
	return 0, nil
}
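
For reference, a minimal self-contained sketch (not part of the commit) of how getServers above assembles backend server URLs from segment labels and container IPs. It assumes the conventional "traefik.protocol" and "traefik.port" label keys behind label.TraefikProtocol and label.TraefikPort; all values are placeholders:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Placeholder segment labels for one Rancher service.
	labels := map[string]string{
		"traefik.protocol": "http",
		"traefik.port":     "8080",
	}
	// Placeholder container IPs, as collected into rancherData.Containers.
	containerIPs := []string{"10.42.0.5", "10.42.0.6"}

	for index, ip := range containerIPs {
		// Same construction as getServers: protocol + host:port.
		url := fmt.Sprintf("%s://%s", labels["traefik.protocol"], net.JoinHostPort(ip, labels["traefik.port"]))
		fmt.Printf("server-%d -> %s\n", index, url)
	}
}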

old/provider/rancher/config_test.go (new file, 1171 lines)
File diff suppressed because it is too large.

old/provider/rancher/metadata.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package rancher

import (
	"context"
	"fmt"
	"time"

	"github.com/cenk/backoff"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/old/log"
	"github.com/containous/traefik/old/types"
	"github.com/containous/traefik/safe"
	"github.com/sirupsen/logrus"

	rancher "github.com/rancher/go-rancher-metadata/metadata"
)

// MetadataConfiguration contains configuration properties specific to
// the Rancher metadata service provider.
type MetadataConfiguration struct {
	IntervalPoll bool   `description:"Poll the Rancher metadata service every 'rancher.refreshseconds' (less accurate)"`
	Prefix       string `description:"Prefix used for accessing the Rancher metadata service"`
}

func (p *Provider) metadataProvide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	metadataServiceURL := fmt.Sprintf("http://rancher-metadata.rancher.internal/%s", p.Metadata.Prefix)

	safe.Go(func() {
		operation := func() error {
			client, err := rancher.NewClientAndWait(metadataServiceURL)
			if err != nil {
				log.Errorf("Failed to create Rancher metadata service client: %v", err)
				return err
			}

			updateConfiguration := func(version string) {
				log.WithField("metadata_version", version).Debugln("Refreshing configuration from Rancher metadata service")

				stacks, err := client.GetStacks()
				if err != nil {
					log.Errorf("Failed to query Rancher metadata service: %v", err)
					return
				}

				rancherData := parseMetadataSourcedRancherData(stacks)
				configuration := p.buildConfiguration(rancherData)
				configurationChan <- types.ConfigMessage{
					ProviderName:  "rancher",
					Configuration: configuration,
				}
			}
			updateConfiguration("init")

			if p.Watch {
				pool.Go(func(stop chan bool) {
					switch {
					case p.Metadata.IntervalPoll:
						p.intervalPoll(client, updateConfiguration, stop)
					default:
						p.longPoll(client, updateConfiguration, stop)
					}
				})
			}
			return nil
		}

		notify := func(err error, time time.Duration) {
			log.WithFields(logrus.Fields{
				"error":    err,
				"retry_in": time,
			}).Errorln("Rancher metadata service connection error")
		}

		if err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify); err != nil {
			log.WithField("endpoint", metadataServiceURL).Errorln("Cannot connect to Rancher metadata service")
		}
	})

	return nil
}

func (p *Provider) intervalPoll(client rancher.Client, updateConfiguration func(string), stop chan bool) {
	_, cancel := context.WithCancel(context.Background())
	defer cancel()

	ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
	defer ticker.Stop()

	var version string
	for {
		select {
		case <-ticker.C:
			newVersion, err := client.GetVersion()
			if err != nil {
				log.WithField("error", err).Errorln("Failed to read Rancher metadata service version")
			} else if version != newVersion {
				version = newVersion
				updateConfiguration(version)
			}
		case <-stop:
			return
		}
	}
}

func (p *Provider) longPoll(client rancher.Client, updateConfiguration func(string), stop chan bool) {
	_, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Holds the connection until there is either a change in the metadata
	// repository or `p.RefreshSeconds` has elapsed. Long polling should be
	// favored for the most accurate configuration updates.
	safe.Go(func() {
		client.OnChange(p.RefreshSeconds, updateConfiguration)
	})
	<-stop
}

func parseMetadataSourcedRancherData(stacks []rancher.Stack) (rancherDataList []rancherData) {
	for _, stack := range stacks {
		for _, service := range stack.Services {
			var containerIPAddresses []string
			for _, container := range service.Containers {
				if containerFilter(container.Name, container.HealthState, container.State) {
					containerIPAddresses = append(containerIPAddresses, container.PrimaryIp)
				}
			}

			rancherDataList = append(rancherDataList, rancherData{
				Name:       service.Name + "/" + stack.Name,
				State:      service.State,
				Labels:     service.Labels,
				Containers: containerIPAddresses,
			})
		}
	}
	return rancherDataList
}
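
For reference, a minimal self-contained sketch (not part of the commit) of the version-gated refresh loop used by intervalPoll above: poll on a ticker, but only trigger an update when the reported metadata version changes. The getVersion closure and the fake version sequence are stand-ins for the metadata client's GetVersion:

package main

import (
	"fmt"
	"time"
)

func main() {
	versions := []string{"v1", "v1", "v2", "v2", "v3"} // fake version sequence
	i := 0
	getVersion := func() string { v := versions[i%len(versions)]; i++; return v }

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	stop := time.After(600 * time.Millisecond) // stand-in for the provider's stop channel

	var version string
	for {
		select {
		case <-ticker.C:
			// Only refresh when the version actually changes, as intervalPoll does.
			if newVersion := getVersion(); newVersion != version {
				version = newVersion
				fmt.Println("refreshing configuration, metadata version:", version)
			}
		case <-stop:
			return
		}
	}
}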

old/provider/rancher/rancher.go (new file, 80 lines)
@@ -0,0 +1,80 @@
package rancher

import (
	"fmt"

	"github.com/containous/traefik/old/log"
	"github.com/containous/traefik/old/provider"
	"github.com/containous/traefik/old/types"
	"github.com/containous/traefik/safe"
)

const (
	// Health
	healthy         = "healthy"
	updatingHealthy = "updating-healthy"

	// State
	active          = "active"
	running         = "running"
	upgraded        = "upgraded"
	upgrading       = "upgrading"
	updatingActive  = "updating-active"
	updatingRunning = "updating-running"
)

var _ provider.Provider = (*Provider)(nil)

// Provider holds configurations of the provider.
type Provider struct {
	provider.BaseProvider     `mapstructure:",squash" export:"true"`
	APIConfiguration          `mapstructure:",squash" export:"true"` // Provide backwards compatibility
	API                       *APIConfiguration      `description:"Enable the Rancher API provider" export:"true"`
	Metadata                  *MetadataConfiguration `description:"Enable the Rancher metadata service provider" export:"true"`
	Domain                    string                 `description:"Default domain used"`
	RefreshSeconds            int                    `description:"Polling interval (in seconds)" export:"true"`
	ExposedByDefault          bool                   `description:"Expose services by default" export:"true"`
	EnableServiceHealthFilter bool                   `description:"Filter services with unhealthy states and inactive states" export:"true"`
}

type rancherData struct {
	Name          string
	Labels        map[string]string // List of labels set to container or service
	Containers    []string
	Health        string
	State         string
	SegmentLabels map[string]string
	SegmentName   string
}

func (r rancherData) String() string {
	return fmt.Sprintf("{name:%s, labels:%v, containers: %v, health: %s, state: %s}", r.Name, r.Labels, r.Containers, r.Health, r.State)
}

// Init the provider
func (p *Provider) Init(constraints types.Constraints) error {
	return p.BaseProvider.Init(constraints)
}

// Provide allows either the Rancher API or metadata service provider to
// seed configuration into Traefik using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	if p.Metadata == nil {
		return p.apiProvide(configurationChan, pool)
	}
	return p.metadataProvide(configurationChan, pool)
}

func containerFilter(name, healthState, state string) bool {
	if healthState != "" && healthState != healthy && healthState != updatingHealthy {
		log.Debugf("Filtering container %s with healthState of %s", name, healthState)
		return false
	}

	if state != "" && state != running && state != updatingRunning && state != upgraded {
		log.Debugf("Filtering container %s with state of %s", name, state)
		return false
	}

	return true
}
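
For reference, a minimal self-contained sketch (not part of the commit) of the containerFilter policy above, condensed to drop the name parameter and logging: an empty health state or state means that dimension is not filtered on, otherwise only the listed healthy/running-ish values pass:

package main

import "fmt"

// containerFilter mirrors the acceptance logic above using local copies of the
// health/state constants.
func containerFilter(healthState, state string) bool {
	healthy, updatingHealthy := "healthy", "updating-healthy"
	running, updatingRunning, upgraded := "running", "updating-running", "upgraded"

	if healthState != "" && healthState != healthy && healthState != updatingHealthy {
		return false
	}
	if state != "" && state != running && state != updatingRunning && state != upgraded {
		return false
	}
	return true
}

func main() {
	fmt.Println(containerFilter("healthy", "running"))   // true
	fmt.Println(containerFilter("", "upgraded"))         // true (no health info reported)
	fmt.Println(containerFilter("unhealthy", "running")) // false
	fmt.Println(containerFilter("healthy", "stopped"))   // false
}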