Extract providers to their own package
This is just doing that and making it compile :)

Signed-off-by: Vincent Demeester <vincent@sbr.pm>
parent 2d00758b2e
commit 542c3673e4

36 changed files with 861 additions and 833 deletions
280  provider/kubernetes/client.go  Normal file

@@ -0,0 +1,280 @@
package kubernetes

import (
	"errors"
	"fmt"
	"io/ioutil"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
	"k8s.io/client-go/pkg/fields"
	"k8s.io/client-go/pkg/labels"
	"k8s.io/client-go/pkg/runtime"
	"k8s.io/client-go/pkg/watch"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

const resyncPeriod = time.Minute * 5

// Client is a client for the Provider master.
// WatchAll starts the watch of the Provider resources and updates the stores.
// The stores can then be accessed via the Get* functions.
type Client interface {
	GetIngresses(namespaces Namespaces) []*v1beta1.Ingress
	GetService(namespace, name string) (*v1.Service, bool, error)
	GetEndpoints(namespace, name string) (*v1.Endpoints, bool, error)
	WatchAll(labelSelector string, stopCh <-chan struct{}) (<-chan interface{}, error)
}

type clientImpl struct {
	ingController *cache.Controller
	svcController *cache.Controller
	epController  *cache.Controller

	ingStore cache.Store
	svcStore cache.Store
	epStore  cache.Store

	clientset *kubernetes.Clientset
}

// NewInClusterClient returns a new Provider client that is expected to run
// inside the cluster.
func NewInClusterClient(endpoint string) (Client, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to create in-cluster configuration: %s", err)
	}

	if endpoint != "" {
		config.Host = endpoint
	}

	return createClientFromConfig(config)
}

// NewExternalClusterClient returns a new Provider client that may run outside
// of the cluster.
// The endpoint parameter must not be empty.
func NewExternalClusterClient(endpoint, token, caFilePath string) (Client, error) {
	if endpoint == "" {
		return nil, errors.New("endpoint missing for external cluster client")
	}

	config := &rest.Config{
		Host:        endpoint,
		BearerToken: token,
	}

	if caFilePath != "" {
		caData, err := ioutil.ReadFile(caFilePath)
		if err != nil {
			return nil, fmt.Errorf("failed to read CA file %s: %s", caFilePath, err)
		}

		config.TLSClientConfig = rest.TLSClientConfig{CAData: caData}
	}

	return createClientFromConfig(config)
}

func createClientFromConfig(c *rest.Config) (Client, error) {
	clientset, err := kubernetes.NewForConfig(c)
	if err != nil {
		return nil, err
	}

	return &clientImpl{
		clientset: clientset,
	}, nil
}

// GetIngresses returns all ingresses in the cluster.
func (c *clientImpl) GetIngresses(namespaces Namespaces) []*v1beta1.Ingress {
	ingList := c.ingStore.List()
	result := make([]*v1beta1.Ingress, 0, len(ingList))

	for _, obj := range ingList {
		ingress := obj.(*v1beta1.Ingress)
		if HasNamespace(ingress, namespaces) {
			result = append(result, ingress)
		}
	}

	return result
}

// WatchIngresses starts the watch of Provider Ingress resources and updates the corresponding store.
func (c *clientImpl) WatchIngresses(labelSelector labels.Selector, watchCh chan<- interface{}, stopCh <-chan struct{}) {
	source := NewListWatchFromClient(
		c.clientset.ExtensionsV1beta1().RESTClient(),
		"ingresses",
		api.NamespaceAll,
		fields.Everything(),
		labelSelector)

	c.ingStore, c.ingController = cache.NewInformer(
		source,
		&v1beta1.Ingress{},
		resyncPeriod,
		newResourceEventHandlerFuncs(watchCh))
	go c.ingController.Run(stopCh)
}

// eventHandlerFunc will pass the obj on to the events channel or drop it.
// This is so passing the events along won't block in the case of high volume.
// The events are only used for signaling anyway, so dropping a few is OK.
func eventHandlerFunc(events chan<- interface{}, obj interface{}) {
	select {
	case events <- obj:
	default:
	}
}

func newResourceEventHandlerFuncs(events chan<- interface{}) cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { eventHandlerFunc(events, obj) },
		UpdateFunc: func(old, new interface{}) { eventHandlerFunc(events, new) },
		DeleteFunc: func(obj interface{}) { eventHandlerFunc(events, obj) },
	}
}

// GetService returns the named service from the given namespace.
func (c *clientImpl) GetService(namespace, name string) (*v1.Service, bool, error) {
	var service *v1.Service
	item, exists, err := c.svcStore.GetByKey(namespace + "/" + name)
	if item != nil {
		service = item.(*v1.Service)
	}

	return service, exists, err
}

// WatchServices starts the watch of Provider Service resources and updates the corresponding store.
func (c *clientImpl) WatchServices(watchCh chan<- interface{}, stopCh <-chan struct{}) {
	source := cache.NewListWatchFromClient(
		c.clientset.CoreV1().RESTClient(),
		"services",
		api.NamespaceAll,
		fields.Everything())

	c.svcStore, c.svcController = cache.NewInformer(
		source,
		&v1.Service{},
		resyncPeriod,
		newResourceEventHandlerFuncs(watchCh))
	go c.svcController.Run(stopCh)
}

// GetEndpoints returns the named Endpoints.
// Endpoints have the same name as the corresponding Service.
func (c *clientImpl) GetEndpoints(namespace, name string) (*v1.Endpoints, bool, error) {
	var endpoint *v1.Endpoints
	item, exists, err := c.epStore.GetByKey(namespace + "/" + name)

	if item != nil {
		endpoint = item.(*v1.Endpoints)
	}

	return endpoint, exists, err
}

// WatchEndpoints starts the watch of Provider Endpoints resources and updates the corresponding store.
func (c *clientImpl) WatchEndpoints(watchCh chan<- interface{}, stopCh <-chan struct{}) {
	source := cache.NewListWatchFromClient(
		c.clientset.CoreV1().RESTClient(),
		"endpoints",
		api.NamespaceAll,
		fields.Everything())

	c.epStore, c.epController = cache.NewInformer(
		source,
		&v1.Endpoints{},
		resyncPeriod,
		newResourceEventHandlerFuncs(watchCh))
	go c.epController.Run(stopCh)
}

// WatchAll returns events in the cluster and updates the stores via informers.
// Ingresses are filtered by labelSelector.
func (c *clientImpl) WatchAll(labelSelector string, stopCh <-chan struct{}) (<-chan interface{}, error) {
	watchCh := make(chan interface{}, 1)
	eventCh := make(chan interface{}, 1)

	kubeLabelSelector, err := labels.Parse(labelSelector)
	if err != nil {
		return nil, err
	}

	c.WatchIngresses(kubeLabelSelector, eventCh, stopCh)
	c.WatchServices(eventCh, stopCh)
	c.WatchEndpoints(eventCh, stopCh)

	go func() {
		defer close(watchCh)
		defer close(eventCh)

		for {
			select {
			case <-stopCh:
				return
			case event := <-eventCh:
				c.fireEvent(event, watchCh)
			}
		}
	}()

	return watchCh, nil
}

// fireEvent checks if all controllers have synced before firing.
// Used after startup or a reconnect.
func (c *clientImpl) fireEvent(event interface{}, eventCh chan interface{}) {
	if !c.ingController.HasSynced() || !c.svcController.HasSynced() || !c.epController.HasSynced() {
		return
	}
	eventHandlerFunc(eventCh, event)
}

// HasNamespace checks if the ingress is in one of the given namespaces.
func HasNamespace(ingress *v1beta1.Ingress, namespaces Namespaces) bool {
	if len(namespaces) == 0 {
		return true
	}
	for _, n := range namespaces {
		if ingress.ObjectMeta.Namespace == n {
			return true
		}
	}
	return false
}

// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, field selector and label selector.
// It extends cache.NewListWatchFromClient to support a label selector.
func NewListWatchFromClient(c cache.Getter, resource string, namespace string, fieldSelector fields.Selector, labelSelector labels.Selector) *cache.ListWatch {
	listFunc := func(options api.ListOptions) (runtime.Object, error) {
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, api.ParameterCodec).
			FieldsSelectorParam(fieldSelector).
			LabelsSelectorParam(labelSelector).
			Do().
			Get()
	}
	watchFunc := func(options api.ListOptions) (watch.Interface, error) {
		return c.Get().
			Prefix("watch").
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, api.ParameterCodec).
			FieldsSelectorParam(fieldSelector).
			LabelsSelectorParam(labelSelector).
			Watch()
	}
	return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
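For orientation, here is a minimal sketch of how a consumer might drive the Client interface above. watchCluster is a hypothetical helper (the real consumer is Provider.Provide in kubernetes.go below), and the ingress-handling body is a placeholder:

// watchCluster is a hypothetical consumer of the Client interface above.
func watchCluster(client Client, namespaces Namespaces) error {
	stopCh := make(chan struct{})
	defer close(stopCh)

	// An empty label selector watches all ingresses.
	eventsCh, err := client.WatchAll("", stopCh)
	if err != nil {
		return err
	}

	for range eventsCh {
		// Events only signal "something changed"; re-read the synced
		// stores and rebuild the configuration from scratch.
		for _, ingress := range client.GetIngresses(namespaces) {
			_ = ingress // placeholder: translate into frontends/backends
		}
	}
	return nil
}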
349  provider/kubernetes/kubernetes.go  Normal file

@@ -0,0 +1,349 @@
package kubernetes

import (
	"fmt"
	"os"
	"reflect"
	"strconv"
	"strings"
	"text/template"
	"time"

	"github.com/cenk/backoff"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/provider"
	"github.com/containous/traefik/safe"
	"github.com/containous/traefik/types"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/util/intstr"
)

var _ provider.Provider = (*Provider)(nil)

const (
	annotationFrontendRuleType = "traefik.frontend.rule.type"
	ruleTypePathPrefixStrip    = "PathPrefixStrip"
	ruleTypePathStrip          = "PathStrip"
	ruleTypePath               = "Path"
	ruleTypePathPrefix         = "PathPrefix"
)

// Provider holds configurations of the provider.
type Provider struct {
	provider.BaseProvider  `mapstructure:",squash"`
	Endpoint               string     `description:"Kubernetes server endpoint (required for external cluster client)"`
	Token                  string     `description:"Kubernetes bearer token (not needed for in-cluster client)"`
	CertAuthFilePath       string     `description:"Kubernetes certificate authority file path (not needed for in-cluster client)"`
	DisablePassHostHeaders bool       `description:"Kubernetes disable PassHost Headers"`
	Namespaces             Namespaces `description:"Kubernetes namespaces"`
	LabelSelector          string     `description:"Kubernetes api label selector to use"`
	lastConfiguration      safe.Safe
}

func (p *Provider) newK8sClient() (Client, error) {
	withEndpoint := ""
	if p.Endpoint != "" {
		withEndpoint = fmt.Sprintf(" with endpoint %v", p.Endpoint)
	}

	if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
		log.Infof("Creating in-cluster Provider client%s\n", withEndpoint)
		return NewInClusterClient(p.Endpoint)
	}

	log.Infof("Creating cluster-external Provider client%s\n", withEndpoint)
	return NewExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}

// Provide allows the k8s provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
	k8sClient, err := p.newK8sClient()
	if err != nil {
		return err
	}
	p.Constraints = append(p.Constraints, constraints...)

	pool.Go(func(stop chan bool) {
		operation := func() error {
			for {
				stopWatch := make(chan struct{}, 1)
				defer close(stopWatch)
				log.Debugf("Using label selector: '%s'", p.LabelSelector)
				eventsChan, err := k8sClient.WatchAll(p.LabelSelector, stopWatch)
				if err != nil {
					log.Errorf("Error watching kubernetes events: %v", err)
					timer := time.NewTimer(1 * time.Second)
					select {
					case <-timer.C:
						return err
					case <-stop:
						return nil
					}
				}
				for {
					select {
					case <-stop:
						return nil
					case event := <-eventsChan:
						log.Debugf("Received event from kubernetes %+v", event)
						templateObjects, err := p.loadIngresses(k8sClient)
						if err != nil {
							return err
						}
						if reflect.DeepEqual(p.lastConfiguration.Get(), templateObjects) {
							log.Debugf("Skipping event from kubernetes %+v", event)
						} else {
							p.lastConfiguration.Set(templateObjects)
							configurationChan <- types.ConfigMessage{
								ProviderName:  "kubernetes",
								Configuration: p.loadConfig(*templateObjects),
							}
						}
					}
				}
			}
		}

		notify := func(err error, time time.Duration) {
			log.Errorf("Provider connection error %+v, retrying in %s", err, time)
		}
		err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
		if err != nil {
			log.Errorf("Cannot connect to Provider server %+v", err)
		}
	})

	return nil
}

func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error) {
	ingresses := k8sClient.GetIngresses(p.Namespaces)

	templateObjects := types.Configuration{
		map[string]*types.Backend{},
		map[string]*types.Frontend{},
	}
	for _, i := range ingresses {
		ingressClass := i.Annotations["kubernetes.io/ingress.class"]

		if !shouldProcessIngress(ingressClass) {
			continue
		}

		for _, r := range i.Spec.Rules {
			if r.HTTP == nil {
				log.Warnf("Error in ingress: HTTP is nil")
				continue
			}
			for _, pa := range r.HTTP.Paths {
				if _, exists := templateObjects.Backends[r.Host+pa.Path]; !exists {
					templateObjects.Backends[r.Host+pa.Path] = &types.Backend{
						Servers: make(map[string]types.Server),
						LoadBalancer: &types.LoadBalancer{
							Sticky: false,
							Method: "wrr",
						},
					}
				}

				PassHostHeader := p.getPassHostHeader()

				passHostHeaderAnnotation := i.Annotations["traefik.frontend.passHostHeader"]
				switch passHostHeaderAnnotation {
				case "true":
					PassHostHeader = true
				case "false":
					PassHostHeader = false
				case "":
					// Annotation absent: keep the provider-level default.
				default:
					log.Warnf("Unknown value %q for traefik.frontend.passHostHeader, falling back to %v", passHostHeaderAnnotation, PassHostHeader)
				}

				if _, exists := templateObjects.Frontends[r.Host+pa.Path]; !exists {
					templateObjects.Frontends[r.Host+pa.Path] = &types.Frontend{
						Backend:        r.Host + pa.Path,
						PassHostHeader: PassHostHeader,
						Routes:         make(map[string]types.Route),
						Priority:       len(pa.Path),
					}
				}
				if len(r.Host) > 0 {
					rule := "Host:" + r.Host

					if strings.Contains(r.Host, "*") {
						rule = "HostRegexp:" + strings.Replace(r.Host, "*", "{subdomain:[A-Za-z0-9-_]+}", 1)
					}

					if _, exists := templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host]; !exists {
						templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host] = types.Route{
							Rule: rule,
						}
					}
				}

				if len(pa.Path) > 0 {
					ruleType, unknown := getRuleTypeFromAnnotation(i.Annotations)
					switch {
					case unknown:
						log.Warnf("Unknown RuleType '%s' for Ingress %s/%s, falling back to PathPrefix", ruleType, i.ObjectMeta.Namespace, i.ObjectMeta.Name)
						fallthrough
					case ruleType == "":
						ruleType = ruleTypePathPrefix
					}

					templateObjects.Frontends[r.Host+pa.Path].Routes[pa.Path] = types.Route{
						Rule: ruleType + ":" + pa.Path,
					}
				}

				service, exists, err := k8sClient.GetService(i.ObjectMeta.Namespace, pa.Backend.ServiceName)
				if err != nil {
					log.Errorf("Error while retrieving service information from k8s API %s/%s: %v", i.ObjectMeta.Namespace, pa.Backend.ServiceName, err)
					return nil, err
				}

				if !exists {
					log.Errorf("Service not found for %s/%s", i.ObjectMeta.Namespace, pa.Backend.ServiceName)
					delete(templateObjects.Frontends, r.Host+pa.Path)
					continue
				}

				if expression := service.Annotations["traefik.backend.circuitbreaker"]; expression != "" {
					templateObjects.Backends[r.Host+pa.Path].CircuitBreaker = &types.CircuitBreaker{
						Expression: expression,
					}
				}
				if service.Annotations["traefik.backend.loadbalancer.method"] == "drr" {
					templateObjects.Backends[r.Host+pa.Path].LoadBalancer.Method = "drr"
				}
				if service.Annotations["traefik.backend.loadbalancer.sticky"] == "true" {
					templateObjects.Backends[r.Host+pa.Path].LoadBalancer.Sticky = true
				}

				protocol := "http"
				for _, port := range service.Spec.Ports {
					if equalPorts(port, pa.Backend.ServicePort) {
						if port.Port == 443 {
							protocol = "https"
						}
						if service.Spec.Type == "ExternalName" {
							url := protocol + "://" + service.Spec.ExternalName
							name := url

							templateObjects.Backends[r.Host+pa.Path].Servers[name] = types.Server{
								URL:    url,
								Weight: 1,
							}
						} else {
							endpoints, exists, err := k8sClient.GetEndpoints(service.ObjectMeta.Namespace, service.ObjectMeta.Name)
							if err != nil {
								log.Errorf("Error retrieving endpoints %s/%s: %v", service.ObjectMeta.Namespace, service.ObjectMeta.Name, err)
								return nil, err
							}

							if !exists {
								log.Errorf("Endpoints not found for %s/%s", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
								continue
							}

							if len(endpoints.Subsets) == 0 {
								log.Warnf("Service endpoints not found for %s/%s, falling back to Service ClusterIP", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
								templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
									URL:    protocol + "://" + service.Spec.ClusterIP + ":" + strconv.Itoa(int(port.Port)),
									Weight: 1,
								}
							} else {
								for _, subset := range endpoints.Subsets {
									for _, address := range subset.Addresses {
										url := protocol + "://" + address.IP + ":" + strconv.Itoa(endpointPortNumber(port, subset.Ports))
										name := url
										if address.TargetRef != nil && address.TargetRef.Name != "" {
											name = address.TargetRef.Name
										}
										templateObjects.Backends[r.Host+pa.Path].Servers[name] = types.Server{
											URL:    url,
											Weight: 1,
										}
									}
								}
							}
						}
						break
					}
				}
			}
		}
	}
	return &templateObjects, nil
}

func endpointPortNumber(servicePort v1.ServicePort, endpointPorts []v1.EndpointPort) int {
	if len(endpointPorts) > 0 {
		// The name is optional if there is only one port.
		port := endpointPorts[0]
		for _, endpointPort := range endpointPorts {
			if servicePort.Name == endpointPort.Name {
				port = endpointPort
			}
		}
		return int(port.Port)
	}
	return int(servicePort.Port)
}

func equalPorts(servicePort v1.ServicePort, ingressPort intstr.IntOrString) bool {
	if int(servicePort.Port) == ingressPort.IntValue() {
		return true
	}
	if servicePort.Name != "" && servicePort.Name == ingressPort.String() {
		return true
	}
	return false
}

func shouldProcessIngress(ingressClass string) bool {
	switch ingressClass {
	case "", "traefik":
		return true
	default:
		return false
	}
}

func (p *Provider) getPassHostHeader() bool {
	if p.DisablePassHostHeaders {
		return false
	}
	return true
}

func (p *Provider) loadConfig(templateObjects types.Configuration) *types.Configuration {
	var FuncMap = template.FuncMap{}
	configuration, err := p.GetConfiguration("templates/kubernetes.tmpl", FuncMap, templateObjects)
	if err != nil {
		log.Error(err)
	}
	return configuration
}

func getRuleTypeFromAnnotation(annotations map[string]string) (ruleType string, unknown bool) {
	ruleType = annotations[annotationFrontendRuleType]
	for _, knownRuleType := range []string{
		ruleTypePathPrefixStrip,
		ruleTypePathStrip,
		ruleTypePath,
		ruleTypePathPrefix,
	} {
		if strings.ToLower(ruleType) == strings.ToLower(knownRuleType) {
			return knownRuleType, false
		}
	}

	if ruleType != "" {
		// Annotation is set but does not match anything we know.
		unknown = true
	}

	return ruleType, unknown
}
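As a quick illustration of the annotation handling above, a sketch assuming it sits in the same package; the expected outputs follow directly from the function definitions:

func exampleRuleTypes() {
	// Matching is case-insensitive and returns the canonical casing.
	rt, unknown := getRuleTypeFromAnnotation(map[string]string{
		annotationFrontendRuleType: "pathprefixstrip",
	})
	fmt.Println(rt, unknown) // PathPrefixStrip false

	// An unrecognized value is flagged so loadIngresses can warn and
	// fall back to PathPrefix.
	rt, unknown = getRuleTypeFromAnnotation(map[string]string{
		annotationFrontendRuleType: "Bogus",
	})
	fmt.Println(rt, unknown) // Bogus true

	// Only the empty ingress class and "traefik" are processed.
	fmt.Println(shouldProcessIngress(""), shouldProcessIngress("traefik"), shouldProcessIngress("nginx")) // true true false
}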
2144  provider/kubernetes/kubernetes_test.go  Normal file

File diff suppressed because it is too large.
32  provider/kubernetes/namespace.go  Normal file

@@ -0,0 +1,32 @@
package kubernetes

import (
	"fmt"
	"strings"
)

// Namespaces holds kubernetes namespaces.
type Namespaces []string

// Set adds string elements to the parser.
// It splits str on ',' and ';'.
func (ns *Namespaces) Set(str string) error {
	fargs := func(c rune) bool {
		return c == ',' || c == ';'
	}
	// Split on the separator runes and accumulate the fields.
	slice := strings.FieldsFunc(str, fargs)
	*ns = append(*ns, slice...)
	return nil
}

// Get returns the Namespaces value.
func (ns *Namespaces) Get() interface{} { return Namespaces(*ns) }

// String returns the slice formatted as a string.
func (ns *Namespaces) String() string { return fmt.Sprintf("%v", *ns) }

// SetValue sets a Namespaces value into the parser.
func (ns *Namespaces) SetValue(val interface{}) {
	*ns = Namespaces(val.(Namespaces))
}
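A small usage sketch (hypothetical values) showing how Set accumulates namespaces across both separators:

func exampleNamespaces() {
	var ns Namespaces
	// Both ',' and ';' act as separators.
	_ = ns.Set("default,kube-system;monitoring")
	fmt.Println(ns.String()) // [default kube-system monitoring]
}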