Adds Marathon support.
Co-authored-by: Julien Salleyron <julien@containo.us>
This commit is contained in: parent a433e469cc, commit 246b245959
22 changed files with 2223 additions and 2203 deletions
@@ -1,226 +0,0 @@
package marathon

import (
	"strings"
	"time"

	"github.com/containous/traefik/old/provider/label"
	"github.com/gambol99/go-marathon"
)

const testTaskName = "taskID"

func withAppData(app marathon.Application, segmentName string) appData {
	segmentProperties := label.ExtractTraefikLabels(stringValueMap(app.Labels))
	return appData{
		Application:   app,
		SegmentLabels: segmentProperties[segmentName],
		SegmentName:   segmentName,
		LinkedApps:    nil,
	}
}

// Functions related to building applications.

func withApplications(apps ...marathon.Application) *marathon.Applications {
	return &marathon.Applications{Apps: apps}
}

func application(ops ...func(*marathon.Application)) marathon.Application {
	app := marathon.Application{}
	app.EmptyLabels()
	app.Deployments = []map[string]string{}
	app.ReadinessChecks = &[]marathon.ReadinessCheck{}
	app.ReadinessCheckResults = &[]marathon.ReadinessCheckResult{}

	for _, op := range ops {
		op(&app)
	}

	return app
}

func appID(name string) func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.Name(name)
	}
}

func appPorts(ports ...int) func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.Ports = append(app.Ports, ports...)
	}
}

func withLabel(key, value string) func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.AddLabel(key, value)
	}
}

func constraint(value string) func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.AddConstraint(strings.Split(value, ":")...)
	}
}

func withSegmentLabel(key, value string, segmentName string) func(*marathon.Application) {
	if len(segmentName) == 0 {
		panic("segmentName can not be empty")
	}

	property := strings.TrimPrefix(key, label.Prefix)
	return func(app *marathon.Application) {
		app.AddLabel(label.Prefix+segmentName+"."+property, value)
	}
}

func portDefinition(port int) func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.AddPortDefinition(marathon.PortDefinition{
			Port: &port,
		})
	}
}

func bridgeNetwork() func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.SetNetwork("bridge", marathon.BridgeNetworkMode)
	}
}

func containerNetwork() func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.SetNetwork("cni", marathon.ContainerNetworkMode)
	}
}

func ipAddrPerTask(port int) func(*marathon.Application) {
	return func(app *marathon.Application) {
		p := marathon.Port{
			Number: port,
			Name:   "port",
		}
		disc := marathon.Discovery{}
		disc.AddPort(p)
		ipAddr := marathon.IPAddressPerTask{}
		ipAddr.SetDiscovery(disc)
		app.SetIPAddressPerTask(ipAddr)
	}
}

func deployments(ids ...string) func(*marathon.Application) {
	return func(app *marathon.Application) {
		for _, id := range ids {
			app.Deployments = append(app.Deployments, map[string]string{
				"ID": id,
			})
		}
	}
}

func readinessCheck(timeout time.Duration) func(*marathon.Application) {
	return func(app *marathon.Application) {
		app.ReadinessChecks = &[]marathon.ReadinessCheck{
			{
				Path:           "/ready",
				TimeoutSeconds: int(timeout.Seconds()),
			},
		}
	}
}

func readinessCheckResult(taskID string, ready bool) func(*marathon.Application) {
	return func(app *marathon.Application) {
		*app.ReadinessCheckResults = append(*app.ReadinessCheckResults, marathon.ReadinessCheckResult{
			TaskID: taskID,
			Ready:  ready,
		})
	}
}

func withTasks(tasks ...marathon.Task) func(*marathon.Application) {
	return func(application *marathon.Application) {
		for _, task := range tasks {
			tu := task
			application.Tasks = append(application.Tasks, &tu)
		}
	}
}

// Functions related to building tasks.

func task(ops ...func(*marathon.Task)) marathon.Task {
	t := &marathon.Task{
		ID: testTaskName,
		// The vast majority of tests expect the task state to be TASK_RUNNING.
		State: string(taskStateRunning),
	}

	for _, op := range ops {
		op(t)
	}

	return *t
}

func withTaskID(id string) func(*marathon.Task) {
	return func(task *marathon.Task) {
		task.ID = id
	}
}

func localhostTask(ops ...func(*marathon.Task)) marathon.Task {
	t := task(
		host("localhost"),
		ipAddresses("127.0.0.1"),
		taskState(taskStateRunning),
	)

	for _, op := range ops {
		op(&t)
	}

	return t
}

func taskPorts(ports ...int) func(*marathon.Task) {
	return func(t *marathon.Task) {
		t.Ports = append(t.Ports, ports...)
	}
}

func taskState(state TaskState) func(*marathon.Task) {
	return func(t *marathon.Task) {
		t.State = string(state)
	}
}

func host(h string) func(*marathon.Task) {
	return func(t *marathon.Task) {
		t.Host = h
	}
}

func ipAddresses(addresses ...string) func(*marathon.Task) {
	return func(t *marathon.Task) {
		for _, addr := range addresses {
			t.IPAddresses = append(t.IPAddresses, &marathon.IPAddress{
				IPAddress: addr,
				Protocol:  "tcp",
			})
		}
	}
}

func startedAt(timestamp string) func(*marathon.Task) {
	return func(t *marathon.Task) {
		t.StartedAt = timestamp
	}
}

func startedAtFromNow(offset time.Duration) func(*marathon.Task) {
	return func(t *marathon.Task) {
		t.StartedAt = time.Now().Add(-offset).Format(time.RFC3339)
	}
}
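For orientation, the helpers above follow a functional-options style. A minimal same-package sketch (not part of the diff; it only reuses builders and label constants shown in this commit) of how they compose into a fixture, mirroring their use in the configuration tests further down:

// Sketch only: builds a Marathon applications fixture from the helpers above.
func exampleFixture() *marathon.Applications {
	return withApplications(
		application(
			appID("/app"),
			appPorts(80, 81),
			withTasks(localhostTask(taskPorts(80, 81))),
			withSegmentLabel(label.TraefikPort, "80", "web"),
		),
	)
}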
@@ -1,390 +0,0 @@
package marathon

import (
	"errors"
	"fmt"
	"math"
	"net"
	"strconv"
	"strings"
	"text/template"

	"github.com/containous/traefik/old/log"
	"github.com/containous/traefik/old/provider"
	"github.com/containous/traefik/old/provider/label"
	"github.com/containous/traefik/old/types"
	"github.com/gambol99/go-marathon"
)

type appData struct {
	marathon.Application
	SegmentLabels map[string]string
	SegmentName   string
	LinkedApps    []*appData
}

func (p *Provider) buildConfiguration(applications *marathon.Applications) *types.Configuration {
	var MarathonFuncMap = template.FuncMap{
		"getDomain":      label.GetFuncString(label.TraefikDomain, p.Domain), // see https://github.com/containous/traefik/pull/1693
		"getSubDomain":   p.getSubDomain,                                     // see https://github.com/containous/traefik/pull/1693
		"getBackendName": p.getBackendName,

		// Backend functions
		"getPort":               getPort,
		"getCircuitBreaker":     label.GetCircuitBreaker,
		"getLoadBalancer":       label.GetLoadBalancer,
		"getMaxConn":            label.GetMaxConn,
		"getHealthCheck":        label.GetHealthCheck,
		"getBuffering":          label.GetBuffering,
		"getResponseForwarding": label.GetResponseForwarding,
		"getServers":            p.getServers,

		// Frontend functions
		"getSegmentNameSuffix": getSegmentNameSuffix,
		"getFrontendRule":      p.getFrontendRule,
		"getFrontendName":      p.getFrontendName,
		"getPassHostHeader":    label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
		"getPassTLSCert":       label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
		"getPassTLSClientCert": label.GetTLSClientCert,
		"getPriority":          label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
		"getEntryPoints":       label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
		"getBasicAuth":         label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated
		"getAuth":              label.GetAuth,
		"getRedirect":          label.GetRedirect,
		"getErrorPages":        label.GetErrorPages,
		"getRateLimit":         label.GetRateLimit,
		"getHeaders":           label.GetHeaders,
		"getWhiteList":         label.GetWhiteList,
	}

	apps := make(map[string]*appData)
	for _, app := range applications.Apps {
		if p.applicationFilter(app) {
			// Tasks
			var filteredTasks []*marathon.Task
			for _, task := range app.Tasks {
				if p.taskFilter(*task, app) {
					filteredTasks = append(filteredTasks, task)
					logIllegalServices(*task, app)
				}
			}

			app.Tasks = filteredTasks

			// segments
			segmentProperties := label.ExtractTraefikLabels(stringValueMap(app.Labels))
			for segmentName, labels := range segmentProperties {
				data := &appData{
					Application:   app,
					SegmentLabels: labels,
					SegmentName:   segmentName,
				}

				backendName := p.getBackendName(*data)
				if baseApp, ok := apps[backendName]; ok {
					baseApp.LinkedApps = append(baseApp.LinkedApps, data)
				} else {
					apps[backendName] = data
				}
			}
		}
	}

	templateObjects := struct {
		Applications map[string]*appData
		Domain       string
	}{
		Applications: apps,
		Domain:       p.Domain,
	}

	configuration, err := p.GetConfiguration("templates/marathon.tmpl", MarathonFuncMap, templateObjects)
	if err != nil {
		log.Errorf("Failed to render Marathon configuration template: %v", err)
	}
	return configuration
}

func (p *Provider) applicationFilter(app marathon.Application) bool {
	// Filter disabled application.
	if !label.IsEnabled(stringValueMap(app.Labels), p.ExposedByDefault) {
		log.Debugf("Filtering disabled Marathon application %s", app.ID)
		return false
	}

	// Filter by constraints.
	constraintTags := label.GetSliceStringValue(stringValueMap(app.Labels), label.TraefikTags)
	if p.MarathonLBCompatibility {
		if haGroup := label.GetStringValue(stringValueMap(app.Labels), labelLbCompatibilityGroup, ""); len(haGroup) > 0 {
			constraintTags = append(constraintTags, haGroup)
		}
	}
	if p.FilterMarathonConstraints && app.Constraints != nil {
		for _, constraintParts := range *app.Constraints {
			constraintTags = append(constraintTags, strings.Join(constraintParts, ":"))
		}
	}
	if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok {
		if failingConstraint != nil {
			log.Debugf("Filtering Marathon application %s pruned by %q constraint", app.ID, failingConstraint.String())
		}
		return false
	}

	return true
}

func (p *Provider) taskFilter(task marathon.Task, application marathon.Application) bool {
	if task.State != string(taskStateRunning) {
		return false
	}

	if ready := p.readyChecker.Do(task, application); !ready {
		log.Infof("Filtering unready task %s from application %s", task.ID, application.ID)
		return false
	}

	return true
}

// logIllegalServices logs illegal service configurations.
// While we cannot filter on the service level, they will eventually get
// rejected once the server configuration is rendered.
func logIllegalServices(task marathon.Task, app marathon.Application) {
	segmentProperties := label.ExtractTraefikLabels(stringValueMap(app.Labels))
	for segmentName, labels := range segmentProperties {
		// Check for illegal/missing ports.
		if _, err := processPorts(app, task, labels); err != nil {
			log.Warnf("%s has an illegal configuration: no proper port available", identifier(app, task, segmentName))
			continue
		}

		// Check for illegal port label combinations.
		hasPortLabel := label.Has(labels, label.TraefikPort)
		hasPortIndexLabel := label.Has(labels, label.TraefikPortIndex)
		if hasPortLabel && hasPortIndexLabel {
			log.Warnf("%s has both port and port index specified; port will take precedence", identifier(app, task, segmentName))
		}
	}
}

func getSegmentNameSuffix(serviceName string) string {
	if len(serviceName) > 0 {
		return "-service-" + provider.Normalize(serviceName)
	}
	return ""
}

func (p *Provider) getSubDomain(name string) string {
	if p.GroupsAsSubDomains {
		splitedName := strings.Split(strings.TrimPrefix(name, "/"), "/")
		provider.ReverseStringSlice(&splitedName)
		reverseName := strings.Join(splitedName, ".")
		return reverseName
	}
	return strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1)
}

func (p *Provider) getBackendName(app appData) string {
	value := label.GetStringValue(app.SegmentLabels, label.TraefikBackend, "")
	if len(value) > 0 {
		return provider.Normalize("backend" + value)
	}

	return provider.Normalize("backend" + app.ID + getSegmentNameSuffix(app.SegmentName))
}

func (p *Provider) getFrontendName(app appData) string {
	return provider.Normalize("frontend" + app.ID + getSegmentNameSuffix(app.SegmentName))
}

// getFrontendRule returns the frontend rule for the specified application, using
// its label. If service is provided, it will look for serviceName label before generic one.
// It returns a default one (Host) if the label is not present.
func (p *Provider) getFrontendRule(app appData) string {
	if value := label.GetStringValue(app.SegmentLabels, label.TraefikFrontendRule, ""); len(value) > 0 {
		return value
	}

	if p.MarathonLBCompatibility {
		if value := label.GetStringValue(stringValueMap(app.Labels), labelLbCompatibility, ""); len(value) > 0 {
			return "Host:" + value
		}
	}

	domain := label.GetStringValue(app.SegmentLabels, label.TraefikDomain, p.Domain)
	if len(domain) > 0 {
		domain = "." + domain
	}

	if len(app.SegmentName) > 0 {
		return "Host:" + strings.ToLower(provider.Normalize(app.SegmentName)) + "." + p.getSubDomain(app.ID) + domain
	}
	return "Host:" + p.getSubDomain(app.ID) + domain
}

func getPort(task marathon.Task, app appData) string {
	port, err := processPorts(app.Application, task, app.SegmentLabels)
	if err != nil {
		log.Errorf("Unable to process ports for %s: %s", identifier(app.Application, task, app.SegmentName), err)
		return ""
	}

	return strconv.Itoa(port)
}

// processPorts returns the configured port.
// An explicitly specified port is preferred. If none is specified, it selects
// one of the available port. The first such found port is returned unless an
// optional index is provided.
func processPorts(app marathon.Application, task marathon.Task, labels map[string]string) (int, error) {
	if label.Has(labels, label.TraefikPort) {
		port := label.GetIntValue(labels, label.TraefikPort, 0)

		if port <= 0 {
			return 0, fmt.Errorf("explicitly specified port %d must be larger than zero", port)
		} else if port > 0 {
			return port, nil
		}
	}

	ports := retrieveAvailablePorts(app, task)
	if len(ports) == 0 {
		return 0, errors.New("no port found")
	}

	portIndex := label.GetIntValue(labels, label.TraefikPortIndex, 0)
	if portIndex < 0 || portIndex > len(ports)-1 {
		return 0, fmt.Errorf("index %d must be within range (0, %d)", portIndex, len(ports)-1)
	}
	return ports[portIndex], nil
}

func retrieveAvailablePorts(app marathon.Application, task marathon.Task) []int {
	// Using default port configuration
	if len(task.Ports) > 0 {
		return task.Ports
	}

	// Using port definition if available
	if app.PortDefinitions != nil && len(*app.PortDefinitions) > 0 {
		var ports []int
		for _, def := range *app.PortDefinitions {
			if def.Port != nil {
				ports = append(ports, *def.Port)
			}
		}
		return ports
	}

	// If using IP-per-task using this port definition
	if app.IPAddressPerTask != nil && app.IPAddressPerTask.Discovery != nil && len(*(app.IPAddressPerTask.Discovery.Ports)) > 0 {
		var ports []int
		for _, def := range *(app.IPAddressPerTask.Discovery.Ports) {
			ports = append(ports, def.Number)
		}
		return ports
	}

	return []int{}
}

func identifier(app marathon.Application, task marathon.Task, segmentName string) string {
	id := fmt.Sprintf("Marathon task %s from application %s", task.ID, app.ID)
	if segmentName != "" {
		id += fmt.Sprintf(" (segment: %s)", segmentName)
	}
	return id
}

func (p *Provider) getServers(app appData) map[string]types.Server {
	var servers map[string]types.Server

	for _, task := range app.Tasks {
		name, server, err := p.getServer(app, *task)
		if err != nil {
			log.Error(err)
			continue
		}

		if servers == nil {
			servers = make(map[string]types.Server)
		}

		servers[name] = *server
	}

	for _, linkedApp := range app.LinkedApps {
		for _, task := range linkedApp.Tasks {
			name, server, err := p.getServer(*linkedApp, *task)
			if err != nil {
				log.Error(err)
				continue
			}

			if servers == nil {
				servers = make(map[string]types.Server)
			}

			servers[name] = *server
		}
	}

	return servers
}

func (p *Provider) getServer(app appData, task marathon.Task) (string, *types.Server, error) {
	host, err := p.getServerHost(task, app)
	if len(host) == 0 {
		return "", nil, err
	}

	port := getPort(task, app)
	protocol := label.GetStringValue(app.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)

	serverName := provider.Normalize("server-" + app.ID + "-" + task.ID + getSegmentNameSuffix(app.SegmentName))

	return serverName, &types.Server{
		URL:    fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(host, port)),
		Weight: label.GetIntValue(app.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
	}, nil
}

func (p *Provider) getServerHost(task marathon.Task, app appData) (string, error) {
	networks := app.Networks
	var hostFlag bool

	if networks == nil {
		hostFlag = app.IPAddressPerTask == nil
	} else {
		hostFlag = (*networks)[0].Mode != marathon.ContainerNetworkMode
	}

	if hostFlag || p.ForceTaskHostname {
		if len(task.Host) == 0 {
			return "", fmt.Errorf("host is undefined for task %q app %q", task.ID, app.ID)
		}
		return task.Host, nil
	}

	numTaskIPAddresses := len(task.IPAddresses)
	switch numTaskIPAddresses {
	case 0:
		return "", fmt.Errorf("missing IP address for Marathon application %s on task %s", app.ID, task.ID)
	case 1:
		return task.IPAddresses[0].IPAddress, nil
	default:
		ipAddressIdx := label.GetIntValue(stringValueMap(app.Labels), labelIPAddressIdx, math.MinInt32)

		if ipAddressIdx == math.MinInt32 {
			return "", fmt.Errorf("found %d task IP addresses but missing IP address index for Marathon application %s on task %s",
				numTaskIPAddresses, app.ID, task.ID)
		}
		if ipAddressIdx < 0 || ipAddressIdx > numTaskIPAddresses {
			return "", fmt.Errorf("cannot use IP address index to select from %d task IP addresses for Marathon application %s on task %s",
				numTaskIPAddresses, app.ID, task.ID)
		}

		return task.IPAddresses[ipAddressIdx].IPAddress, nil
	}
}
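The processPorts doc comment above describes the selection precedence: an explicit traefik.port label first, then the ports discovered on the task/application, picked by an optional index. A small same-package sketch (not part of the diff; it only uses the builders and label constants shown in this commit) of the two main cases:

// Sketch only: illustrates processPorts precedence under the assumptions above.
func examplePortResolution() (int, int) {
	app := application(appPorts(80, 81))
	tsk := task(taskPorts(80, 81))

	// No label: the first available task port is selected.
	noLabel, _ := processPorts(app, tsk, map[string]string{})
	// Explicit traefik.port label: the labeled port takes precedence.
	labeled, _ := processPorts(app, tsk, map[string]string{label.TraefikPort: "8080"})

	return noLabel, labeled // expected: 80, 8080
}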
@@ -1,388 +0,0 @@
package marathon

import (
	"testing"
	"time"

	"github.com/containous/flaeg/parse"
	"github.com/containous/traefik/old/provider/label"
	"github.com/containous/traefik/old/types"
	"github.com/gambol99/go-marathon"
	"github.com/stretchr/testify/assert"
)

func TestBuildConfigurationSegments(t *testing.T) {
	testCases := []struct {
		desc              string
		applications      *marathon.Applications
		expectedFrontends map[string]*types.Frontend
		expectedBackends  map[string]*types.Backend
	}{
		{
			desc: "multiple ports with segments",
			applications: withApplications(
				application(
					appID("/app"),
					appPorts(80, 81),
					withTasks(localhostTask(taskPorts(80, 81))),

					withLabel(label.TraefikBackendMaxConnAmount, "1000"),
					withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
					withSegmentLabel(label.TraefikPort, "80", "web"),
					withSegmentLabel(label.TraefikPort, "81", "admin"),
					withLabel("traefik..port", "82"), // This should be ignored, as it fails to match the segmentPropertiesRegexp regex.
					withSegmentLabel(label.TraefikFrontendRule, "Host:web.app.marathon.localhost", "web"),
					withSegmentLabel(label.TraefikFrontendRule, "Host:admin.app.marathon.localhost", "admin"),
				)),
			expectedFrontends: map[string]*types.Frontend{
				"frontend-app-service-web": {
					Backend: "backend-app-service-web",
					Routes: map[string]types.Route{
						`route-host-app-service-web`: {
							Rule: "Host:web.app.marathon.localhost",
						},
					},
					PassHostHeader: true,
					EntryPoints:    []string{},
				},
				"frontend-app-service-admin": {
					Backend: "backend-app-service-admin",
					Routes: map[string]types.Route{
						`route-host-app-service-admin`: {
							Rule: "Host:admin.app.marathon.localhost",
						},
					},
					PassHostHeader: true,
					EntryPoints:    []string{},
				},
			},
			expectedBackends: map[string]*types.Backend{
				"backend-app-service-web": {
					Servers: map[string]types.Server{
						"server-app-taskID-service-web": {
							URL:    "http://localhost:80",
							Weight: label.DefaultWeight,
						},
					},
					MaxConn: &types.MaxConn{
						Amount:        1000,
						ExtractorFunc: "client.ip",
					},
				},
				"backend-app-service-admin": {
					Servers: map[string]types.Server{
						"server-app-taskID-service-admin": {
							URL:    "http://localhost:81",
							Weight: label.DefaultWeight,
						},
					},
					MaxConn: &types.MaxConn{
						Amount:        1000,
						ExtractorFunc: "client.ip",
					},
				},
			},
		},
		{
			desc: "when all labels are set",
			applications: withApplications(
				application(
					appID("/app"),
					appPorts(80, 81),
					withTasks(localhostTask(taskPorts(80, 81))),

					// withLabel(label.TraefikBackend, "foobar"),

					withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"),
					withLabel(label.TraefikBackendHealthCheckPath, "/health"),
					withLabel(label.TraefikBackendHealthCheckPort, "880"),
					withLabel(label.TraefikBackendHealthCheckInterval, "6"),
					withLabel(label.TraefikBackendHealthCheckTimeout, "3"),
					withLabel(label.TraefikBackendLoadBalancerMethod, "drr"),
					withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
					withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
					withLabel(label.TraefikBackendMaxConnAmount, "666"),
					withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
					withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
					withLabel(label.TraefikBackendBufferingMemResponseBodyBytes, "2097152"),
					withLabel(label.TraefikBackendBufferingMaxRequestBodyBytes, "10485760"),
					withLabel(label.TraefikBackendBufferingMemRequestBodyBytes, "2097152"),
					withLabel(label.TraefikBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"),

					withSegmentLabel(label.TraefikPort, "80", "containous"),
					withSegmentLabel(label.TraefikProtocol, "https", "containous"),
					withSegmentLabel(label.TraefikWeight, "12", "containous"),

					withSegmentLabel(label.TraefikFrontendPassTLSClientCertPem, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotBefore, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotAfter, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSans, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerCommonName, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerCountry, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerLocality, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerOrganization, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerProvince, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCountry, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectLocality, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectProvince, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, "true", "containous"),

					withSegmentLabel(label.TraefikFrontendAuthBasic, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthBasicRemoveHeader, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthBasicUsersFile, ".htpasswd", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthDigestRemoveHeader, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthDigestUsersFile, ".htpasswd", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardAddress, "auth.server", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardTrustForwardHeader, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardTLSCa, "ca.crt", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardTLSCaOptional, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardTLSCert, "server.crt", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardTLSKey, "server.key", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthForwardTLSInsecureSkipVerify, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User", "containous"),

					withSegmentLabel(label.TraefikFrontendEntryPoints, "http,https", "containous"),
					withSegmentLabel(label.TraefikFrontendPassHostHeader, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPassTLSCert, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendPriority, "666", "containous"),
					withSegmentLabel(label.TraefikFrontendRedirectEntryPoint, "https", "containous"),
					withSegmentLabel(label.TraefikFrontendRedirectRegex, "nope", "containous"),
					withSegmentLabel(label.TraefikFrontendRedirectReplacement, "nope", "containous"),
					withSegmentLabel(label.TraefikFrontendRedirectPermanent, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendRule, "Host:traefik.io", "containous"),
					withSegmentLabel(label.TraefikFrontendWhiteListSourceRange, "10.10.10.10", "containous"),

					withSegmentLabel(label.TraefikFrontendRequestHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
					withSegmentLabel(label.TraefikFrontendResponseHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
					withSegmentLabel(label.TraefikFrontendSSLProxyHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"),
					withSegmentLabel(label.TraefikFrontendAllowedHosts, "foo,bar,bor", "containous"),
					withSegmentLabel(label.TraefikFrontendHostsProxyHeaders, "foo,bar,bor", "containous"),
					withSegmentLabel(label.TraefikFrontendSSLForceHost, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendSSLHost, "foo", "containous"),
					withSegmentLabel(label.TraefikFrontendCustomFrameOptionsValue, "foo", "containous"),
					withSegmentLabel(label.TraefikFrontendContentSecurityPolicy, "foo", "containous"),
					withSegmentLabel(label.TraefikFrontendPublicKey, "foo", "containous"),
					withSegmentLabel(label.TraefikFrontendReferrerPolicy, "foo", "containous"),
					withSegmentLabel(label.TraefikFrontendCustomBrowserXSSValue, "foo", "containous"),
					withSegmentLabel(label.TraefikFrontendSTSSeconds, "666", "containous"),
					withSegmentLabel(label.TraefikFrontendSSLRedirect, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendSSLTemporaryRedirect, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendSTSIncludeSubdomains, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendSTSPreload, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendForceSTSHeader, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendFrameDeny, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendContentTypeNosniff, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendBrowserXSSFilter, "true", "containous"),
					withSegmentLabel(label.TraefikFrontendIsDevelopment, "true", "containous"),

					withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageStatus, "404"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageBackend, "foobar"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageQuery, "foo_query"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageStatus, "500,600"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageBackend, "foobar"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageQuery, "bar_query"),

					withSegmentLabel(label.TraefikFrontendRateLimitExtractorFunc, "client.ip", "containous"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitPeriod, "6"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitAverage, "12"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitBurst, "18"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitPeriod, "3"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitAverage, "6"),
					withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitBurst, "9"),
				)),
			expectedFrontends: map[string]*types.Frontend{
				"frontend-app-service-containous": {
					EntryPoints: []string{
						"http",
						"https",
					},
					Backend: "backend-app-service-containous",
					Routes: map[string]types.Route{
						"route-host-app-service-containous": {
							Rule: "Host:traefik.io",
						},
					},
					PassHostHeader: true,
					PassTLSCert:    true,
					Priority:       666,
					PassTLSClientCert: &types.TLSClientHeaders{
						PEM: true,
						Infos: &types.TLSClientCertificateInfos{
							NotBefore: true,
							Sans:      true,
							NotAfter:  true,
							Subject: &types.TLSCLientCertificateDNInfos{
								CommonName:      true,
								Country:         true,
								DomainComponent: true,
								Locality:        true,
								Organization:    true,
								Province:        true,
								SerialNumber:    true,
							},
							Issuer: &types.TLSCLientCertificateDNInfos{
								CommonName:      true,
								Country:         true,
								DomainComponent: true,
								Locality:        true,
								Organization:    true,
								Province:        true,
								SerialNumber:    true,
							},
						},
					},
					Auth: &types.Auth{
						HeaderField: "X-WebAuth-User",
						Basic: &types.Basic{
							RemoveHeader: true,
							Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
								"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"},
							UsersFile: ".htpasswd",
						},
					},
					WhiteList: &types.WhiteList{
						SourceRange: []string{"10.10.10.10"},
					},
					Headers: &types.Headers{
						CustomRequestHeaders: map[string]string{
							"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
							"Content-Type":                 "application/json; charset=utf-8",
						},
						CustomResponseHeaders: map[string]string{
							"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
							"Content-Type":                 "application/json; charset=utf-8",
						},
						AllowedHosts: []string{
							"foo",
							"bar",
							"bor",
						},
						HostsProxyHeaders: []string{
							"foo",
							"bar",
							"bor",
						},
						SSLRedirect:          true,
						SSLTemporaryRedirect: true,
						SSLForceHost:         true,
						SSLHost:              "foo",
						SSLProxyHeaders: map[string]string{
							"Access-Control-Allow-Methods": "POST,GET,OPTIONS",
							"Content-Type":                 "application/json; charset=utf-8",
						},
						STSSeconds:              666,
						STSIncludeSubdomains:    true,
						STSPreload:              true,
						ForceSTSHeader:          true,
						FrameDeny:               true,
						CustomFrameOptionsValue: "foo",
						ContentTypeNosniff:      true,
						BrowserXSSFilter:        true,
						CustomBrowserXSSValue:   "foo",
						ContentSecurityPolicy:   "foo",
						PublicKey:               "foo",
						ReferrerPolicy:          "foo",
						IsDevelopment:           true,
					},
					Errors: map[string]*types.ErrorPage{
						"bar": {
							Status: []string{
								"500",
								"600",
							},
							Backend: "backendfoobar",
							Query:   "bar_query",
						},
						"foo": {
							Status: []string{
								"404",
							},
							Backend: "backendfoobar",
							Query:   "foo_query",
						},
					},
					RateLimit: &types.RateLimit{
						RateSet: map[string]*types.Rate{
							"bar": {
								Period:  parse.Duration(3 * time.Second),
								Average: 6,
								Burst:   9,
							},
							"foo": {
								Period:  parse.Duration(6 * time.Second),
								Average: 12,
								Burst:   18,
							},
						},
						ExtractorFunc: "client.ip",
					},
					Redirect: &types.Redirect{
						EntryPoint: "https",
						Permanent:  true,
					},
				},
			},
			expectedBackends: map[string]*types.Backend{
				"backend-app-service-containous": {
					Servers: map[string]types.Server{
						"server-app-taskID-service-containous": {
							URL:    "https://localhost:80",
							Weight: 12,
						},
					},
					CircuitBreaker: &types.CircuitBreaker{
						Expression: "NetworkErrorRatio() > 0.5",
					},
					LoadBalancer: &types.LoadBalancer{
						Method: "drr",
						Stickiness: &types.Stickiness{
							CookieName: "chocolate",
						},
					},
					MaxConn: &types.MaxConn{
						Amount:        666,
						ExtractorFunc: "client.ip",
					},
					HealthCheck: &types.HealthCheck{
						Path:     "/health",
						Port:     880,
						Interval: "6",
						Timeout:  "3",
					},
					Buffering: &types.Buffering{
						MaxResponseBodyBytes: 10485760,
						MemResponseBodyBytes: 2097152,
						MaxRequestBodyBytes:  10485760,
						MemRequestBodyBytes:  2097152,
						RetryExpression:      "IsNetworkError() && Attempts() <= 2",
					},
				},
			},
		},
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			p := &Provider{
				Domain:           "marathon.localhost",
				ExposedByDefault: true,
			}

			actualConfig := p.buildConfiguration(test.applications)

			assert.NotNil(t, actualConfig)
			assert.Equal(t, test.expectedBackends, actualConfig.Backends)
			assert.Equal(t, test.expectedFrontends, actualConfig.Frontends)
		})
	}
}
File diff suppressed because it is too large
@@ -1,8 +0,0 @@
package marathon

func stringValueMap(mp *map[string]string) map[string]string {
	if mp != nil {
		return *mp
	}
	return make(map[string]string)
}
@@ -1,24 +0,0 @@
package marathon

import (
	"errors"

	"github.com/containous/traefik/old/provider/marathon/mocks"
	"github.com/gambol99/go-marathon"
	"github.com/stretchr/testify/mock"
)

type fakeClient struct {
	mocks.Marathon
}

func newFakeClient(applicationsError bool, applications marathon.Applications) *fakeClient {
	// create an instance of our test object
	fakeClient := new(fakeClient)
	if applicationsError {
		fakeClient.On("Applications", mock.Anything).Return(nil, errors.New("fake Marathon server error"))
	} else {
		fakeClient.On("Applications", mock.Anything).Return(&applications, nil)
	}
	return fakeClient
}
@@ -1,188 +0,0 @@
package marathon

import (
	"net"
	"net/http"
	"net/url"
	"time"

	"github.com/cenk/backoff"
	"github.com/containous/flaeg/parse"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/old/log"
	"github.com/containous/traefik/old/provider"
	"github.com/containous/traefik/old/types"
	"github.com/containous/traefik/safe"
	"github.com/gambol99/go-marathon"
	"github.com/sirupsen/logrus"
)

const (
	traceMaxScanTokenSize = 1024 * 1024
	marathonEventIDs      = marathon.EventIDApplications |
		marathon.EventIDAddHealthCheck |
		marathon.EventIDDeploymentSuccess |
		marathon.EventIDDeploymentFailed |
		marathon.EventIDDeploymentInfo |
		marathon.EventIDDeploymentStepSuccess |
		marathon.EventIDDeploymentStepFailed
)

// TaskState denotes the Mesos state a task can have.
type TaskState string

const (
	taskStateRunning TaskState = "TASK_RUNNING"
	taskStateStaging TaskState = "TASK_STAGING"
)

const (
	labelIPAddressIdx         = "traefik.ipAddressIdx"
	labelLbCompatibilityGroup = "HAPROXY_GROUP"
	labelLbCompatibility      = "HAPROXY_0_VHOST"
)

var _ provider.Provider = (*Provider)(nil)

// Provider holds configuration of the provider.
type Provider struct {
	provider.BaseProvider
	Endpoint                  string           `description:"Marathon server endpoint. You can also specify multiple endpoint for Marathon" export:"true"`
	Domain                    string           `description:"Default domain used" export:"true"`
	ExposedByDefault          bool             `description:"Expose Marathon apps by default" export:"true"`
	GroupsAsSubDomains        bool             `description:"Convert Marathon groups to subdomains" export:"true"`
	DCOSToken                 string           `description:"DCOSToken for DCOS environment, This will override the Authorization header" export:"true"`
	MarathonLBCompatibility   bool             `description:"Add compatibility with marathon-lb labels" export:"true"`
	FilterMarathonConstraints bool             `description:"Enable use of Marathon constraints in constraint filtering" export:"true"`
	TLS                       *types.ClientTLS `description:"Enable TLS support" export:"true"`
	DialerTimeout             parse.Duration   `description:"Set a dialer timeout for Marathon" export:"true"`
	ResponseHeaderTimeout     parse.Duration   `description:"Set a response header timeout for Marathon" export:"true"`
	TLSHandshakeTimeout       parse.Duration   `description:"Set a TLS handhsake timeout for Marathon" export:"true"`
	KeepAlive                 parse.Duration   `description:"Set a TCP Keep Alive time in seconds" export:"true"`
	ForceTaskHostname         bool             `description:"Force to use the task's hostname." export:"true"`
	Basic                     *Basic           `description:"Enable basic authentication" export:"true"`
	RespectReadinessChecks    bool             `description:"Filter out tasks with non-successful readiness checks during deployments" export:"true"`
	readyChecker              *readinessChecker
	marathonClient            marathon.Marathon
}

// Basic holds basic authentication specific configurations
type Basic struct {
	HTTPBasicAuthUser string `description:"Basic authentication User"`
	HTTPBasicPassword string `description:"Basic authentication Password"`
}

// Init the provider
func (p *Provider) Init(constraints types.Constraints) error {
	return p.BaseProvider.Init(constraints)
}

// Provide allows the marathon provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
	operation := func() error {
		config := marathon.NewDefaultConfig()
		config.URL = p.Endpoint
		config.EventsTransport = marathon.EventsTransportSSE
		if p.Trace {
			config.LogOutput = log.CustomWriterLevel(logrus.DebugLevel, traceMaxScanTokenSize)
		}
		if p.Basic != nil {
			config.HTTPBasicAuthUser = p.Basic.HTTPBasicAuthUser
			config.HTTPBasicPassword = p.Basic.HTTPBasicPassword
		}
		var rc *readinessChecker
		if p.RespectReadinessChecks {
			log.Debug("Enabling Marathon readiness checker")
			rc = defaultReadinessChecker(p.Trace)
		}
		p.readyChecker = rc

		if len(p.DCOSToken) > 0 {
			config.DCOSToken = p.DCOSToken
		}
		TLSConfig, err := p.TLS.CreateTLSConfig()
		if err != nil {
			return err
		}
		config.HTTPClient = &http.Client{
			Transport: &http.Transport{
				DialContext: (&net.Dialer{
					KeepAlive: time.Duration(p.KeepAlive),
					Timeout:   time.Duration(p.DialerTimeout),
				}).DialContext,
				ResponseHeaderTimeout: time.Duration(p.ResponseHeaderTimeout),
				TLSHandshakeTimeout:   time.Duration(p.TLSHandshakeTimeout),
				TLSClientConfig:       TLSConfig,
			},
		}
		client, err := marathon.NewClient(config)
		if err != nil {
			log.Errorf("Failed to create a client for marathon, error: %s", err)
			return err
		}
		p.marathonClient = client

		if p.Watch {
			update, err := client.AddEventsListener(marathonEventIDs)
			if err != nil {
				log.Errorf("Failed to register for events, %s", err)
				return err
			}
			pool.Go(func(stop chan bool) {
				defer close(update)
				for {
					select {
					case <-stop:
						return
					case event := <-update:
						log.Debugf("Received provider event %s", event)

						configuration := p.getConfiguration()
						if configuration != nil {
							configurationChan <- types.ConfigMessage{
								ProviderName:  "marathon",
								Configuration: configuration,
							}
						}
					}
				}
			})
		}

		configuration := p.getConfiguration()
		configurationChan <- types.ConfigMessage{
			ProviderName:  "marathon",
			Configuration: configuration,
		}
		return nil
	}

	notify := func(err error, time time.Duration) {
		log.Errorf("Provider connection error %+v, retrying in %s", err, time)
	}
	err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
	if err != nil {
		log.Errorf("Cannot connect to Provider server %+v", err)
	}
	return nil
}

func (p *Provider) getConfiguration() *types.Configuration {
	applications, err := p.getApplications()
	if err != nil {
		log.Errorf("Failed to retrieve Marathon applications: %v", err)
		return nil
	}

	return p.buildConfiguration(applications)
}

func (p *Provider) getApplications() (*marathon.Applications, error) {
	v := url.Values{}
	v.Add("embed", "apps.tasks")
	v.Add("embed", "apps.deployments")
	v.Add("embed", "apps.readiness")

	return p.marathonClient.Applications(v)
}
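A minimal same-package sketch (not part of the diff) of a Provider value, using only fields defined in the struct above and the values exercised by the tests; the endpoint URL is illustrative only:

// Sketch only: the smallest Provider configuration used in the tests,
// plus an assumed Marathon endpoint for illustration.
func exampleProvider() *Provider {
	return &Provider{
		Endpoint:         "http://127.0.0.1:8080", // assumed endpoint, not from the diff
		Domain:           "marathon.localhost",
		ExposedByDefault: true,
	}
}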
File diff suppressed because it is too large
@@ -1,122 +0,0 @@
package marathon

import (
	"time"

	"github.com/containous/traefik/old/log"
	"github.com/gambol99/go-marathon"
)

const (
	// readinessCheckDefaultTimeout is the default timeout for a readiness
	// check if no check timeout is specified on the application spec. This
	// should really never be the case, but better be safe than sorry.
	readinessCheckDefaultTimeout = 10 * time.Second
	// readinessCheckSafetyMargin is some buffer duration to account for
	// small offsets in readiness check execution.
	readinessCheckSafetyMargin = 5 * time.Second
	readinessLogHeader         = "Marathon readiness check: "
)

type readinessChecker struct {
	checkDefaultTimeout time.Duration
	checkSafetyMargin   time.Duration
	traceLogging        bool
}

func defaultReadinessChecker(isTraceLogging bool) *readinessChecker {
	return &readinessChecker{
		checkDefaultTimeout: readinessCheckDefaultTimeout,
		checkSafetyMargin:   readinessCheckSafetyMargin,
		traceLogging:        isTraceLogging,
	}
}

func (rc *readinessChecker) Do(task marathon.Task, app marathon.Application) bool {
	if rc == nil {
		// Readiness checker disabled.
		return true
	}

	switch {
	case len(app.Deployments) == 0:
		// We only care about readiness during deployments; post-deployment readiness
		// can be covered by a periodic post-deployment probe (i.e., Traefik health checks).
		rc.tracef("task %s app %s: ready = true [no deployment ongoing]", task.ID, app.ID)
		return true

	case app.ReadinessChecks == nil || len(*app.ReadinessChecks) == 0:
		// Applications without configured readiness checks are always considered
		// ready.
		rc.tracef("task %s app %s: ready = true [no readiness checks on app]", task.ID, app.ID)
		return true
	}

	// Loop through all readiness check results and return the results for
	// matching task IDs.
	if app.ReadinessCheckResults != nil {
		for _, readinessCheckResult := range *app.ReadinessCheckResults {
			if readinessCheckResult.TaskID == task.ID {
				rc.tracef("task %s app %s: ready = %t [evaluating readiness check ready state]", task.ID, app.ID, readinessCheckResult.Ready)
				return readinessCheckResult.Ready
			}
		}
	}

	// There's a corner case sometimes hit where the first new task of a
	// deployment goes from TASK_STAGING to TASK_RUNNING without a corresponding
	// readiness check result being included in the API response. This only happens
	// in a very short (yet unlucky) time frame and does not repeat for subsequent
	// tasks of the same deployment.
	// Complicating matters, the situation may occur for both initially deploying
	// applications as well as rolling-upgraded ones where one or more tasks from
	// a previous deployment exist already and are joined by new tasks from a
	// subsequent deployment. We must always make sure that pre-existing tasks
	// maintain their ready state while newly launched tasks must be considered
	// unready until a check result appears.
	// We distinguish the two cases by comparing the current time with the start
	// time of the task: It should take Marathon at most one readiness check timeout
	// interval (plus some safety margin to account for the delayed nature of
	// distributed systems) for readiness check results to be returned along the API
	// response. Once the task turns old enough, we assume it to be part of a
	// pre-existing deployment and mark it as ready. Note that it is okay to err
	// on the side of caution and consider a task unready until the safety time
	// window has elapsed because a newly created task should be readiness-checked
	// and be given a result fairly shortly after its creation (i.e., on the scale
	// of seconds).
	readinessCheckTimeoutSecs := (*app.ReadinessChecks)[0].TimeoutSeconds
	readinessCheckTimeout := time.Duration(readinessCheckTimeoutSecs) * time.Second
	if readinessCheckTimeout == 0 {
		rc.tracef("task %s app %s: readiness check timeout not set, using default value %s", task.ID, app.ID, rc.checkDefaultTimeout)
		readinessCheckTimeout = rc.checkDefaultTimeout
	} else {
		readinessCheckTimeout += rc.checkSafetyMargin
	}

	startTime, err := time.Parse(time.RFC3339, task.StartedAt)
	if err != nil {
		// An unparseable start time should never occur; if it does, we assume the
		// problem should be surfaced as quickly as possible, which is easiest if
		// we shun the task from rotation.
		log.Warnf("Failed to parse start-time %s of task %s from application %s: %s (assuming unready)", task.StartedAt, task.ID, app.ID, err)
		return false
	}

	since := time.Since(startTime)
	if since < readinessCheckTimeout {
		rc.tracef("task %s app %s: ready = false [task with start-time %s not within assumed check timeout window of %s (elapsed time since task start: %s)]", task.ID, app.ID, startTime.Format(time.RFC3339), readinessCheckTimeout, since)
		return false
	}

	// Finally, we can be certain this task is not part of the deployment (i.e.,
	// it's an old task that's going to transition into the TASK_KILLING and/or
	// TASK_KILLED state as new tasks' readiness checks gradually turn green.)
	rc.tracef("task %s app %s: ready = true [task with start-time %s not involved in deployment (elapsed time since task start: %s)]", task.ID, app.ID, startTime.Format(time.RFC3339), since)
	return true
}

func (rc *readinessChecker) tracef(format string, args ...interface{}) {
	if rc.traceLogging {
		log.Debugf(readinessLogHeader+format, args...)
	}
}
@@ -1,134 +0,0 @@
package marathon

import (
	"testing"
	"time"

	"github.com/gambol99/go-marathon"
)

func testReadinessChecker() *readinessChecker {
	return defaultReadinessChecker(false)
}

func TestDisabledReadinessChecker(t *testing.T) {
	var rc *readinessChecker
	tsk := task()
	app := application(
		deployments("deploymentId"),
		readinessCheck(0),
		readinessCheckResult(testTaskName, false),
	)

	if ready := rc.Do(tsk, app); !ready {
		t.Error("expected ready = true")
	}
}

func TestEnabledReadinessChecker(t *testing.T) {
	tests := []struct {
		desc          string
		task          marathon.Task
		app           marathon.Application
		rc            readinessChecker
		expectedReady bool
	}{
		{
			desc:          "no deployment running",
			task:          task(),
			app:           application(),
			expectedReady: true,
		},
		{
			desc:          "no readiness checks defined",
			task:          task(),
			app:           application(deployments("deploymentId")),
			expectedReady: true,
		},
		{
			desc: "readiness check result negative",
			task: task(),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
				readinessCheckResult("otherTaskID", true),
				readinessCheckResult(testTaskName, false),
			),
			expectedReady: false,
		},
		{
			desc: "readiness check result positive",
			task: task(),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
				readinessCheckResult("otherTaskID", false),
				readinessCheckResult(testTaskName, true),
			),
			expectedReady: true,
		},
		{
			desc: "no readiness check result with default timeout",
			task: task(startedAtFromNow(3 * time.Minute)),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
			),
			rc: readinessChecker{
				checkDefaultTimeout: 5 * time.Minute,
			},
			expectedReady: false,
		},
		{
			desc: "no readiness check result with readiness check timeout",
			task: task(startedAtFromNow(4 * time.Minute)),
			app: application(
				deployments("deploymentId"),
				readinessCheck(3*time.Minute),
			),
			rc: readinessChecker{
				checkSafetyMargin: 3 * time.Minute,
			},
			expectedReady: false,
		},
		{
			desc: "invalid task start time",
			task: task(startedAt("invalid")),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
			),
			expectedReady: false,
		},
		{
			desc: "task not involved in deployment",
			task: task(startedAtFromNow(1 * time.Hour)),
			app: application(
				deployments("deploymentId"),
				readinessCheck(0),
			),
			rc: readinessChecker{
				checkDefaultTimeout: 10 * time.Second,
			},
			expectedReady: true,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()
			rc := testReadinessChecker()
			if test.rc.checkDefaultTimeout > 0 {
				rc.checkDefaultTimeout = test.rc.checkDefaultTimeout
			}
			if test.rc.checkSafetyMargin > 0 {
				rc.checkSafetyMargin = test.rc.checkSafetyMargin
			}
			actualReady := test.rc.Do(test.task, test.app)
			if actualReady != test.expectedReady {
				t.Errorf("actual ready = %t, expected ready = %t", actualReady, test.expectedReady)
			}
		})
	}
}