Vendor integration dependencies.

Timo Reimann 2017-02-07 22:33:23 +01:00
parent dd5e3fba01
commit 55b57c736b
2451 changed files with 731611 additions and 0 deletions


@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Containous SAS, Emile Vauge, emile@vauge.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

integration/vendor/github.com/containous/flaeg/flaeg.go (generated, vendored, 752 lines)

@@ -0,0 +1,752 @@
package flaeg
import (
"errors"
"fmt"
flag "github.com/ogier/pflag"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"sort"
"strings"
"text/tabwriter"
"text/template"
"time"
)
// ErrParserNotFound is returned when a field is flagged but no parser matches its type
var ErrParserNotFound = errors.New("Parser not found or custom parser missing")
// getTypesRecursive links each flag in flagmap to its reflect.StructField
// objValue may be either a struct or a pointer to a struct
// Flags are generated from the field name or from the StructTag
func getTypesRecursive(objValue reflect.Value, flagmap map[string]reflect.StructField, key string) error {
name := key
switch objValue.Kind() {
case reflect.Struct:
for i := 0; i < objValue.NumField(); i++ {
if objValue.Type().Field(i).Anonymous {
if err := getTypesRecursive(objValue.Field(i), flagmap, name); err != nil {
return err
}
} else if len(objValue.Type().Field(i).Tag.Get("description")) > 0 {
fieldName := objValue.Type().Field(i).Name
if !isExported(fieldName) {
return fmt.Errorf("Field %s is an unexported field", fieldName)
}
name += objValue.Type().Name()
if tag := objValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
fieldName = tag
}
if len(key) == 0 {
//Lower Camel Case
//name = strings.ToLower(string(fieldName[0])) + fieldName[1:]
name = strings.ToLower(fieldName)
} else {
name = key + "." + strings.ToLower(fieldName)
}
if _, ok := flagmap[name]; ok {
return errors.New("Tag already exists: " + name)
}
flagmap[name] = objValue.Type().Field(i)
if err := getTypesRecursive(objValue.Field(i), flagmap, name); err != nil {
return err
}
}
}
case reflect.Ptr:
if len(key) > 0 {
field := flagmap[name]
field.Type = reflect.TypeOf(false)
flagmap[name] = field
}
typ := objValue.Type().Elem()
inst := reflect.New(typ).Elem()
if err := getTypesRecursive(inst, flagmap, name); err != nil {
return err
}
default:
return nil
}
return nil
}
// GetBoolFlags returns the flags exposed as booleans (including pointer-enabling flags)
func GetBoolFlags(config interface{}) ([]string, error) {
flagmap := make(map[string]reflect.StructField)
if err := getTypesRecursive(reflect.ValueOf(config), flagmap, ""); err != nil {
return []string{}, err
}
flags := make([]string, 0, len(flagmap))
for f, structField := range flagmap {
if structField.Type.Kind() == reflect.Bool {
flags = append(flags, f)
}
}
return flags, nil
}
// GetFlags returns all flag names
func GetFlags(config interface{}) ([]string, error) {
flagmap := make(map[string]reflect.StructField)
if err := getTypesRecursive(reflect.ValueOf(config), flagmap, ""); err != nil {
return []string{}, err
}
flags := make([]string, 0, len(flagmap))
for f := range flagmap {
flags = append(flags, f)
}
return flags, nil
}
// loadParsers loads the default parsers plus the custom parsers given as parameter.
// It returns a map[reflect.Type]Parser covering bool, int, int64, uint, uint64, float64, string, time.Duration and time.Time.
func loadParsers(customParsers map[reflect.Type]Parser) (map[reflect.Type]Parser, error) {
parsers := map[reflect.Type]Parser{}
var boolParser boolValue
parsers[reflect.TypeOf(true)] = &boolParser
var intParser intValue
parsers[reflect.TypeOf(1)] = &intParser
var int64Parser int64Value
parsers[reflect.TypeOf(int64(1))] = &int64Parser
var uintParser uintValue
parsers[reflect.TypeOf(uint(1))] = &uintParser
var uint64Parser uint64Value
parsers[reflect.TypeOf(uint64(1))] = &uint64Parser
var stringParser stringValue
parsers[reflect.TypeOf("")] = &stringParser
var float64Parser float64Value
parsers[reflect.TypeOf(float64(1.5))] = &float64Parser
var durationParser durationValue
parsers[reflect.TypeOf(time.Second)] = &durationParser
var timeParser timeValue
parsers[reflect.TypeOf(time.Now())] = &timeParser
for rType, parser := range customParsers {
parsers[rType] = parser
}
return parsers, nil
}
// parseArgs parses args and returns valmap (map[flag]Parser), using parsers (map[type]Parser).
// args must be formatted as described in the flag documentation. See https://golang.org/pkg/flag
func parseArgs(args []string, flagmap map[string]reflect.StructField, parsers map[reflect.Type]Parser) (map[string]Parser, error) {
//Return var
valmap := make(map[string]Parser)
//Visitor in flag.Parse
flagList := []*flag.Flag{}
visitor := func(fl *flag.Flag) {
flagList = append(flagList, fl)
}
newParsers := map[string]Parser{}
flagSet := flag.NewFlagSet("flaeg.Load", flag.ContinueOnError)
//Disable output
flagSet.SetOutput(ioutil.Discard)
var err error
for flag, structField := range flagmap {
//for _, flag := range flags {
//structField := flagmap[flag]
if parser, ok := parsers[structField.Type]; ok {
newparserValue := reflect.New(reflect.TypeOf(parser).Elem())
newparserValue.Elem().Set(reflect.ValueOf(parser).Elem())
newparser := newparserValue.Interface().(Parser)
if short := structField.Tag.Get("short"); len(short) == 1 {
// fmt.Printf("short : %s long : %s\n", short, flag)
flagSet.VarP(newparser, flag, short, structField.Tag.Get("description"))
} else {
flagSet.Var(newparser, flag, structField.Tag.Get("description"))
}
newParsers[flag] = newparser
} else {
err = ErrParserNotFound
}
}
// prevents case sensitivity issue
args = argsToLower(args)
if err := flagSet.Parse(args); err != nil {
return nil, err
}
//Fill flagList with parsed flags
flagSet.Visit(visitor)
//Return parsers on parsed flag
for _, flag := range flagList {
valmap[flag.Name] = newParsers[flag.Name]
}
return valmap, err
}
func getDefaultValue(defaultValue reflect.Value, defaultPointersValue reflect.Value, defaultValmap map[string]reflect.Value, key string) error {
if defaultValue.Type() != defaultPointersValue.Type() {
return fmt.Errorf("Parameters defaultValue and defaultPointersValue must be the same struct. defaultValue type : %s is not defaultPointersValue type : %s", defaultValue.Type().String(), defaultPointersValue.Type().String())
}
name := key
switch defaultValue.Kind() {
case reflect.Struct:
for i := 0; i < defaultValue.NumField(); i++ {
if defaultValue.Type().Field(i).Anonymous {
if err := getDefaultValue(defaultValue.Field(i), defaultPointersValue.Field(i), defaultValmap, name); err != nil {
return err
}
} else if len(defaultValue.Type().Field(i).Tag.Get("description")) > 0 {
name += defaultValue.Type().Name()
fieldName := defaultValue.Type().Field(i).Name
if tag := defaultValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
fieldName = tag
}
if len(key) == 0 {
name = strings.ToLower(fieldName)
} else {
name = key + "." + strings.ToLower(fieldName)
}
if defaultValue.Field(i).Kind() != reflect.Ptr {
// if _, ok := defaultValmap[name]; ok {
// return errors.New("Tag already exists: " + name)
// }
defaultValmap[name] = defaultValue.Field(i)
// fmt.Printf("%s: got default value %+v\n", name, defaultValue.Field(i))
}
if err := getDefaultValue(defaultValue.Field(i), defaultPointersValue.Field(i), defaultValmap, name); err != nil {
return err
}
}
}
case reflect.Ptr:
if !defaultPointersValue.IsNil() {
if len(key) != 0 {
//turn ptr fields to nil
defaultPointersNilValue, err := setPointersNil(defaultPointersValue)
if err != nil {
return err
}
defaultValmap[name] = defaultPointersNilValue
// fmt.Printf("%s: got default value %+v\n", name, defaultPointersNilValue)
}
if !defaultValue.IsNil() {
if err := getDefaultValue(defaultValue.Elem(), defaultPointersValue.Elem(), defaultValmap, name); err != nil {
return err
}
} else {
if err := getDefaultValue(defaultPointersValue.Elem(), defaultPointersValue.Elem(), defaultValmap, name); err != nil {
return err
}
}
} else {
instValue := reflect.New(defaultPointersValue.Type().Elem())
if len(key) != 0 {
defaultValmap[name] = instValue
// fmt.Printf("%s: got default value %+v\n", name, instValue)
}
if !defaultValue.IsNil() {
if err := getDefaultValue(defaultValue.Elem(), instValue.Elem(), defaultValmap, name); err != nil {
return err
}
} else {
if err := getDefaultValue(instValue.Elem(), instValue.Elem(), defaultValmap, name); err != nil {
return err
}
}
}
default:
return nil
}
return nil
}
// setPointersNil takes a reflect.Value of a non-nil pointer to a struct and returns a copy with all pointer fields set to nil
func setPointersNil(objValue reflect.Value) (reflect.Value, error) {
if objValue.Kind() != reflect.Ptr {
return objValue, fmt.Errorf("Parameters objValue must be a not-nil pointer on a struct, not a %s", objValue.Kind().String())
} else if objValue.IsNil() {
return objValue, fmt.Errorf("Parameters objValue must be a not-nil pointer")
} else if objValue.Elem().Kind() != reflect.Struct {
// fmt.Printf("Parameters objValue must be a not-nil pointer on a struct, not a pointer on a %s\n", objValue.Elem().Kind().String())
return objValue, nil
}
//Clone
starObjValue := objValue.Elem()
nilPointersObjVal := reflect.New(starObjValue.Type())
starNilPointersObjVal := nilPointersObjVal.Elem()
starNilPointersObjVal.Set(starObjValue)
for i := 0; i < nilPointersObjVal.Elem().NumField(); i++ {
if field := nilPointersObjVal.Elem().Field(i); field.Kind() == reflect.Ptr && field.CanSet() {
field.Set(reflect.Zero(field.Type()))
}
}
return nilPointersObjVal, nil
}
// fillStructRecursive initializes the value of any tagged struct given by reference
func fillStructRecursive(objValue reflect.Value, defaultPointerValmap map[string]reflect.Value, valmap map[string]Parser, key string) error {
name := key
switch objValue.Kind() {
case reflect.Struct:
for i := 0; i < objValue.Type().NumField(); i++ {
if objValue.Type().Field(i).Anonymous {
if err := fillStructRecursive(objValue.Field(i), defaultPointerValmap, valmap, name); err != nil {
return err
}
} else if len(objValue.Type().Field(i).Tag.Get("description")) > 0 {
name += objValue.Type().Name()
fieldName := objValue.Type().Field(i).Name
if tag := objValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
fieldName = tag
}
if len(key) == 0 {
name = strings.ToLower(fieldName)
} else {
name = key + "." + strings.ToLower(fieldName)
}
// fmt.Println(name)
if objValue.Field(i).Kind() != reflect.Ptr {
if val, ok := valmap[name]; ok {
// fmt.Printf("%s : set def val\n", name)
if err := setFields(objValue.Field(i), val); err != nil {
return err
}
}
}
if err := fillStructRecursive(objValue.Field(i), defaultPointerValmap, valmap, name); err != nil {
return err
}
}
}
case reflect.Ptr:
if len(key) == 0 && !objValue.IsNil() {
if err := fillStructRecursive(objValue.Elem(), defaultPointerValmap, valmap, name); err != nil {
return err
}
return nil
}
contains := false
for flag := range valmap {
// TODO replace by regexp
if strings.Contains(flag, name+".") {
contains = true
break
}
}
needDefault := false
if _, ok := valmap[name]; ok {
needDefault = valmap[name].Get().(bool)
}
if contains && objValue.IsNil() {
needDefault = true
}
if needDefault {
if defVal, ok := defaultPointerValmap[name]; ok {
//set default pointer value
// fmt.Printf("%s : set default value %+v\n", name, defVal)
objValue.Set(defVal)
} else {
return fmt.Errorf("flag %s default value not provided", name)
}
}
if !objValue.IsNil() && contains {
if objValue.Type().Elem().Kind() == reflect.Struct {
if err := fillStructRecursive(objValue.Elem(), defaultPointerValmap, valmap, name); err != nil {
return err
}
}
}
default:
return nil
}
return nil
}
// setFields sets fieldValue from the parsed value val
func setFields(fieldValue reflect.Value, val Parser) error {
if fieldValue.CanSet() {
fieldValue.Set(reflect.ValueOf(val).Elem().Convert(fieldValue.Type()))
} else {
return errors.New(fieldValue.Type().String() + " is not settable.")
}
return nil
}
//PrintHelp generates and prints command line help
func PrintHelp(flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser) error {
return PrintHelpWithCommand(flagmap, defaultValmap, parsers, nil, nil)
}
// PrintError takes a non-nil error and prints command line help
func PrintError(err error, flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser) error {
if err != flag.ErrHelp {
fmt.Printf("Error : %s\n", err)
}
if !strings.Contains(err.Error(), ":No parser for type") {
PrintHelp(flagmap, defaultValmap, parsers)
}
return err
}
// LoadWithParsers initializes config (struct fields given by reference) from args (the command-line arguments).
// Custom parsers may be given.
func LoadWithParsers(config interface{}, defaultValue interface{}, args []string, customParsers map[reflect.Type]Parser) error {
cmd := &Command{
Config: config,
DefaultPointersConfig: defaultValue,
}
_, cmd.Name = path.Split(os.Args[0])
return LoadWithCommand(cmd, args, customParsers, nil)
}
// Load initializes config (struct fields given by reference) from args (the command-line arguments).
// It uses the default parsers only.
func Load(config interface{}, defaultValue interface{}, args []string) error {
return LoadWithParsers(config, defaultValue, args, nil)
}
// Command contains program/command information (command name and description)
// Config must be a pointer to the configuration struct to parse (it contains the default values of fields)
// DefaultPointersConfig contains default pointer values: those values are set on pointer fields if their flags are called
// It must be the same type (struct) as Config
// Run is the func which launches the program using the initialized configuration structure
type Command struct {
Name string
Description string
Config interface{}
DefaultPointersConfig interface{} //TODO:case DefaultPointersConfig is nil
Run func() error
Metadata map[string]string
}
// LoadWithCommand initializes config (struct fields given by reference) from cmdArgs (the command-line arguments).
// Custom parsers and sub-commands may be given.
func LoadWithCommand(cmd *Command, cmdArgs []string, customParsers map[reflect.Type]Parser, subCommand []*Command) error {
parsers, err := loadParsers(customParsers)
if err != nil {
return err
}
tagsmap := make(map[string]reflect.StructField)
if err := getTypesRecursive(reflect.ValueOf(cmd.Config), tagsmap, ""); err != nil {
return err
}
defaultValmap := make(map[string]reflect.Value)
if err := getDefaultValue(reflect.ValueOf(cmd.Config), reflect.ValueOf(cmd.DefaultPointersConfig), defaultValmap, ""); err != nil {
return err
}
valmap, errParseArgs := parseArgs(cmdArgs, tagsmap, parsers)
if errParseArgs != nil && errParseArgs != ErrParserNotFound {
return PrintErrorWithCommand(errParseArgs, tagsmap, defaultValmap, parsers, cmd, subCommand)
}
if err := fillStructRecursive(reflect.ValueOf(cmd.Config), defaultValmap, valmap, ""); err != nil {
return err
}
if errParseArgs == ErrParserNotFound {
return errParseArgs
}
return nil
}
//PrintHelpWithCommand generates and prints command line help for a Command
func PrintHelpWithCommand(flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser, cmd *Command, subCmd []*Command) error {
// Define a template
// following the POSIX utility conventions: http://pubs.opengroup.org/onlinepubs/9699919799/
const helper = `{{if .ProgDescription}}{{.ProgDescription}}
{{end}}Usage: {{.ProgName}} [--flag=flag_argument] [-f[flag_argument]] ... set flag_argument to flag(s)
or: {{.ProgName}} [--flag[=true|false| ]] [-f[true|false| ]] ... set true/false to boolean flag(s)
{{if .SubCommands}}
Available Commands:{{range $subCmdName, $subCmdDesc := .SubCommands}}
{{printf "\t%-50s %s" $subCmdName $subCmdDesc}}{{end}}
Use "{{.ProgName}} [command] --help" for more information about a command.
{{end}}
Flags:
`
// Use a struct to give data to template
type TempStruct struct {
ProgName string
ProgDescription string
SubCommands map[string]string
}
tempStruct := TempStruct{}
if cmd != nil {
tempStruct.ProgName = cmd.Name
tempStruct.ProgDescription = cmd.Description
tempStruct.SubCommands = map[string]string{}
if len(subCmd) > 1 && cmd == subCmd[0] {
for _, c := range subCmd[1:] {
tempStruct.SubCommands[c.Name] = c.Description
}
}
} else {
_, tempStruct.ProgName = path.Split(os.Args[0])
}
//Run Template
tmplHelper, err := template.New("helper").Parse(helper)
if err != nil {
return err
}
err = tmplHelper.Execute(os.Stdout, tempStruct)
if err != nil {
return err
}
return printFlagsDescriptionsDefaultValues(flagmap, defaultValmap, parsers, os.Stdout)
}
func printFlagsDescriptionsDefaultValues(flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser, output io.Writer) error {
// Sort flags alphabetically and drop unparsable flags
flags := []string{}
for flag, field := range flagmap {
if _, ok := parsers[field.Type]; ok {
flags = append(flags, flag)
}
}
sort.Strings(flags)
// Process data
descriptions := []string{}
defaultValues := []string{}
flagsWithDashs := []string{}
shortFlagsWithDash := []string{}
for _, flag := range flags {
field := flagmap[flag]
if short := field.Tag.Get("short"); len(short) == 1 {
shortFlagsWithDash = append(shortFlagsWithDash, "-"+short+",")
} else {
shortFlagsWithDash = append(shortFlagsWithDash, "")
}
flagsWithDashs = append(flagsWithDashs, "--"+flag)
//flag on pointer ?
if defVal, ok := defaultValmap[flag]; ok {
if defVal.Kind() != reflect.Ptr {
// Set defaultValue on parsers
parsers[field.Type].SetValue(defaultValmap[flag].Interface())
}
if defVal := parsers[field.Type].String(); len(defVal) > 0 {
defaultValues = append(defaultValues, fmt.Sprintf("(default \"%s\")", defVal))
} else {
defaultValues = append(defaultValues, "")
}
}
splittedDescriptions := split(field.Tag.Get("description"), 80)
for i, description := range splittedDescriptions {
descriptions = append(descriptions, description)
if i != 0 {
defaultValues = append(defaultValues, "")
flagsWithDashs = append(flagsWithDashs, "")
shortFlagsWithDash = append(shortFlagsWithDash, "")
}
}
}
//add help flag
shortFlagsWithDash = append(shortFlagsWithDash, "-h,")
flagsWithDashs = append(flagsWithDashs, "--help")
descriptions = append(descriptions, "Print Help (this message) and exit")
defaultValues = append(defaultValues, "")
return displayTab(output, shortFlagsWithDash, flagsWithDashs, descriptions, defaultValues)
}
func split(str string, width int) []string {
if len(str) > width {
index := strings.LastIndex(str[:width], " ")
if index == -1 {
index = width
}
return append([]string{strings.TrimSpace(str[:index])}, split(strings.TrimSpace(str[index:]), width)...)
}
return []string{str}
}
func displayTab(output io.Writer, columns ...[]string) error {
nbRow := len(columns[0])
nbCol := len(columns)
w := new(tabwriter.Writer)
w.Init(output, 0, 4, 1, ' ', 0)
for i := 0; i < nbRow; i++ {
row := ""
for j, col := range columns {
row += col[i]
if j != nbCol-1 {
row += "\t"
}
}
fmt.Fprintln(w, row)
}
w.Flush()
return nil
}
// PrintErrorWithCommand takes a non-nil error and prints command line help
func PrintErrorWithCommand(err error, flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser, cmd *Command, subCmd []*Command) error {
if err != flag.ErrHelp {
fmt.Printf("Error here : %s\n", err)
}
PrintHelpWithCommand(flagmap, defaultValmap, parsers, cmd, subCmd)
return err
}
// Flaeg contains the commands (at least the root one),
// the raw arguments (command and/or flags),
// and a map of custom parsers.
type Flaeg struct {
calledCommand *Command
commands []*Command // rootCommand is the first one in this slice
args []string
commmandArgs []string
customParsers map[reflect.Type]Parser
}
// New creates and initializes a pointer to Flaeg
func New(rootCommand *Command, args []string) *Flaeg {
var f Flaeg
f.commands = []*Command{rootCommand}
f.args = args
f.customParsers = map[reflect.Type]Parser{}
return &f
}
//AddCommand adds sub-command to the root command
func (f *Flaeg) AddCommand(command *Command) {
f.commands = append(f.commands, command)
}
//AddParser adds custom parser for a type to the map of custom parsers
func (f *Flaeg) AddParser(typ reflect.Type, parser Parser) {
f.customParsers[typ] = parser
}
// Run calls the command with the flags given as arguments
func (f *Flaeg) Run() error {
if f.calledCommand == nil {
if _, _, err := f.findCommandWithCommandArgs(); err != nil {
return err
}
}
if _, err := f.Parse(f.calledCommand); err != nil {
return err
}
return f.calledCommand.Run()
}
// Parse calls the Flaeg Load function and returns the parsed command structure (by reference)
// It returns a non-nil error if it fails
func (f *Flaeg) Parse(cmd *Command) (*Command, error) {
if f.calledCommand == nil {
f.commmandArgs = f.args
}
if err := LoadWithCommand(cmd, f.commmandArgs, f.customParsers, f.commands); err != nil {
return cmd, err
}
return cmd, nil
}
// splitArgs takes args ([]string) and returns the command name ("" for the root command) and the command's args
func splitArgs(args []string) (string, []string) {
if len(args) >= 1 && len(args[0]) >= 1 && string(args[0][0]) != "-" {
if len(args) == 1 {
return strings.ToLower(args[0]), []string{}
}
return strings.ToLower(args[0]), args[1:]
}
return "", args
}
// findCommandWithCommandArgs returns the called command (by reference) and command's args
// the error returned is not nil if it fails
func (f *Flaeg) findCommandWithCommandArgs() (*Command, []string, error) {
commandName := ""
commandName, f.commmandArgs = splitArgs(f.args)
if len(commandName) > 0 {
for _, command := range f.commands {
if commandName == command.Name {
f.calledCommand = command
return f.calledCommand, f.commmandArgs, nil
}
}
return nil, []string{}, fmt.Errorf("Command %s not found", commandName)
}
f.calledCommand = f.commands[0]
return f.calledCommand, f.commmandArgs, nil
}
// GetCommand splits args and returns the called command (by reference)
// It returns nil and a non-nil error if it fails
func (f *Flaeg) GetCommand() (*Command, error) {
if f.calledCommand == nil {
_, _, err := f.findCommandWithCommandArgs()
return f.calledCommand, err
}
return f.calledCommand, nil
}
// isExported returns true if the field (identified by fieldName) is exported,
// false otherwise
func isExported(fieldName string) bool {
if len(fieldName) < 1 {
return false
}
if string(fieldName[0]) == strings.ToUpper(string(fieldName[0])) {
return true
}
return false
}
func argToLower(inArg string) string {
if len(inArg) < 2 {
return strings.ToLower(inArg)
}
var outArg string
dashIndex := strings.Index(inArg, "--")
if dashIndex == -1 {
if dashIndex = strings.Index(inArg, "-"); dashIndex == -1 {
return inArg
}
//-fValue
outArg = strings.ToLower(inArg[dashIndex:dashIndex+2]) + inArg[dashIndex+2:]
return outArg
}
//--flag
if equalIndex := strings.Index(inArg, "="); equalIndex != -1 {
//--flag=value
outArg = strings.ToLower(inArg[dashIndex:equalIndex]) + inArg[equalIndex:]
} else {
//--boolflag
outArg = strings.ToLower(inArg[dashIndex:])
}
return outArg
}
func argsToLower(inArgs []string) []string {
outArgs := make([]string, len(inArgs), len(inArgs))
for i, inArg := range inArgs {
outArgs[i] = argToLower(inArg)
}
return outArgs
}
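
For orientation, a minimal usage sketch of the API above; the Configuration and DatabaseInfo types, their tag values and defaults are hypothetical, chosen only to illustrate the description tags and pointer-default conventions parsed by getTypesRecursive:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/containous/flaeg"
)

// DatabaseInfo and Configuration are hypothetical types used only to
// illustrate the "description" tags that flaeg turns into flags
// (--name, --timeout, --database, --database.host, --database.port).
type DatabaseInfo struct {
	Host string `description:"Database host"`
	Port int    `description:"Database port"`
}

type Configuration struct {
	Name     string        `description:"Application name"`
	Timeout  time.Duration `description:"Request timeout"`
	Database *DatabaseInfo `description:"Enable database access"`
}

func main() {
	// Defaults used when a flag is not passed at all.
	config := &Configuration{Name: "app", Timeout: 5 * time.Second}
	// Defaults applied to pointer fields when their boolean flag is set.
	defaultPointers := &Configuration{Database: &DatabaseInfo{Host: "localhost", Port: 5432}}

	rootCmd := &flaeg.Command{
		Name:                  "app",
		Description:           "illustrative root command",
		Config:                config,
		DefaultPointersConfig: defaultPointers,
		Run: func() error {
			fmt.Printf("%+v\n", *config)
			return nil
		},
	}

	f := flaeg.New(rootCmd, os.Args[1:])
	if err := f.Run(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}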


@@ -0,0 +1,204 @@
package flaeg
import (
"flag"
"fmt"
"strconv"
"strings"
"time"
)
//TODO : add parsers on all types in https://golang.org/pkg/builtin/
// Parser is an interface that allows the contents of a flag.Getter to be set.
type Parser interface {
flag.Getter
SetValue(interface{})
}
// -- bool Value
type boolValue bool
func (b *boolValue) Set(s string) error {
v, err := strconv.ParseBool(s)
*b = boolValue(v)
return err
}
func (b *boolValue) Get() interface{} { return bool(*b) }
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
func (b *boolValue) IsBoolFlag() bool { return true }
func (b *boolValue) SetValue(val interface{}) {
*b = boolValue(val.(bool))
}
// optional interface to indicate boolean flags that can be
// supplied without "=value" text
type boolFlag interface {
flag.Value
IsBoolFlag() bool
}
// -- int Value
type intValue int
func (i *intValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
*i = intValue(v)
return err
}
func (i *intValue) Get() interface{} { return int(*i) }
func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
func (i *intValue) SetValue(val interface{}) {
*i = intValue(val.(int))
}
// -- int64 Value
type int64Value int64
func (i *int64Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
*i = int64Value(v)
return err
}
func (i *int64Value) Get() interface{} { return int64(*i) }
func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
func (i *int64Value) SetValue(val interface{}) {
*i = int64Value(val.(int64))
}
// -- uint Value
type uintValue uint
func (i *uintValue) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
*i = uintValue(v)
return err
}
func (i *uintValue) Get() interface{} { return uint(*i) }
func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
func (i *uintValue) SetValue(val interface{}) {
*i = uintValue(val.(uint))
}
// -- uint64 Value
type uint64Value uint64
func (i *uint64Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
*i = uint64Value(v)
return err
}
func (i *uint64Value) Get() interface{} { return uint64(*i) }
func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
func (i *uint64Value) SetValue(val interface{}) {
*i = uint64Value(val.(uint64))
}
// -- string Value
type stringValue string
func (s *stringValue) Set(val string) error {
*s = stringValue(val)
return nil
}
func (s *stringValue) Get() interface{} { return string(*s) }
func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
func (s *stringValue) SetValue(val interface{}) {
*s = stringValue(val.(string))
}
// -- float64 Value
type float64Value float64
func (f *float64Value) Set(s string) error {
v, err := strconv.ParseFloat(s, 64)
*f = float64Value(v)
return err
}
func (f *float64Value) Get() interface{} { return float64(*f) }
func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *float64Value) SetValue(val interface{}) {
*f = float64Value(val.(float64))
}
// -- time.Duration Value
type durationValue time.Duration
func (d *durationValue) Set(s string) error {
v, err := time.ParseDuration(s)
*d = durationValue(v)
return err
}
func (d *durationValue) Get() interface{} { return time.Duration(*d) }
func (d *durationValue) String() string { return (*time.Duration)(d).String() }
func (d *durationValue) SetValue(val interface{}) {
*d = durationValue(val.(time.Duration))
}
// -- time.Time Value
type timeValue time.Time
func (t *timeValue) Set(s string) error {
v, err := time.Parse(time.RFC3339, s)
*t = timeValue(v)
return err
}
func (t *timeValue) Get() interface{} { return time.Time(*t) }
func (t *timeValue) String() string { return (*time.Time)(t).String() }
func (t *timeValue) SetValue(val interface{}) {
*t = timeValue(val.(time.Time))
}
// SliceStrings parses a slice of strings
type SliceStrings []string
// Set adds string elements into the parser;
// it splits str on ',' and ';'
func (s *SliceStrings) Set(str string) error {
fargs := func(c rune) bool {
return c == ',' || c == ';'
}
// get function
slice := strings.FieldsFunc(str, fargs)
*s = append(*s, slice...)
return nil
}
// Get returns the []string
func (s *SliceStrings) Get() interface{} { return []string(*s) }
// String returns the slice as a string
func (s *SliceStrings) String() string { return fmt.Sprintf("%v", *s) }
//SetValue sets []string into the parser
func (s *SliceStrings) SetValue(val interface{}) {
*s = SliceStrings(val.([]string))
}
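
A custom type becomes flag-parsable by satisfying the Parser interface above (flag.Getter plus SetValue) and registering it with Flaeg.AddParser. A sketch under that assumption; the Hosts and AppConfig types are hypothetical:

package main

import (
	"os"
	"reflect"
	"strings"

	"github.com/containous/flaeg"
)

// Hosts is a hypothetical comma-separated list type used only for illustration.
type Hosts []string

// Set implements flag.Value: it parses the raw flag argument.
func (h *Hosts) Set(s string) error {
	*h = append(*h, strings.Split(s, ",")...)
	return nil
}

// Get implements flag.Getter.
func (h *Hosts) Get() interface{} { return []string(*h) }

// String implements flag.Value.
func (h *Hosts) String() string { return strings.Join(*h, ",") }

// SetValue completes the flaeg Parser interface: it installs a default value.
func (h *Hosts) SetValue(val interface{}) { *h = val.(Hosts) }

// Compile-time check that *Hosts satisfies flaeg.Parser.
var _ flaeg.Parser = (*Hosts)(nil)

// AppConfig is a hypothetical configuration struct; the description tag turns
// the Servers field into a --servers flag once the parser is registered.
type AppConfig struct {
	Servers Hosts `description:"Comma-separated server list"`
}

func main() {
	cfg := &AppConfig{Servers: Hosts{"127.0.0.1"}}
	rootCmd := &flaeg.Command{
		Name:                  "app",
		Description:           "illustrative command with a custom parser",
		Config:                cfg,
		DefaultPointersConfig: cfg,
		Run:                   func() error { return nil },
	}
	f := flaeg.New(rootCmd, os.Args[1:])
	// Register the parser for the Hosts type so fields of that type become flags.
	f.AddParser(reflect.TypeOf(Hosts{}), &Hosts{})
	_ = f.Run()
}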


@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Containous SAS, Emile Vauge, emile@vauge.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

integration/vendor/github.com/containous/staert/kv.go (generated, vendored, 306 lines)

@@ -0,0 +1,306 @@
package staert
import (
"encoding/base64"
"errors"
"fmt"
"github.com/containous/flaeg"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
"github.com/mitchellh/mapstructure"
"reflect"
"sort"
"strconv"
"strings"
)
// KvSource implements Source.
// It handles all mapstructure features (squashed embedded sub-structures, maps, pointers).
// It supports slices (and maybe arrays); they must be stored in the KV store like this:
// Key: ".../[sliceIndex]" -> Value
type KvSource struct {
store.Store
Prefix string // like this "prefix" (without the /)
}
// NewKvSource creates a new KvSource
func NewKvSource(backend store.Backend, addrs []string, options *store.Config, prefix string) (*KvSource, error) {
store, err := libkv.NewStore(backend, addrs, options)
return &KvSource{Store: store, Prefix: prefix}, err
}
// Parse uses libkv and mapstructure to fill the structure
func (kv *KvSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
err := kv.LoadConfig(cmd.Config)
if err != nil {
return nil, err
}
return cmd, nil
}
// LoadConfig loads data from the KV Store into the config structure (given by reference)
func (kv *KvSource) LoadConfig(config interface{}) error {
pairs := map[string][]byte{}
if err := kv.ListRecursive(kv.Prefix, pairs); err != nil {
return err
}
// fmt.Printf("pairs : %#v\n", pairs)
mapstruct, err := generateMapstructure(convertPairs(pairs), kv.Prefix)
if err != nil {
return err
}
// fmt.Printf("mapstruct : %#v\n", mapstruct)
configDecoder := &mapstructure.DecoderConfig{
Metadata: nil,
Result: config,
WeaklyTypedInput: true,
DecodeHook: decodeHook,
}
decoder, err := mapstructure.NewDecoder(configDecoder)
if err != nil {
return err
}
if err := decoder.Decode(mapstruct); err != nil {
return err
}
return nil
}
func generateMapstructure(pairs []*store.KVPair, prefix string) (map[string]interface{}, error) {
raw := make(map[string]interface{})
for _, p := range pairs {
// Trim the prefix off our key first
key := strings.TrimPrefix(strings.Trim(p.Key, "/"), strings.Trim(prefix, "/")+"/")
raw, err := processKV(key, p.Value, raw)
if err != nil {
return raw, err
}
}
return raw, nil
}
func processKV(key string, v []byte, raw map[string]interface{}) (map[string]interface{}, error) {
// Determine which map we're writing the value to. We split by '/'
// to determine any sub-maps that need to be created.
m := raw
children := strings.Split(key, "/")
if len(children) > 0 {
key = children[len(children)-1]
children = children[:len(children)-1]
for _, child := range children {
if m[child] == nil {
m[child] = make(map[string]interface{})
}
subm, ok := m[child].(map[string]interface{})
if !ok {
return nil, fmt.Errorf("child is both a data item and dir: %s", child)
}
m = subm
}
}
m[key] = string(v)
return raw, nil
}
func decodeHook(fromType reflect.Type, toType reflect.Type, data interface{}) (interface{}, error) {
// TODO : Array support
switch toType.Kind() {
case reflect.Ptr:
if fromType.Kind() == reflect.String {
if data == "" {
// default value Pointer
return make(map[string]interface{}), nil
}
}
case reflect.Slice:
if fromType.Kind() == reflect.Map {
// Type assertion
dataMap, ok := data.(map[string]interface{})
if !ok {
return data, fmt.Errorf("input data is not a map : %#v", data)
}
// Sorting map
indexes := make([]int, len(dataMap))
i := 0
for k := range dataMap {
ind, err := strconv.Atoi(k)
if err != nil {
return dataMap, err
}
indexes[i] = ind
i++
}
sort.Ints(indexes)
// Building slice
dataOutput := make([]interface{}, i)
i = 0
for _, k := range indexes {
dataOutput[i] = dataMap[strconv.Itoa(k)]
i++
}
return dataOutput, nil
} else if fromType.Kind() == reflect.String {
b, err := base64.StdEncoding.DecodeString(data.(string))
if err != nil {
return nil, err
}
return b, nil
}
}
return data, nil
}
// StoreConfig stores the config into the KV Store
func (kv *KvSource) StoreConfig(config interface{}) error {
kvMap := map[string]string{}
if err := collateKvRecursive(reflect.ValueOf(config), kvMap, kv.Prefix); err != nil {
return err
}
keys := []string{}
for key := range kvMap {
keys = append(keys, key)
}
sort.Strings(keys)
for _, k := range keys {
var writeOptions *store.WriteOptions
// is it a directory ?
if strings.HasSuffix(k, "/") {
writeOptions = &store.WriteOptions{
IsDir: true,
}
}
if err := kv.Put(k, []byte(kvMap[k]), writeOptions); err != nil {
return err
}
}
return nil
}
func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string) error {
name := key
kind := objValue.Kind()
switch kind {
case reflect.Struct:
for i := 0; i < objValue.NumField(); i++ {
objType := objValue.Type()
if objType.Field(i).Name[:1] != strings.ToUpper(objType.Field(i).Name[:1]) {
//if unexported field
continue
}
squashed := false
if objType.Field(i).Anonymous {
if objValue.Field(i).Kind() == reflect.Struct {
tags := objType.Field(i).Tag
if strings.Contains(string(tags), "squash") {
squashed = true
}
}
}
if squashed {
if err := collateKvRecursive(objValue.Field(i), kv, name); err != nil {
return err
}
} else {
fieldName := objType.Field(i).Name
// useless if a non-empty Prefix is required?
if len(key) == 0 {
name = strings.ToLower(fieldName)
} else {
name = key + "/" + strings.ToLower(fieldName)
}
if err := collateKvRecursive(objValue.Field(i), kv, name); err != nil {
return err
}
}
}
case reflect.Ptr:
if !objValue.IsNil() {
// hack to avoid calling this at the beginning
if len(kv) > 0 {
kv[name+"/"] = ""
}
if err := collateKvRecursive(objValue.Elem(), kv, name); err != nil {
return err
}
}
case reflect.Map:
for _, k := range objValue.MapKeys() {
if k.Kind() == reflect.Struct {
return errors.New("Struct as key not supported")
}
name = key + "/" + fmt.Sprint(k)
if err := collateKvRecursive(objValue.MapIndex(k), kv, name); err != nil {
return err
}
}
case reflect.Array, reflect.Slice:
// Byte slices get special treatment
if objValue.Type().Elem().Kind() == reflect.Uint8 {
kv[name] = base64.StdEncoding.EncodeToString(objValue.Bytes())
} else {
for i := 0; i < objValue.Len(); i++ {
name = key + "/" + strconv.Itoa(i)
if err := collateKvRecursive(objValue.Index(i), kv, name); err != nil {
return err
}
}
}
case reflect.Interface, reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16,
reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
if _, ok := kv[name]; ok {
return errors.New("key already exists: " + name)
}
kv[name] = fmt.Sprint(objValue)
default:
return fmt.Errorf("Kind %s not supported", kind.String())
}
return nil
}
// ListRecursive lists all key-value children under key
func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
pairsN1, err := kv.List(key)
if err == store.ErrKeyNotFound {
return nil
}
if err != nil {
return err
}
if len(pairsN1) == 0 {
pairLeaf, err := kv.Get(key)
if err != nil {
return err
}
if pairLeaf == nil {
return nil
}
pairs[pairLeaf.Key] = pairLeaf.Value
return nil
}
for _, p := range pairsN1 {
err := kv.ListRecursive(p.Key, pairs)
if err != nil {
return err
}
}
return nil
}
func convertPairs(pairs map[string][]byte) []*store.KVPair {
slicePairs := make([]*store.KVPair, len(pairs))
i := 0
for k, v := range pairs {
slicePairs[i] = &store.KVPair{
Key: k,
Value: v,
}
i++
}
return slicePairs
}
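
A minimal sketch of KvSource in use, storing and reloading a configuration under a prefix. The AppConfig type, the Consul address, and the backend registration call (consul.Register from the libkv driver package) are assumptions for illustration, not part of this vendored code:

package main

import (
	"log"
	"time"

	"github.com/containous/staert"
	"github.com/docker/libkv/store"
	// A concrete libkv backend must be registered before the store can be built;
	// the consul driver is assumed here.
	"github.com/docker/libkv/store/consul"
)

// AppConfig is a hypothetical configuration struct used only for illustration.
type AppConfig struct {
	LogLevel string
	Port     int
}

func main() {
	consul.Register() // assumed libkv backend registration

	// Build a KvSource rooted at the "app" prefix.
	kv, err := staert.NewKvSource(store.CONSUL, []string{"127.0.0.1:8500"},
		&store.Config{ConnectionTimeout: 3 * time.Second}, "app")
	if err != nil {
		log.Fatal(err)
	}

	// Store a config, then load it back; keys look like app/loglevel and app/port.
	if err := kv.StoreConfig(&AppConfig{LogLevel: "INFO", Port: 8080}); err != nil {
		log.Fatal(err)
	}
	loaded := &AppConfig{}
	if err := kv.LoadConfig(loaded); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", loaded)
}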


@@ -0,0 +1,198 @@
package staert
import (
"fmt"
"github.com/BurntSushi/toml"
"github.com/containous/flaeg"
"os"
"path/filepath"
"reflect"
"strings"
)
// Source is the interface that must be satisfied to add any kind of Source to Staert, such as TomlSource or Flaeg
type Source interface {
Parse(cmd *flaeg.Command) (*flaeg.Command, error)
}
// Staert contains the struct to configure, the default values inside structs, and the sources
type Staert struct {
command *flaeg.Command
sources []Source
}
// NewStaert creates and returns a pointer to Staert from the given root command
func NewStaert(rootCommand *flaeg.Command) *Staert {
s := Staert{
command: rootCommand,
}
return &s
}
// AddSource adds a new Source to Staert, given by reference
func (s *Staert) AddSource(src Source) {
s.sources = append(s.sources, src)
}
// parseConfigAllSources runs the Parse func of every source, in order, for the given flaeg.Command
func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error {
for _, src := range s.sources {
var err error
_, err = src.Parse(cmd)
if err != nil {
return err
}
}
return nil
}
// LoadConfig checks which command is called and parses the config
// It returns the parsed config or an error if it fails
func (s *Staert) LoadConfig() (interface{}, error) {
for _, src := range s.sources {
//Type assertion
f, ok := src.(*flaeg.Flaeg)
if ok {
if fCmd, err := f.GetCommand(); err != nil {
return nil, err
} else if s.command != fCmd {
//IF flaeg sub-command
if fCmd.Metadata["parseAllSources"] == "true" {
//IF parseAllSources
fCmdConfigType := reflect.TypeOf(fCmd.Config)
sCmdConfigType := reflect.TypeOf(s.command.Config)
if fCmdConfigType != sCmdConfigType {
return nil, fmt.Errorf("Command %s : Config type doesn't match with root command config type. Expected %s got %s", fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
}
s.command = fCmd
} else {
// ELSE (not parseAllSources)
s.command, err = f.Parse(fCmd)
return s.command.Config, err
}
}
}
}
err := s.parseConfigAllSources(s.command)
return s.command.Config, err
}
// Run calls the Run func of the command
// Warning, Run doesn't parse the config
func (s *Staert) Run() error {
return s.command.Run()
}
// TomlSource implements Source
type TomlSource struct {
filename string
dirNfullpath []string
fullpath string
}
// NewTomlSource creates and returns a pointer to TomlSource.
// Parameter filename is the file name (without the extension; ".toml" will be added).
// dirNfullpath may contain directories or a full path to the file.
func NewTomlSource(filename string, dirNfullpath []string) *TomlSource {
return &TomlSource{filename, dirNfullpath, ""}
}
// ConfigFileUsed returns the config file used
func (ts *TomlSource) ConfigFileUsed() string {
return ts.fullpath
}
func preprocessDir(dirIn string) (string, error) {
dirOut := dirIn
if strings.HasPrefix(dirIn, "$") {
end := strings.Index(dirIn, string(os.PathSeparator))
if end == -1 {
end = len(dirIn)
}
dirOut = os.Getenv(dirIn[1:end]) + dirIn[end:]
}
dirOut, err := filepath.Abs(dirOut)
return dirOut, err
}
func findFile(filename string, dirNfile []string) string {
for _, df := range dirNfile {
if df != "" {
fullpath, _ := preprocessDir(df)
if fileinfo, err := os.Stat(fullpath); err == nil && !fileinfo.IsDir() {
return fullpath
}
fullpath = fullpath + "/" + filename + ".toml"
if fileinfo, err := os.Stat(fullpath); err == nil && !fileinfo.IsDir() {
return fullpath
}
}
}
return ""
}
// Parse calls toml.DecodeFile() func
func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
ts.fullpath = findFile(ts.filename, ts.dirNfullpath)
if len(ts.fullpath) < 2 {
return cmd, nil
}
metadata, err := toml.DecodeFile(ts.fullpath, cmd.Config)
if err != nil {
return nil, err
}
boolFlags, err := flaeg.GetBoolFlags(cmd.Config)
if err != nil {
return nil, err
}
flaegArgs, hasUnderField, err := generateArgs(metadata, boolFlags)
if err != nil {
return nil, err
}
// fmt.Println(flaegArgs)
err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flaegArgs)
// ignore ErrParserNotFound; fail on any other error
if err != nil && err != flaeg.ErrParserNotFound {
return nil, err
}
if hasUnderField {
_, err := toml.DecodeFile(ts.fullpath, cmd.Config)
if err != nil {
return nil, err
}
}
return cmd, nil
}
func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
flaegArgs := []string{}
keys := metadata.Keys()
hasUnderField := false
for i, key := range keys {
// fmt.Println(key)
if metadata.Type(key.String()) == "Hash" {
// TOML hashes correspond to Go structs or maps.
// fmt.Printf("%s could be a ptr on a struct, or a map\n", key)
for j := i; j < len(keys); j++ {
// fmt.Printf("%s =? %s\n", keys[j].String(), "."+key.String())
if strings.Contains(keys[j].String(), key.String()+".") {
hasUnderField = true
break
}
}
match := false
for _, flag := range flags {
if flag == strings.ToLower(key.String()) {
match = true
break
}
}
if match {
flaegArgs = append(flaegArgs, "--"+strings.ToLower(key.String()))
}
}
}
return flaegArgs, hasUnderField, nil
}
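
A minimal sketch tying the pieces together: a Staert with a TomlSource and a Flaeg source sharing one root command, so the TOML file is parsed first and command-line flags parse last. The Config type, file name, and search directories are hypothetical:

package main

import (
	"fmt"
	"os"

	"github.com/containous/flaeg"
	"github.com/containous/staert"
)

// Config is a hypothetical configuration struct used only for illustration.
type Config struct {
	Host string `description:"Listen host"`
	Port int    `description:"Listen port"`
}

func main() {
	config := &Config{Host: "0.0.0.0", Port: 80}
	defaultPointers := &Config{}

	rootCmd := &flaeg.Command{
		Name:                  "app",
		Description:           "illustrative command",
		Config:                config,
		DefaultPointersConfig: defaultPointers,
		Run: func() error {
			fmt.Printf("running with %+v\n", *config)
			return nil
		},
	}

	s := staert.NewStaert(rootCmd)
	// app.toml is searched for in the given directories (or a full path can be passed);
	// with this ordering, command-line flags are parsed after the TOML file.
	s.AddSource(staert.NewTomlSource("app", []string{"/etc/app/", "."}))
	s.AddSource(flaeg.New(rootCmd, os.Args[1:]))

	if _, err := s.LoadConfig(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if err := s.Run(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}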


@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Containous SAS, Emile Vauge, emile@vauge.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,34 @@
/*
Copyright
*/
package main
import (
"net/http"
"github.com/containous/traefik/log"
)
// OxyLogger implements oxy Logger interface with logrus.
type OxyLogger struct {
}
// Infof logs specified string as Debug level in logrus.
func (oxylogger *OxyLogger) Infof(format string, args ...interface{}) {
log.Debugf(format, args...)
}
// Warningf logs specified string as Warning level in logrus.
func (oxylogger *OxyLogger) Warningf(format string, args ...interface{}) {
log.Warningf(format, args...)
}
// Errorf logs the specified string at Warning level in logrus.
func (oxylogger *OxyLogger) Errorf(format string, args ...interface{}) {
log.Warningf(format, args...)
}
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
http.NotFound(w, r)
//templatesRenderer.HTML(w, http.StatusNotFound, "notFound", nil)
}


@@ -0,0 +1,253 @@
package cluster
import (
"context"
"encoding/json"
"fmt"
"github.com/cenk/backoff"
"github.com/containous/staert"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/docker/libkv/store"
"github.com/satori/go.uuid"
"sync"
"time"
)
// Metadata stores Object plus metadata
type Metadata struct {
object Object
Object []byte
Lock string
}
// NewMetadata returns new Metadata
func NewMetadata(object Object) *Metadata {
return &Metadata{object: object}
}
// Marshall marshals the object
func (m *Metadata) Marshall() error {
var err error
m.Object, err = json.Marshal(m.object)
return err
}
func (m *Metadata) unmarshall() error {
if len(m.Object) == 0 {
return nil
}
return json.Unmarshal(m.Object, m.object)
}
// Listener is called when Object has been changed in KV store
type Listener func(Object) error
var _ Store = (*Datastore)(nil)
// Datastore holds a struct synced in a KV store
type Datastore struct {
kv staert.KvSource
ctx context.Context
localLock *sync.RWMutex
meta *Metadata
lockKey string
listener Listener
}
// NewDataStore creates a Datastore
func NewDataStore(ctx context.Context, kvSource staert.KvSource, object Object, listener Listener) (*Datastore, error) {
datastore := Datastore{
kv: kvSource,
ctx: ctx,
meta: &Metadata{object: object},
lockKey: kvSource.Prefix + "/lock",
localLock: &sync.RWMutex{},
listener: listener,
}
err := datastore.watchChanges()
if err != nil {
return nil, err
}
return &datastore, nil
}
func (d *Datastore) watchChanges() error {
stopCh := make(chan struct{})
kvCh, err := d.kv.Watch(d.lockKey, stopCh)
if err != nil {
return err
}
go func() {
ctx, cancel := context.WithCancel(d.ctx)
operation := func() error {
for {
select {
case <-ctx.Done():
stopCh <- struct{}{}
return nil
case _, ok := <-kvCh:
if !ok {
cancel()
return err
}
err = d.reload()
if err != nil {
return err
}
// log.Debugf("Datastore object change received: %+v", d.meta)
if d.listener != nil {
err := d.listener(d.meta.object)
if err != nil {
log.Errorf("Error calling datastore listener: %s", err)
}
}
}
}
}
notify := func(err error, time time.Duration) {
log.Errorf("Error in watch datastore: %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Error in watch datastore: %v", err)
}
}()
return nil
}
func (d *Datastore) reload() error {
log.Debugf("Datastore reload")
d.localLock.Lock()
err := d.kv.LoadConfig(d.meta)
if err != nil {
d.localLock.Unlock()
return err
}
err = d.meta.unmarshall()
if err != nil {
d.localLock.Unlock()
return err
}
d.localLock.Unlock()
return nil
}
// Begin creates a transaction with the KV store.
func (d *Datastore) Begin() (Transaction, Object, error) {
id := uuid.NewV4().String()
log.Debugf("Transaction %s begins", id)
remoteLock, err := d.kv.NewLock(d.lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(id)})
if err != nil {
return nil, nil, err
}
stopCh := make(chan struct{})
ctx, cancel := context.WithCancel(d.ctx)
var errLock error
go func() {
_, errLock = remoteLock.Lock(stopCh)
cancel()
}()
select {
case <-ctx.Done():
if errLock != nil {
return nil, nil, errLock
}
case <-d.ctx.Done():
stopCh <- struct{}{}
return nil, nil, d.ctx.Err()
}
// we got the lock! Now make sure we are synced with KV store
operation := func() error {
meta := d.get()
if meta.Lock != id {
return fmt.Errorf("Object lock value: expected %s, got %s", id, meta.Lock)
}
return nil
}
notify := func(err error, time time.Duration) {
log.Errorf("Datastore sync error: %v, retrying in %s", err, time)
err = d.reload()
if err != nil {
log.Errorf("Error reloading: %+v", err)
}
}
ebo := backoff.NewExponentialBackOff()
ebo.MaxElapsedTime = 60 * time.Second
err = backoff.RetryNotify(operation, ebo, notify)
if err != nil {
return nil, nil, fmt.Errorf("Datastore cannot sync: %v", err)
}
// we synced with KV store, we can now return Setter
return &datastoreTransaction{
Datastore: d,
remoteLock: remoteLock,
id: id,
}, d.meta.object, nil
}
func (d *Datastore) get() *Metadata {
d.localLock.RLock()
defer d.localLock.RUnlock()
return d.meta
}
// Load atomically loads a struct from the KV store
func (d *Datastore) Load() (Object, error) {
d.localLock.Lock()
defer d.localLock.Unlock()
err := d.kv.LoadConfig(d.meta)
if err != nil {
return nil, err
}
err = d.meta.unmarshall()
if err != nil {
return nil, err
}
return d.meta.object, nil
}
// Get atomically gets a struct from the KV store
func (d *Datastore) Get() Object {
d.localLock.RLock()
defer d.localLock.RUnlock()
return d.meta.object
}
var _ Transaction = (*datastoreTransaction)(nil)
type datastoreTransaction struct {
*Datastore
remoteLock store.Locker
dirty bool
id string
}
// Commit sets an object in the KV store
func (s *datastoreTransaction) Commit(object Object) error {
s.localLock.Lock()
defer s.localLock.Unlock()
if s.dirty {
return fmt.Errorf("transaction already used, please begin a new one")
}
s.Datastore.meta.object = object
err := s.Datastore.meta.Marshall()
if err != nil {
return err
}
err = s.kv.StoreConfig(s.Datastore.meta)
if err != nil {
return err
}
err = s.remoteLock.Unlock()
if err != nil {
return err
}
s.dirty = true
log.Debugf("Transaction committed %s", s.id)
return nil
}
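
A hedged sketch of the Begin/Commit pattern above, assuming an already-built staert.KvSource; the SharedState type and BumpCounter helper are hypothetical:

package example

import (
	"context"
	"log"

	"github.com/containous/staert"
	"github.com/containous/traefik/cluster"
)

// SharedState is a hypothetical object kept in sync across nodes (illustration only).
type SharedState struct {
	Counter int
}

// BumpCounter sketches the Begin/Commit pattern: Begin acquires the distributed
// lock and returns the up-to-date object, Commit marshals it back to the KV store.
func BumpCounter(ctx context.Context, kv staert.KvSource) error {
	listener := func(obj cluster.Object) error {
		log.Printf("shared state changed: %+v", obj)
		return nil
	}
	datastore, err := cluster.NewDataStore(ctx, kv, &SharedState{}, listener)
	if err != nil {
		return err
	}

	transaction, obj, err := datastore.Begin()
	if err != nil {
		return err
	}
	state := obj.(*SharedState)
	state.Counter++

	// Commit stores the object and releases the remote lock; a transaction is single-use.
	return transaction.Commit(state)
}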


@@ -0,0 +1,102 @@
package cluster
import (
"context"
"github.com/cenk/backoff"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/docker/leadership"
"time"
)
// Leadership allows leadership election using a KV store
type Leadership struct {
*safe.Pool
*types.Cluster
candidate *leadership.Candidate
leader safe.Safe
listeners []LeaderListener
}
// NewLeadership creates a leadership
func NewLeadership(ctx context.Context, cluster *types.Cluster) *Leadership {
return &Leadership{
Pool: safe.NewPool(ctx),
Cluster: cluster,
candidate: leadership.NewCandidate(cluster.Store, cluster.Store.Prefix+"/leader", cluster.Node, 20*time.Second),
listeners: []LeaderListener{},
}
}
// LeaderListener is called when leadership has changed
type LeaderListener func(elected bool) error
// Participate tries to be a leader
func (l *Leadership) Participate(pool *safe.Pool) {
pool.GoCtx(func(ctx context.Context) {
log.Debugf("Node %s running for election", l.Cluster.Node)
defer log.Debugf("Node %s no more running for election", l.Cluster.Node)
backOff := backoff.NewExponentialBackOff()
operation := func() error {
return l.run(ctx, l.candidate)
}
notify := func(err error, time time.Duration) {
log.Errorf("Leadership election error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, backOff, notify)
if err != nil {
log.Errorf("Cannot elect leadership %+v", err)
}
})
}
// AddListener adds a leadership listener
func (l *Leadership) AddListener(listener LeaderListener) {
l.listeners = append(l.listeners, listener)
}
// Resign resigns from being a leader
func (l *Leadership) Resign() {
l.candidate.Resign()
log.Infof("Node %s resigned", l.Cluster.Node)
}
func (l *Leadership) run(ctx context.Context, candidate *leadership.Candidate) error {
electedCh, errCh := candidate.RunForElection()
for {
select {
case elected := <-electedCh:
l.onElection(elected)
case err := <-errCh:
return err
case <-ctx.Done():
l.candidate.Resign()
return nil
}
}
}
func (l *Leadership) onElection(elected bool) {
if elected {
log.Infof("Node %s elected leader ♚", l.Cluster.Node)
l.leader.Set(true)
l.Start()
} else {
log.Infof("Node %s elected slave ♝", l.Cluster.Node)
l.leader.Set(false)
l.Stop()
}
for _, listener := range l.listeners {
err := listener(elected)
if err != nil {
log.Errorf("Error calling Leadership listener: %s", err)
}
}
}
// IsLeader returns true if current node is leader
func (l *Leadership) IsLeader() bool {
return l.leader.Get().(bool)
}
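
A hedged sketch of wiring a node into leader election, assuming the *types.Cluster value (node name and backing store) is constructed elsewhere; the RunElection helper is hypothetical:

package example

import (
	"context"
	"log"

	"github.com/containous/traefik/cluster"
	"github.com/containous/traefik/safe"
	"github.com/containous/traefik/types"
)

// RunElection wires a node into leader election. How the *types.Cluster value is
// built (its node name and backing store) is assumed to be handled elsewhere.
func RunElection(ctx context.Context, clusterConfig *types.Cluster) {
	leadership := cluster.NewLeadership(ctx, clusterConfig)

	// The listener fires whenever this node gains or loses leadership.
	leadership.AddListener(func(elected bool) error {
		if elected {
			log.Println("became leader: start leader-only jobs")
		} else {
			log.Println("lost leadership: stop leader-only jobs")
		}
		return nil
	})

	pool := safe.NewPool(ctx)
	// Participate keeps running for election (with exponential backoff) inside the pool.
	leadership.Participate(pool)
}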


@@ -0,0 +1,16 @@
package cluster
// Object is the struct to store
type Object interface{}
// Store is a generic interface to represent a storage
type Store interface {
Load() (Object, error)
Get() Object
Begin() (Transaction, Object, error)
}
// Transaction allows setting a struct in the KV store
type Transaction interface {
Commit(object Object) error
}


@@ -0,0 +1,415 @@
package main
import (
"crypto/tls"
"errors"
"fmt"
"os"
"regexp"
"strings"
"time"
"github.com/containous/traefik/acme"
"github.com/containous/traefik/provider"
"github.com/containous/traefik/types"
)
// TraefikConfiguration holds GlobalConfiguration and other stuff
type TraefikConfiguration struct {
GlobalConfiguration `mapstructure:",squash"`
ConfigFile string `short:"c" description:"Configuration file to use (TOML)."`
}
// GlobalConfiguration holds global configuration (with providers, etc.).
// It's populated from the traefik configuration file passed as an argument to the binary.
type GlobalConfiguration struct {
GraceTimeOut int64 `short:"g" description:"Duration to give active requests a chance to finish during hot-reload"`
Debug bool `short:"d" description:"Enable debug mode"`
AccessLogsFile string `description:"Access logs file"`
TraefikLogsFile string `description:"Traefik logs file"`
LogLevel string `short:"l" description:"Log level"`
EntryPoints EntryPoints `description:"Entrypoints definition using format: --entryPoints='Name:http Address::8000 Redirect.EntryPoint:https' --entryPoints='Name:https Address::4442 TLS:tests/traefik.crt,tests/traefik.key'"`
Cluster *types.Cluster `description:"Enable clustering"`
Constraints types.Constraints `description:"Filter services by constraint, matching with service tags"`
ACME *acme.ACME `description:"Enable ACME (Let's Encrypt): automatic SSL"`
DefaultEntryPoints DefaultEntryPoints `description:"Entrypoints to be used by frontends that do not specify any entrypoint"`
ProvidersThrottleDuration time.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time."`
MaxIdleConnsPerHost int `description:"If non-zero, controls the maximum idle (keep-alive) to keep per-host. If zero, DefaultMaxIdleConnsPerHost is used"`
InsecureSkipVerify bool `description:"Disable SSL certificate verification"`
Retry *Retry `description:"Enable retry sending request if network error"`
Docker *provider.Docker `description:"Enable Docker backend"`
File *provider.File `description:"Enable File backend"`
Web *WebProvider `description:"Enable Web backend"`
Marathon *provider.Marathon `description:"Enable Marathon backend"`
Consul *provider.Consul `description:"Enable Consul backend"`
ConsulCatalog *provider.ConsulCatalog `description:"Enable Consul catalog backend"`
Etcd *provider.Etcd `description:"Enable Etcd backend"`
Zookeeper *provider.Zookepper `description:"Enable Zookeeper backend"`
Boltdb *provider.BoltDb `description:"Enable Boltdb backend"`
Kubernetes *provider.Kubernetes `description:"Enable Kubernetes backend"`
Mesos *provider.Mesos `description:"Enable Mesos backend"`
Eureka *provider.Eureka `description:"Enable Eureka backend"`
}
// DefaultEntryPoints holds default entry points
type DefaultEntryPoints []string
// String is the method to format the flag's value, part of the flag.Value interface.
// The String method's output will be used in diagnostics.
func (dep *DefaultEntryPoints) String() string {
return strings.Join(*dep, ",")
}
// Set is the method to set the flag value, part of the flag.Value interface.
// Set's argument is a string to be parsed to set the flag.
// It's a comma-separated list, so we split it.
func (dep *DefaultEntryPoints) Set(value string) error {
entrypoints := strings.Split(value, ",")
if len(entrypoints) == 0 {
return errors.New("Bad DefaultEntryPoints format: " + value)
}
for _, entrypoint := range entrypoints {
*dep = append(*dep, entrypoint)
}
return nil
}
// Get returns the DefaultEntryPoints
func (dep *DefaultEntryPoints) Get() interface{} {
return DefaultEntryPoints(*dep)
}
// SetValue sets the DefaultEntryPoints with val
func (dep *DefaultEntryPoints) SetValue(val interface{}) {
*dep = DefaultEntryPoints(val.(DefaultEntryPoints))
}
// Type returns the type name of the struct
func (dep *DefaultEntryPoints) Type() string {
return fmt.Sprint("defaultentrypoints")
}
// EntryPoints holds entry points configuration of the reverse proxy (ip, port, TLS...)
type EntryPoints map[string]*EntryPoint
// String is the method to format the flag's value, part of the flag.Value interface.
// The String method's output will be used in diagnostics.
func (ep *EntryPoints) String() string {
return fmt.Sprintf("%+v", *ep)
}
// Set is the method to set the flag value, part of the flag.Value interface.
// Set's argument is a string to be parsed to set the flag.
// It's a comma-separated list, so we split it.
func (ep *EntryPoints) Set(value string) error {
regex := regexp.MustCompile("(?:Name:(?P<Name>\\S*))\\s*(?:Address:(?P<Address>\\S*))?\\s*(?:TLS:(?P<TLS>\\S*))?\\s*((?P<TLSACME>TLS))?\\s*(?:CA:(?P<CA>\\S*))?\\s*(?:Redirect.EntryPoint:(?P<RedirectEntryPoint>\\S*))?\\s*(?:Redirect.Regex:(?P<RedirectRegex>\\S*))?\\s*(?:Redirect.Replacement:(?P<RedirectReplacement>\\S*))?\\s*(?:Compress:(?P<Compress>\\S*))?")
match := regex.FindAllStringSubmatch(value, -1)
if match == nil {
return errors.New("Bad EntryPoints format: " + value)
}
matchResult := match[0]
result := make(map[string]string)
for i, name := range regex.SubexpNames() {
if i != 0 {
result[name] = matchResult[i]
}
}
var tls *TLS
if len(result["TLS"]) > 0 {
certs := Certificates{}
if err := certs.Set(result["TLS"]); err != nil {
return err
}
tls = &TLS{
Certificates: certs,
}
} else if len(result["TLSACME"]) > 0 {
tls = &TLS{
Certificates: Certificates{},
}
}
if len(result["CA"]) > 0 {
if tls == nil {
tls = &TLS{}
}
files := strings.Split(result["CA"], ",")
tls.ClientCAFiles = files
}
var redirect *Redirect
if len(result["RedirectEntryPoint"]) > 0 || len(result["RedirectRegex"]) > 0 || len(result["RedirectReplacement"]) > 0 {
redirect = &Redirect{
EntryPoint: result["RedirectEntryPoint"],
Regex: result["RedirectRegex"],
Replacement: result["RedirectReplacement"],
}
}
compress := false
if len(result["Compress"]) > 0 {
compress = strings.EqualFold(result["Compress"], "enable") || strings.EqualFold(result["Compress"], "on")
}
(*ep)[result["Name"]] = &EntryPoint{
Address: result["Address"],
TLS: tls,
Redirect: redirect,
Compress: compress,
}
return nil
}
// Get returns the EntryPoints map
func (ep *EntryPoints) Get() interface{} {
return EntryPoints(*ep)
}
// SetValue sets the EntryPoints map with val
func (ep *EntryPoints) SetValue(val interface{}) {
*ep = EntryPoints(val.(EntryPoints))
}
// Type returns the type of the struct
func (ep *EntryPoints) Type() string {
return "entrypoints"
}
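// exampleParseEntryPoints is an editor-added sketch (not part of the upstream
// source) illustrating how a single --entryPoints flag value is parsed by Set;
// the entry point name, address and certificate paths below are made up.
func exampleParseEntryPoints() {
eps := EntryPoints{}
// The flag package calls Set once per --entryPoints occurrence.
_ = eps.Set("Name:https Address::443 TLS:tests/traefik.crt,tests/traefik.key Redirect.EntryPoint:http Compress:on")
// eps["https"] now has Address ":443", one certificate pair in TLS,
// a Redirect to the "http" entry point and Compress set to true.
_ = eps["https"]
}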
// EntryPoint holds an entry point configuration of the reverse proxy (ip, port, TLS...)
type EntryPoint struct {
Network string
Address string
TLS *TLS
Redirect *Redirect
Auth *types.Auth
Compress bool
}
// Redirect configures a redirection of an entry point to another, or to a URL
type Redirect struct {
EntryPoint string
Regex string
Replacement string
}
// TLS configures TLS for an entry point
type TLS struct {
MinVersion string
CipherSuites []string
Certificates Certificates
ClientCAFiles []string
}
// Map of allowed TLS minimum versions
var minVersion = map[string]uint16{
`VersionTLS10`: tls.VersionTLS10,
`VersionTLS11`: tls.VersionTLS11,
`VersionTLS12`: tls.VersionTLS12,
}
// Map of TLS CipherSuites from crypto/tls
var cipherSuites = map[string]uint16{
`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`: tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
`TLS_RSA_WITH_AES_128_GCM_SHA256`: tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
`TLS_RSA_WITH_AES_256_GCM_SHA384`: tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
`TLS_RSA_WITH_AES_128_CBC_SHA`: tls.TLS_RSA_WITH_AES_128_CBC_SHA,
`TLS_RSA_WITH_AES_256_CBC_SHA`: tls.TLS_RSA_WITH_AES_256_CBC_SHA,
`TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
`TLS_RSA_WITH_3DES_EDE_CBC_SHA`: tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
}
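// exampleResolveTLSNames is an editor-added sketch (not part of the upstream
// source): it shows how the MinVersion and CipherSuites names carried by the
// TLS struct can be looked up in the two maps above; the actual wiring into
// the server's tls.Config happens outside this file.
func exampleResolveTLSNames(t *TLS) (uint16, []uint16) {
version := minVersion[t.MinVersion] // 0 when the name is unknown or unset
var suites []uint16
for _, name := range t.CipherSuites {
if id, ok := cipherSuites[name]; ok {
suites = append(suites, id)
}
}
return version, suites
}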
// Certificates defines traefik certificates type
// Certs and Keys could be either a file path, or the file content itself
type Certificates []Certificate
// CreateTLSConfig creates a TLS config from Certificate structures
func (certs *Certificates) CreateTLSConfig() (*tls.Config, error) {
config := &tls.Config{}
config.Certificates = []tls.Certificate{}
certsSlice := []Certificate(*certs)
for _, v := range certsSlice {
isAPath := false
_, errCert := os.Stat(v.CertFile)
_, errKey := os.Stat(v.KeyFile)
if errCert == nil {
if errKey == nil {
isAPath = true
} else {
return nil, fmt.Errorf("bad TLS Certificate KeyFile format, expected a path")
}
} else if errKey == nil {
return nil, fmt.Errorf("bad TLS Certificate CertFile format, expected a path")
}
cert := tls.Certificate{}
var err error
if isAPath {
cert, err = tls.LoadX509KeyPair(v.CertFile, v.KeyFile)
if err != nil {
return nil, err
}
} else {
cert, err = tls.X509KeyPair([]byte(v.CertFile), []byte(v.KeyFile))
if err != nil {
return nil, err
}
}
config.Certificates = append(config.Certificates, cert)
}
return config, nil
}
// String is the method to format the flag's value, part of the flag.Value interface.
// The String method's output will be used in diagnostics.
func (certs *Certificates) String() string {
if len(*certs) == 0 {
return ""
}
return (*certs)[0].CertFile + "," + (*certs)[0].KeyFile
}
// Set is the method to set the flag value, part of the flag.Value interface.
// Set's argument is a string to be parsed to set the flag.
// It's a comma-separated list, so we split it.
func (certs *Certificates) Set(value string) error {
files := strings.Split(value, ",")
if len(files) != 2 {
return errors.New("Bad certificates format: " + value)
}
*certs = append(*certs, Certificate{
CertFile: files[0],
KeyFile: files[1],
})
return nil
}
// Type returns the type of the struct
func (certs *Certificates) Type() string {
return "certificates"
}
// Certificate holds an SSL cert/key pair
// Cert and Key can be either a file path or the file content itself
type Certificate struct {
CertFile string
KeyFile string
}
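// exampleLoadCertificates is an editor-added sketch (not part of the upstream
// source) showing the two forms CreateTLSConfig accepts: a cert/key file pair
// on disk, or the PEM content passed inline. The paths below are made up.
func exampleLoadCertificates() {
certs := Certificates{}
// Typically filled from the command line or TOML as "certFile,keyFile".
_ = certs.Set("tests/traefik.crt,tests/traefik.key")
if config, err := certs.CreateTLSConfig(); err == nil {
// config.Certificates now holds the parsed key pair.
_ = config
}
}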
// Retry contains request retry config
type Retry struct {
Attempts int `description:"Number of attempts"`
}
// NewTraefikDefaultPointersConfiguration creates a TraefikConfiguration with default values for pointer fields
func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
//default Docker
var defaultDocker provider.Docker
defaultDocker.Watch = true
defaultDocker.ExposedByDefault = true
defaultDocker.Endpoint = "unix:///var/run/docker.sock"
defaultDocker.SwarmMode = false
// default File
var defaultFile provider.File
defaultFile.Watch = true
defaultFile.Filename = "" //needs equivalent to viper.ConfigFileUsed()
// default Web
var defaultWeb WebProvider
defaultWeb.Address = ":8080"
defaultWeb.Statistics = &types.Statistics{
RecentErrors: 10,
}
// default Marathon
var defaultMarathon provider.Marathon
defaultMarathon.Watch = true
defaultMarathon.Endpoint = "http://127.0.0.1:8080"
defaultMarathon.ExposedByDefault = true
defaultMarathon.Constraints = []types.Constraint{}
defaultMarathon.DialerTimeout = 60
// default Consul
var defaultConsul provider.Consul
defaultConsul.Watch = true
defaultConsul.Endpoint = "127.0.0.1:8500"
defaultConsul.Prefix = "traefik"
defaultConsul.Constraints = []types.Constraint{}
// default ConsulCatalog
var defaultConsulCatalog provider.ConsulCatalog
defaultConsulCatalog.Endpoint = "127.0.0.1:8500"
defaultConsulCatalog.Constraints = []types.Constraint{}
// default Etcd
var defaultEtcd provider.Etcd
defaultEtcd.Watch = true
defaultEtcd.Endpoint = "127.0.0.1:2379"
defaultEtcd.Prefix = "/traefik"
defaultEtcd.Constraints = []types.Constraint{}
//default Zookeeper
var defaultZookeeper provider.Zookepper
defaultZookeeper.Watch = true
defaultZookeeper.Endpoint = "127.0.0.1:2181"
defaultZookeeper.Prefix = "/traefik"
defaultZookeeper.Constraints = []types.Constraint{}
//default Boltdb
var defaultBoltDb provider.BoltDb
defaultBoltDb.Watch = true
defaultBoltDb.Endpoint = "127.0.0.1:4001"
defaultBoltDb.Prefix = "/traefik"
defaultBoltDb.Constraints = []types.Constraint{}
//default Kubernetes
var defaultKubernetes provider.Kubernetes
defaultKubernetes.Watch = true
defaultKubernetes.Endpoint = ""
defaultKubernetes.LabelSelector = ""
defaultKubernetes.Constraints = []types.Constraint{}
// default Mesos
var defaultMesos provider.Mesos
defaultMesos.Watch = true
defaultMesos.Endpoint = "http://127.0.0.1:5050"
defaultMesos.ExposedByDefault = true
defaultMesos.Constraints = []types.Constraint{}
defaultConfiguration := GlobalConfiguration{
Docker: &defaultDocker,
File: &defaultFile,
Web: &defaultWeb,
Marathon: &defaultMarathon,
Consul: &defaultConsul,
ConsulCatalog: &defaultConsulCatalog,
Etcd: &defaultEtcd,
Zookeeper: &defaultZookeeper,
Boltdb: &defaultBoltDb,
Kubernetes: &defaultKubernetes,
Mesos: &defaultMesos,
Retry: &Retry{},
}
return &TraefikConfiguration{
GlobalConfiguration: defaultConfiguration,
}
}
// NewTraefikConfiguration creates a TraefikConfiguration with default values
func NewTraefikConfiguration() *TraefikConfiguration {
return &TraefikConfiguration{
GlobalConfiguration: GlobalConfiguration{
GraceTimeOut: 10,
AccessLogsFile: "",
TraefikLogsFile: "",
LogLevel: "ERROR",
EntryPoints: map[string]*EntryPoint{},
Constraints: []types.Constraint{},
DefaultEntryPoints: []string{},
ProvidersThrottleDuration: time.Duration(2 * time.Second),
MaxIdleConnsPerHost: 200,
},
ConfigFile: "",
}
}
type configs map[string]*types.Configuration

View file

@ -0,0 +1,9 @@
/*
Copyright
*/
//go:generate rm -vf autogen/gen.go
//go:generate mkdir -p static
//go:generate go-bindata -pkg autogen -o autogen/gen.go ./static/... ./templates/...
package main

View file

@ -0,0 +1,39 @@
package job
import (
"github.com/cenk/backoff"
"time"
)
var (
_ backoff.BackOff = (*BackOff)(nil)
)
const (
defaultMinJobInterval = 30 * time.Second
)
// BackOff is an exponential backoff implementation for long-running jobs.
// In long-running jobs, an operation() that fails after a long Duration should not increment the backoff period.
// If operation() takes more than MinJobInterval, Reset() is called in NextBackOff().
type BackOff struct {
*backoff.ExponentialBackOff
MinJobInterval time.Duration
}
// NewBackOff creates an instance of BackOff using default values.
func NewBackOff(backOff *backoff.ExponentialBackOff) *BackOff {
backOff.MaxElapsedTime = 0
return &BackOff{
ExponentialBackOff: backOff,
MinJobInterval: defaultMinJobInterval,
}
}
// NextBackOff calculates the next backoff interval.
func (b *BackOff) NextBackOff() time.Duration {
if b.GetElapsedTime() >= b.MinJobInterval {
b.Reset()
}
return b.ExponentialBackOff.NextBackOff()
}
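// exampleRetry is an editor-added sketch (not part of the upstream source)
// showing how BackOff is meant to be combined with backoff.RetryNotify: an
// operation that runs longer than MinJobInterval before failing resets the
// interval, so the next retry happens quickly again.
func exampleRetry(operation func() error) error {
b := NewBackOff(backoff.NewExponentialBackOff())
notify := func(err error, delay time.Duration) {
// A real caller would log err and the delay before the next attempt.
}
return backoff.RetryNotify(operation, b, notify)
}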

View file

@ -0,0 +1,188 @@
package log
import (
"github.com/Sirupsen/logrus"
"io"
)
var (
logger *logrus.Entry
)
func init() {
logger = logrus.StandardLogger().WithFields(logrus.Fields{})
}
// Context sets the Context of the logger
func Context(context interface{}) *logrus.Entry {
return logger.WithField("context", context)
}
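// exampleContext is an editor-added sketch (not part of the upstream source)
// showing the intended use of Context: every entry logged through the
// returned *logrus.Entry carries a "context" field.
func exampleContext() {
Context("docker-provider").Debug("connection established")
}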
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
logrus.SetOutput(out)
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter logrus.Formatter) {
logrus.SetFormatter(formatter)
}
// SetLevel sets the standard logger level.
func SetLevel(level logrus.Level) {
logrus.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() logrus.Level {
return logrus.GetLevel()
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook logrus.Hook) {
logrus.AddHook(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *logrus.Entry {
return logger.WithError(err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *logrus.Entry {
return logger.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields logrus.Fields) *logrus.Entry {
return logger.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
logger.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
logger.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
logger.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
logger.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
logger.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
logger.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
logger.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
logger.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
logger.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
logger.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
logger.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
logger.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
logger.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
logger.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
logger.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
logger.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
logger.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
logger.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
logger.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
logger.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
logger.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
logger.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
logger.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
logger.Fatalln(args...)
}

View file

@ -0,0 +1,34 @@
package provider
import (
"fmt"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/boltdb"
)
var _ Provider = (*BoltDb)(nil)
// BoltDb holds configurations of the BoltDb provider.
type BoltDb struct {
Kv `mapstructure:",squash"`
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *BoltDb) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
store, err := provider.CreateStore()
if err != nil {
return fmt.Errorf("Failed to connect to KV store: %v", err)
}
provider.kvclient = store
return provider.provide(configurationChan, pool, constraints)
}
// CreateStore creates the KV store
func (provider *BoltDb) CreateStore() (store.Store, error) {
provider.storeType = store.BOLTDB
boltdb.Register()
return provider.createStore()
}

View file

@ -0,0 +1,34 @@
package provider
import (
"fmt"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/consul"
)
var _ Provider = (*Consul)(nil)
// Consul holds configurations of the Consul provider.
type Consul struct {
Kv `mapstructure:",squash"`
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Consul) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
store, err := provider.CreateStore()
if err != nil {
return fmt.Errorf("Failed to connect to KV store: %v", err)
}
provider.kvclient = store
return provider.provide(configurationChan, pool, constraints)
}
// CreateStore creates the KV store
func (provider *Consul) CreateStore() (store.Store, error) {
provider.storeType = store.CONSUL
consul.Register()
return provider.createStore()
}

View file

@ -0,0 +1,344 @@
package provider
import (
"errors"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/BurntSushi/ty/fun"
"github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/hashicorp/consul/api"
)
const (
// DefaultWatchWaitTime is the duration to wait when polling consul
DefaultWatchWaitTime = 15 * time.Second
// DefaultConsulCatalogTagPrefix is a prefix for additional service/node configurations
DefaultConsulCatalogTagPrefix = "traefik"
)
var _ Provider = (*ConsulCatalog)(nil)
// ConsulCatalog holds configurations of the Consul catalog provider.
type ConsulCatalog struct {
BaseProvider `mapstructure:",squash"`
Endpoint string `description:"Consul server endpoint"`
Domain string `description:"Default domain used"`
client *api.Client
Prefix string
}
type serviceUpdate struct {
ServiceName string
Attributes []string
}
type catalogUpdate struct {
Service *serviceUpdate
Nodes []*api.ServiceEntry
}
type nodeSorter []*api.ServiceEntry
func (a nodeSorter) Len() int {
return len(a)
}
func (a nodeSorter) Swap(i int, j int) {
a[i], a[j] = a[j], a[i]
}
func (a nodeSorter) Less(i int, j int) bool {
lentr := a[i]
rentr := a[j]
ls := strings.ToLower(lentr.Service.Service)
lr := strings.ToLower(rentr.Service.Service)
if ls != lr {
return ls < lr
}
if lentr.Service.Address != rentr.Service.Address {
return lentr.Service.Address < rentr.Service.Address
}
if lentr.Node.Address != rentr.Node.Address {
return lentr.Node.Address < rentr.Node.Address
}
return lentr.Service.Port < rentr.Service.Port
}
func (provider *ConsulCatalog) watchServices(stopCh <-chan struct{}) <-chan map[string][]string {
watchCh := make(chan map[string][]string)
catalog := provider.client.Catalog()
safe.Go(func() {
defer close(watchCh)
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
for {
select {
case <-stopCh:
return
default:
}
data, meta, err := catalog.Services(opts)
if err != nil {
log.WithError(err).Errorf("Failed to list services")
return
}
// If LastIndex didn't change then it means `Get` returned
// because of the WaitTime and the key didn't change.
if opts.WaitIndex == meta.LastIndex {
continue
}
opts.WaitIndex = meta.LastIndex
if data != nil {
watchCh <- data
}
}
})
return watchCh
}
func (provider *ConsulCatalog) healthyNodes(service string) (catalogUpdate, error) {
health := provider.client.Health()
opts := &api.QueryOptions{}
data, _, err := health.Service(service, "", true, opts)
if err != nil {
log.WithError(err).Errorf("Failed to fetch details of %s", service)
return catalogUpdate{}, err
}
nodes := fun.Filter(func(node *api.ServiceEntry) bool {
constraintTags := provider.getContraintTags(node.Service.Tags)
ok, failingConstraint := provider.MatchConstraints(constraintTags)
if !ok && failingConstraint != nil {
log.Debugf("Service %v pruned by '%v' constraint", service, failingConstraint.String())
}
return ok
}, data).([]*api.ServiceEntry)
// Merge tags of nodes matching constraints into a single slice.
tags := fun.Foldl(func(node *api.ServiceEntry, set []string) []string {
return fun.Keys(fun.Union(
fun.Set(set),
fun.Set(node.Service.Tags),
).(map[string]bool)).([]string)
}, []string{}, nodes).([]string)
return catalogUpdate{
Service: &serviceUpdate{
ServiceName: service,
Attributes: tags,
},
Nodes: nodes,
}, nil
}
func (provider *ConsulCatalog) getEntryPoints(list string) []string {
return strings.Split(list, ",")
}
func (provider *ConsulCatalog) getBackend(node *api.ServiceEntry) string {
return strings.ToLower(node.Service.Service)
}
func (provider *ConsulCatalog) getFrontendRule(service serviceUpdate) string {
customFrontendRule := provider.getAttribute("frontend.rule", service.Attributes, "")
if customFrontendRule != "" {
return customFrontendRule
}
return "Host:" + service.ServiceName + "." + provider.Domain
}
func (provider *ConsulCatalog) getBackendAddress(node *api.ServiceEntry) string {
if node.Service.Address != "" {
return node.Service.Address
}
return node.Node.Address
}
func (provider *ConsulCatalog) getBackendName(node *api.ServiceEntry, index int) string {
serviceName := strings.ToLower(node.Service.Service) + "--" + node.Service.Address + "--" + strconv.Itoa(node.Service.Port)
for _, tag := range node.Service.Tags {
serviceName += "--" + normalize(tag)
}
serviceName = strings.Replace(serviceName, ".", "-", -1)
serviceName = strings.Replace(serviceName, "=", "-", -1)
// unique int at the end
serviceName += "--" + strconv.Itoa(index)
return serviceName
}
func (provider *ConsulCatalog) getAttribute(name string, tags []string, defaultValue string) string {
for _, tag := range tags {
if strings.Index(strings.ToLower(tag), DefaultConsulCatalogTagPrefix+".") == 0 {
if kv := strings.SplitN(tag[len(DefaultConsulCatalogTagPrefix+"."):], "=", 2); len(kv) == 2 && strings.ToLower(kv[0]) == strings.ToLower(name) {
return kv[1]
}
}
}
return defaultValue
}
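// exampleTagLookup is an editor-added sketch (not part of the upstream source)
// showing how service tags prefixed with "traefik." are read through
// getAttribute; the tag values below are made up.
func (provider *ConsulCatalog) exampleTagLookup() {
tags := []string{"traefik.frontend.rule=Host:db.example.com", "traefik.backend.maxconn.amount=10"}
// Returns "Host:db.example.com": the prefix and key are matched case-insensitively.
_ = provider.getAttribute("frontend.rule", tags, "")
// No matching tag, so the supplied default "wrr" is returned.
_ = provider.getAttribute("backend.loadbalancer.method", tags, "wrr")
}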
func (provider *ConsulCatalog) getContraintTags(tags []string) []string {
var list []string
for _, tag := range tags {
if strings.Index(strings.ToLower(tag), DefaultConsulCatalogTagPrefix+".tags=") == 0 {
splitedTags := strings.Split(tag[len(DefaultConsulCatalogTagPrefix+".tags="):], ",")
list = append(list, splitedTags...)
}
}
return list
}
func (provider *ConsulCatalog) buildConfig(catalog []catalogUpdate) *types.Configuration {
var FuncMap = template.FuncMap{
"getBackend": provider.getBackend,
"getFrontendRule": provider.getFrontendRule,
"getBackendName": provider.getBackendName,
"getBackendAddress": provider.getBackendAddress,
"getAttribute": provider.getAttribute,
"getEntryPoints": provider.getEntryPoints,
"hasMaxconnAttributes": provider.hasMaxconnAttributes,
}
allNodes := []*api.ServiceEntry{}
services := []*serviceUpdate{}
for _, info := range catalog {
for _, node := range info.Nodes {
isEnabled := provider.getAttribute("enable", node.Service.Tags, "true")
if isEnabled != "false" && len(info.Nodes) > 0 {
services = append(services, info.Service)
allNodes = append(allNodes, info.Nodes...)
break
}
}
}
// Ensure a stable ordering of nodes so that identical configurations may be detected
sort.Sort(nodeSorter(allNodes))
templateObjects := struct {
Services []*serviceUpdate
Nodes []*api.ServiceEntry
}{
Services: services,
Nodes: allNodes,
}
configuration, err := provider.getConfiguration("templates/consul_catalog.tmpl", FuncMap, templateObjects)
if err != nil {
log.WithError(err).Error("Failed to create config")
}
return configuration
}
func (provider *ConsulCatalog) hasMaxconnAttributes(attributes []string) bool {
amount := provider.getAttribute("backend.maxconn.amount", attributes, "")
extractorfunc := provider.getAttribute("backend.maxconn.extractorfunc", attributes, "")
if amount != "" && extractorfunc != "" {
return true
}
return false
}
func (provider *ConsulCatalog) getNodes(index map[string][]string) ([]catalogUpdate, error) {
visited := make(map[string]bool)
nodes := []catalogUpdate{}
for service := range index {
name := strings.ToLower(service)
if !strings.Contains(name, " ") && !visited[name] {
visited[name] = true
log.WithFields(logrus.Fields{
"service": name,
}).Debug("Fetching service")
healthy, err := provider.healthyNodes(name)
if err != nil {
return nil, err
}
// healthy.Nodes can be empty if constraints do not match, without throwing an error
if healthy.Service != nil && len(healthy.Nodes) > 0 {
nodes = append(nodes, healthy)
}
}
}
return nodes, nil
}
func (provider *ConsulCatalog) watch(configurationChan chan<- types.ConfigMessage, stop chan bool) error {
stopCh := make(chan struct{})
serviceCatalog := provider.watchServices(stopCh)
defer close(stopCh)
for {
select {
case <-stop:
return nil
case index, ok := <-serviceCatalog:
if !ok {
return errors.New("Consul service list nil")
}
log.Debug("List of services changed")
nodes, err := provider.getNodes(index)
if err != nil {
return err
}
configuration := provider.buildConfig(nodes)
configurationChan <- types.ConfigMessage{
ProviderName: "consul_catalog",
Configuration: configuration,
}
}
}
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *ConsulCatalog) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
config := api.DefaultConfig()
config.Address = provider.Endpoint
client, err := api.NewClient(config)
if err != nil {
return err
}
provider.client = client
provider.Constraints = append(provider.Constraints, constraints...)
pool.Go(func(stop chan bool) {
notify := func(err error, time time.Duration) {
log.Errorf("Consul connection error %+v, retrying in %s", err, time)
}
operation := func() error {
return provider.watch(configurationChan, stop)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Cannot connect to consul server %+v", err)
}
})
return err
}

View file

@ -0,0 +1,663 @@
package provider
import (
"context"
"errors"
"math"
"net"
"net/http"
"strconv"
"strings"
"text/template"
"time"
"github.com/BurntSushi/ty/fun"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/docker/engine-api/client"
dockertypes "github.com/docker/engine-api/types"
dockercontainertypes "github.com/docker/engine-api/types/container"
eventtypes "github.com/docker/engine-api/types/events"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/swarm"
swarmtypes "github.com/docker/engine-api/types/swarm"
"github.com/docker/go-connections/nat"
"github.com/docker/go-connections/sockets"
"github.com/vdemeester/docker-events"
)
const (
// DockerAPIVersion is a constant holding the version of the Docker API traefik will use
DockerAPIVersion string = "1.21"
// SwarmAPIVersion is a constant holding the version of the Docker API traefik will use in Swarm Mode
SwarmAPIVersion string = "1.24"
// SwarmDefaultWatchTime is the duration of the interval when polling docker
SwarmDefaultWatchTime = 15 * time.Second
)
var _ Provider = (*Docker)(nil)
// Docker holds configurations of the Docker provider.
type Docker struct {
BaseProvider `mapstructure:",squash"`
Endpoint string `description:"Docker server endpoint. Can be a tcp or a unix socket endpoint"`
Domain string `description:"Default domain used"`
TLS *ClientTLS `description:"Enable Docker TLS support"`
ExposedByDefault bool `description:"Expose containers by default"`
UseBindPortIP bool `description:"Use the ip address from the bound port, rather than from the inner network"`
SwarmMode bool `description:"Use Docker on Swarm Mode"`
}
// dockerData holds the data needed by the Docker provider
type dockerData struct {
Name string
Labels map[string]string // List of labels set to container or service
NetworkSettings networkSettings
Health string
}
// networkSettings holds the network information needed by the Docker provider
type networkSettings struct {
NetworkMode dockercontainertypes.NetworkMode
Ports nat.PortMap
Networks map[string]*networkData
}
// networkData holds the data of a single network needed by the Docker provider
type networkData struct {
Name string
Addr string
Port int
Protocol string
ID string
}
func (provider *Docker) createClient() (client.APIClient, error) {
var httpClient *http.Client
httpHeaders := map[string]string{
"User-Agent": "Traefik " + version.Version,
}
if provider.TLS != nil {
config, err := provider.TLS.CreateTLSConfig()
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: config,
}
proto, addr, _, err := client.ParseHost(provider.Endpoint)
if err != nil {
return nil, err
}
sockets.ConfigureTransport(tr, proto, addr)
httpClient = &http.Client{
Transport: tr,
}
}
var version string
if provider.SwarmMode {
version = SwarmAPIVersion
} else {
version = DockerAPIVersion
}
return client.NewClient(provider.Endpoint, version, httpClient, httpHeaders)
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Docker) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
provider.Constraints = append(provider.Constraints, constraints...)
// TODO register this routine in pool, and watch for stop channel
safe.Go(func() {
operation := func() error {
var err error
dockerClient, err := provider.createClient()
if err != nil {
log.Errorf("Failed to create a client for docker, error: %s", err)
return err
}
ctx := context.Background()
version, err := dockerClient.ServerVersion(ctx)
if err != nil {
log.Errorf("Failed to retrieve docker server version, error: %s", err)
return err
}
log.Debugf("Docker connection established with docker %s (API %s)", version.Version, version.APIVersion)
var dockerDataList []dockerData
if provider.SwarmMode {
dockerDataList, err = listServices(ctx, dockerClient)
if err != nil {
log.Errorf("Failed to list services for docker swarm mode, error %s", err)
return err
}
} else {
dockerDataList, err = listContainers(ctx, dockerClient)
if err != nil {
log.Errorf("Failed to list containers for docker, error %s", err)
return err
}
}
configuration := provider.loadDockerConfig(dockerDataList)
configurationChan <- types.ConfigMessage{
ProviderName: "docker",
Configuration: configuration,
}
if provider.Watch {
ctx, cancel := context.WithCancel(ctx)
if provider.SwarmMode {
// TODO: This needs to be changed. Linked to Swarm events docker/docker#23827
ticker := time.NewTicker(SwarmDefaultWatchTime)
pool.Go(func(stop chan bool) {
for {
select {
case <-ticker.C:
services, err := listServices(ctx, dockerClient)
if err != nil {
log.Errorf("Failed to list services for docker, error %s", err)
return
}
configuration := provider.loadDockerConfig(services)
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: "docker",
Configuration: configuration,
}
}
case <-stop:
ticker.Stop()
cancel()
return
}
}
})
} else {
pool.Go(func(stop chan bool) {
<-stop
cancel()
})
f := filters.NewArgs()
f.Add("type", "container")
options := dockertypes.EventsOptions{
Filters: f,
}
eventHandler := events.NewHandler(events.ByAction)
startStopHandle := func(m eventtypes.Message) {
log.Debugf("Docker event received %+v", m)
containers, err := listContainers(ctx, dockerClient)
if err != nil {
log.Errorf("Failed to list containers for docker, error %s", err)
// Call cancel to get out of the monitor
cancel()
return
}
configuration := provider.loadDockerConfig(containers)
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: "docker",
Configuration: configuration,
}
}
}
eventHandler.Handle("start", startStopHandle)
eventHandler.Handle("die", startStopHandle)
eventHandler.Handle("health_status: healthy", startStopHandle)
eventHandler.Handle("health_status: unhealthy", startStopHandle)
eventHandler.Handle("health_status: starting", startStopHandle)
errChan := events.MonitorWithHandler(ctx, dockerClient, options, eventHandler)
if err := <-errChan; err != nil {
return err
}
}
}
return nil
}
notify := func(err error, time time.Duration) {
log.Errorf("Docker connection error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Cannot connect to docker server %+v", err)
}
})
return nil
}
func (provider *Docker) loadDockerConfig(containersInspected []dockerData) *types.Configuration {
var DockerFuncMap = template.FuncMap{
"getBackend": provider.getBackend,
"getIPAddress": provider.getIPAddress,
"getPort": provider.getPort,
"getWeight": provider.getWeight,
"getDomain": provider.getDomain,
"getProtocol": provider.getProtocol,
"getPassHostHeader": provider.getPassHostHeader,
"getPriority": provider.getPriority,
"getEntryPoints": provider.getEntryPoints,
"getFrontendRule": provider.getFrontendRule,
"hasCircuitBreakerLabel": provider.hasCircuitBreakerLabel,
"getCircuitBreakerExpression": provider.getCircuitBreakerExpression,
"hasLoadBalancerLabel": provider.hasLoadBalancerLabel,
"getLoadBalancerMethod": provider.getLoadBalancerMethod,
"hasMaxConnLabels": provider.hasMaxConnLabels,
"getMaxConnAmount": provider.getMaxConnAmount,
"getMaxConnExtractorFunc": provider.getMaxConnExtractorFunc,
"getSticky": provider.getSticky,
"replace": replace,
}
// filter containers
filteredContainers := fun.Filter(func(container dockerData) bool {
return provider.containerFilter(container)
}, containersInspected).([]dockerData)
frontends := map[string][]dockerData{}
backends := map[string]dockerData{}
servers := map[string][]dockerData{}
for _, container := range filteredContainers {
frontendName := provider.getFrontendName(container)
frontends[frontendName] = append(frontends[frontendName], container)
backendName := provider.getBackend(container)
backends[backendName] = container
servers[backendName] = append(servers[backendName], container)
}
templateObjects := struct {
Containers []dockerData
Frontends map[string][]dockerData
Backends map[string]dockerData
Servers map[string][]dockerData
Domain string
}{
filteredContainers,
frontends,
backends,
servers,
provider.Domain,
}
configuration, err := provider.getConfiguration("templates/docker.tmpl", DockerFuncMap, templateObjects)
if err != nil {
log.Error(err)
}
return configuration
}
func (provider *Docker) hasCircuitBreakerLabel(container dockerData) bool {
if _, err := getLabel(container, "traefik.backend.circuitbreaker.expression"); err != nil {
return false
}
return true
}
func (provider *Docker) hasLoadBalancerLabel(container dockerData) bool {
_, errMethod := getLabel(container, "traefik.backend.loadbalancer.method")
_, errSticky := getLabel(container, "traefik.backend.loadbalancer.sticky")
if errMethod != nil && errSticky != nil {
return false
}
return true
}
func (provider *Docker) hasMaxConnLabels(container dockerData) bool {
if _, err := getLabel(container, "traefik.backend.maxconn.amount"); err != nil {
return false
}
if _, err := getLabel(container, "traefik.backend.maxconn.extractorfunc"); err != nil {
return false
}
return true
}
func (provider *Docker) getCircuitBreakerExpression(container dockerData) string {
if label, err := getLabel(container, "traefik.backend.circuitbreaker.expression"); err == nil {
return label
}
return "NetworkErrorRatio() > 1"
}
func (provider *Docker) getLoadBalancerMethod(container dockerData) string {
if label, err := getLabel(container, "traefik.backend.loadbalancer.method"); err == nil {
return label
}
return "wrr"
}
func (provider *Docker) getMaxConnAmount(container dockerData) int64 {
if label, err := getLabel(container, "traefik.backend.maxconn.amount"); err == nil {
i, errConv := strconv.ParseInt(label, 10, 64)
if errConv != nil {
log.Errorf("Unable to parse traefik.backend.maxconn.amount %s", label)
return math.MaxInt64
}
return i
}
return math.MaxInt64
}
func (provider *Docker) getMaxConnExtractorFunc(container dockerData) string {
if label, err := getLabel(container, "traefik.backend.maxconn.extractorfunc"); err == nil {
return label
}
return "request.host"
}
func (provider *Docker) containerFilter(container dockerData) bool {
_, err := strconv.Atoi(container.Labels["traefik.port"])
if len(container.NetworkSettings.Ports) == 0 && err != nil {
log.Debugf("Filtering container without port and no traefik.port label %s", container.Name)
return false
}
if len(container.NetworkSettings.Ports) > 1 && err != nil {
log.Debugf("Filtering container with more than 1 port and no traefik.port label %s", container.Name)
return false
}
if !isContainerEnabled(container, provider.ExposedByDefault) {
log.Debugf("Filtering disabled container %s", container.Name)
return false
}
constraintTags := strings.Split(container.Labels["traefik.tags"], ",")
if ok, failingConstraint := provider.MatchConstraints(constraintTags); !ok {
if failingConstraint != nil {
log.Debugf("Container %v pruned by '%v' constraint", container.Name, failingConstraint.String())
}
return false
}
if container.Health != "" && container.Health != "healthy" {
log.Debugf("Filtering unhealthy or starting container %s", container.Name)
return false
}
return true
}
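// exampleContainerFilter is an editor-added sketch (not part of the upstream
// source): the made-up container below exposes no port, but a valid
// traefik.port label plus traefik.enable=true keeps it past containerFilter,
// assuming no constraints are configured and no health status is reported.
func exampleContainerFilter(provider *Docker) bool {
container := dockerData{
Name: "/db",
Labels: map[string]string{"traefik.port": "5432", "traefik.enable": "true"},
NetworkSettings: networkSettings{Ports: nat.PortMap{}},
}
return provider.containerFilter(container)
}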
func (provider *Docker) getFrontendName(container dockerData) string {
// Replace '.' with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78
return normalize(provider.getFrontendRule(container))
}
// getFrontendRule returns the frontend rule for the specified container, using
// its label. It returns a default one (Host) if the label is not present.
func (provider *Docker) getFrontendRule(container dockerData) string {
if label, err := getLabel(container, "traefik.frontend.rule"); err == nil {
return label
}
return "Host:" + provider.getSubDomain(container.Name) + "." + provider.Domain
}
func (provider *Docker) getBackend(container dockerData) string {
if label, err := getLabel(container, "traefik.backend"); err == nil {
return label
}
return normalize(container.Name)
}
func (provider *Docker) getIPAddress(container dockerData) string {
if label, err := getLabel(container, "traefik.docker.network"); err == nil && label != "" {
networkSettings := container.NetworkSettings
if networkSettings.Networks != nil {
network := networkSettings.Networks[label]
if network != nil {
return network.Addr
}
}
}
// If net==host, quick n' dirty, we return 127.0.0.1
// This will work locally, but will fail with swarm.
if container.NetworkSettings.NetworkMode == "host" {
return "127.0.0.1"
}
if provider.UseBindPortIP {
port := provider.getPort(container)
for netport, portBindings := range container.NetworkSettings.Ports {
if string(netport) == port+"/tcp" || string(netport) == port+"/udp" {
for _, p := range portBindings {
return p.HostIP
}
}
}
}
for _, network := range container.NetworkSettings.Networks {
return network.Addr
}
return ""
}
func (provider *Docker) getPort(container dockerData) string {
if label, err := getLabel(container, "traefik.port"); err == nil {
return label
}
for key := range container.NetworkSettings.Ports {
return key.Port()
}
return ""
}
func (provider *Docker) getWeight(container dockerData) string {
if label, err := getLabel(container, "traefik.weight"); err == nil {
return label
}
return "1"
}
func (provider *Docker) getSticky(container dockerData) string {
if _, err := getLabel(container, "traefik.backend.loadbalancer.sticky"); err == nil {
return "true"
}
return "false"
}
func (provider *Docker) getDomain(container dockerData) string {
if label, err := getLabel(container, "traefik.domain"); err == nil {
return label
}
return provider.Domain
}
func (provider *Docker) getProtocol(container dockerData) string {
if label, err := getLabel(container, "traefik.protocol"); err == nil {
return label
}
return "http"
}
func (provider *Docker) getPassHostHeader(container dockerData) string {
if passHostHeader, err := getLabel(container, "traefik.frontend.passHostHeader"); err == nil {
return passHostHeader
}
return "true"
}
func (provider *Docker) getPriority(container dockerData) string {
if priority, err := getLabel(container, "traefik.frontend.priority"); err == nil {
return priority
}
return "0"
}
func (provider *Docker) getEntryPoints(container dockerData) []string {
if entryPoints, err := getLabel(container, "traefik.frontend.entryPoints"); err == nil {
return strings.Split(entryPoints, ",")
}
return []string{}
}
func isContainerEnabled(container dockerData, exposedByDefault bool) bool {
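// Editor's note (not part of the upstream source): && binds tighter than ||,
// so this reads (exposedByDefault && enable != "false") || enable == "true":
// an explicit traefik.enable=true always exposes the container, while with
// ExposedByDefault only an explicit traefik.enable=false hides it.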
return exposedByDefault && container.Labels["traefik.enable"] != "false" || container.Labels["traefik.enable"] == "true"
}
func getLabel(container dockerData, label string) (string, error) {
for key, value := range container.Labels {
if key == label {
return value, nil
}
}
return "", errors.New("Label not found: " + label)
}
func getLabels(container dockerData, labels []string) (map[string]string, error) {
var globalErr error
foundLabels := map[string]string{}
for _, label := range labels {
foundLabel, err := getLabel(container, label)
// Record an error if any of the requested labels is missing, but keep collecting the others.
if err != nil {
globalErr = errors.New("Label not found: " + label)
continue
}
foundLabels[label] = foundLabel
}
return foundLabels, globalErr
}
func listContainers(ctx context.Context, dockerClient client.ContainerAPIClient) ([]dockerData, error) {
containerList, err := dockerClient.ContainerList(ctx, dockertypes.ContainerListOptions{})
if err != nil {
return []dockerData{}, err
}
containersInspected := []dockerData{}
// inspect each container
for _, container := range containerList {
containerInspected, err := dockerClient.ContainerInspect(ctx, container.ID)
if err != nil {
log.Warnf("Failed to inspect container %s, error: %s", container.ID, err)
} else {
dockerData := parseContainer(containerInspected)
containersInspected = append(containersInspected, dockerData)
}
}
return containersInspected, nil
}
func parseContainer(container dockertypes.ContainerJSON) dockerData {
dockerData := dockerData{
NetworkSettings: networkSettings{},
}
if container.ContainerJSONBase != nil {
dockerData.Name = container.ContainerJSONBase.Name
if container.ContainerJSONBase.HostConfig != nil {
dockerData.NetworkSettings.NetworkMode = container.ContainerJSONBase.HostConfig.NetworkMode
}
}
if container.Config != nil && container.Config.Labels != nil {
dockerData.Labels = container.Config.Labels
}
if container.NetworkSettings != nil {
if container.NetworkSettings.Ports != nil {
dockerData.NetworkSettings.Ports = container.NetworkSettings.Ports
}
if container.NetworkSettings.Networks != nil {
dockerData.NetworkSettings.Networks = make(map[string]*networkData)
for name, containerNetwork := range container.NetworkSettings.Networks {
dockerData.NetworkSettings.Networks[name] = &networkData{
ID: containerNetwork.NetworkID,
Name: name,
Addr: containerNetwork.IPAddress,
}
}
}
}
if container.State != nil && container.State.Health != nil {
dockerData.Health = container.State.Health.Status
}
return dockerData
}
// Escape beginning slash "/", convert all others to dash "-"
func (provider *Docker) getSubDomain(name string) string {
return strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1)
}
func listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerData, error) {
serviceList, err := dockerClient.ServiceList(ctx, dockertypes.ServiceListOptions{})
if err != nil {
return []dockerData{}, err
}
networkListArgs := filters.NewArgs()
networkListArgs.Add("driver", "overlay")
networkList, err := dockerClient.NetworkList(ctx, dockertypes.NetworkListOptions{Filters: networkListArgs})
networkMap := make(map[string]*dockertypes.NetworkResource)
if err != nil {
log.Debugf("Failed to list networks for docker, error: %s", err)
return []dockerData{}, err
}
for _, network := range networkList {
networkToAdd := network
networkMap[network.ID] = &networkToAdd
}
var dockerDataList []dockerData
for _, service := range serviceList {
dockerData := parseService(service, networkMap)
dockerDataList = append(dockerDataList, dockerData)
}
return dockerDataList, err
}
func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes.NetworkResource) dockerData {
dockerData := dockerData{
Name: service.Spec.Annotations.Name,
Labels: service.Spec.Annotations.Labels,
NetworkSettings: networkSettings{},
}
if service.Spec.EndpointSpec != nil {
switch service.Spec.EndpointSpec.Mode {
case swarm.ResolutionModeDNSRR:
log.Debugf("Ignored unsupported endpoint-mode, service name: %s", dockerData.Name)
case swarm.ResolutionModeVIP:
dockerData.NetworkSettings.Networks = make(map[string]*networkData)
for _, virtualIP := range service.Endpoint.VirtualIPs {
networkService := networkMap[virtualIP.NetworkID]
if networkService != nil {
ip, _, _ := net.ParseCIDR(virtualIP.Addr)
network := &networkData{
Name: networkService.Name,
ID: virtualIP.NetworkID,
Addr: ip.String(),
}
dockerData.NetworkSettings.Networks[network.Name] = network
} else {
log.Debugf("Network not found, id: %s", virtualIP.NetworkID)
}
}
}
}
return dockerData
}

View file

@ -0,0 +1,34 @@
package provider
import (
"fmt"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/etcd"
)
var _ Provider = (*Etcd)(nil)
// Etcd holds configurations of the Etcd provider.
type Etcd struct {
Kv `mapstructure:",squash"`
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Etcd) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
store, err := provider.CreateStore()
if err != nil {
return fmt.Errorf("Failed to connect to KV store: %v", err)
}
provider.kvclient = store
return provider.provide(configurationChan, pool, constraints)
}
// CreateStore creates the KV store
func (provider *Etcd) CreateStore() (store.Store, error) {
provider.storeType = store.ETCD
etcd.Register()
return provider.createStore()
}

View file

@ -0,0 +1,145 @@
package provider
import (
"github.com/ArthurHlt/go-eureka-client/eureka"
log "github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"io/ioutil"
"strconv"
"strings"
"text/template"
"time"
)
// Eureka holds configuration of the Eureka provider.
type Eureka struct {
BaseProvider `mapstructure:",squash"`
Endpoint string
Delay string
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Eureka) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, _ []types.Constraint) error {
operation := func() error {
configuration, err := provider.buildConfiguration()
if err != nil {
log.Errorf("Failed to build configuration for Eureka, error: %s", err)
return err
}
configurationChan <- types.ConfigMessage{
ProviderName: "eureka",
Configuration: configuration,
}
var delay time.Duration
if len(provider.Delay) > 0 {
var err error
delay, err = time.ParseDuration(provider.Delay)
if err != nil {
log.Errorf("Failed to parse delay for Eureka, error: %s", err)
return err
}
} else {
delay = time.Second * 30
}
ticker := time.NewTicker(delay)
go func() {
for t := range ticker.C {
log.Debug("Refreshing Eureka " + t.String())
configuration, err := provider.buildConfiguration()
if err != nil {
log.Errorf("Failed to refresh Eureka configuration, error: %s", err)
return
}
configurationChan <- types.ConfigMessage{
ProviderName: "eureka",
Configuration: configuration,
}
}
}()
return nil
}
notify := func(err error, time time.Duration) {
log.Errorf("Eureka connection error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Cannot connect to Eureka server %+v", err)
return err
}
return nil
}
// buildConfiguration builds the configuration from the Eureka server
func (provider *Eureka) buildConfiguration() (*types.Configuration, error) {
var EurekaFuncMap = template.FuncMap{
"replace": replace,
"tolower": strings.ToLower,
"getPort": provider.getPort,
"getProtocol": provider.getProtocol,
"getWeight": provider.getWeight,
"getInstanceID": provider.getInstanceID,
}
eureka.GetLogger().SetOutput(ioutil.Discard)
client := eureka.NewClient([]string{
provider.Endpoint,
})
applications, err := client.GetApplications()
if err != nil {
return nil, err
}
templateObjects := struct {
Applications []eureka.Application
}{
applications.Applications,
}
configuration, err := provider.getConfiguration("templates/eureka.tmpl", EurekaFuncMap, templateObjects)
if err != nil {
log.Error(err)
}
return configuration, nil
}
func (provider *Eureka) getPort(instance eureka.InstanceInfo) string {
if instance.SecurePort.Enabled {
return strconv.Itoa(instance.SecurePort.Port)
}
return strconv.Itoa(instance.Port.Port)
}
func (provider *Eureka) getProtocol(instance eureka.InstanceInfo) string {
if instance.SecurePort.Enabled {
return "https"
}
return "http"
}
func (provider *Eureka) getWeight(instance eureka.InstanceInfo) string {
if val, ok := instance.Metadata.Map["traefik.weight"]; ok {
return val
}
return "0"
}
func (provider *Eureka) getInstanceID(instance eureka.InstanceInfo) string {
if val, ok := instance.Metadata.Map["traefik.backend.id"]; ok {
return val
}
return strings.Replace(instance.IpAddr, ".", "-", -1) + "-" + provider.getPort(instance)
}

View file

@ -0,0 +1,84 @@
package provider
import (
"os"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"gopkg.in/fsnotify.v1"
)
var _ Provider = (*File)(nil)
// File holds configurations of the File provider.
type File struct {
BaseProvider `mapstructure:",squash"`
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *File) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, _ []types.Constraint) error {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Error("Error creating file watcher", err)
return err
}
file, err := os.Open(provider.Filename)
if err != nil {
log.Error("Error opening file", err)
return err
}
defer file.Close()
if provider.Watch {
// Process events
pool.Go(func(stop chan bool) {
defer watcher.Close()
for {
select {
case <-stop:
return
case event := <-watcher.Events:
if strings.Contains(event.Name, file.Name()) {
log.Debug("File event:", event)
configuration := provider.loadFileConfig(file.Name())
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: "file",
Configuration: configuration,
}
}
}
case err := <-watcher.Errors:
log.Error("Watcher event error", err)
}
}
})
err = watcher.Add(filepath.Dir(file.Name()))
if err != nil {
log.Error("Error adding file watcher", err)
return err
}
}
configuration := provider.loadFileConfig(file.Name())
configurationChan <- types.ConfigMessage{
ProviderName: "file",
Configuration: configuration,
}
return nil
}
func (provider *File) loadFileConfig(filename string) *types.Configuration {
configuration := new(types.Configuration)
if _, err := toml.DecodeFile(filename, configuration); err != nil {
log.Error("Error reading file:", err)
return nil
}
return configuration
}

View file

@ -0,0 +1,296 @@
package k8s
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"github.com/containous/traefik/log"
"github.com/parnurzeal/gorequest"
"net/http"
"net/url"
"strings"
)
const (
// APIEndpoint defines the base path for kubernetes API resources.
APIEndpoint = "/api/v1"
extentionsEndpoint = "/apis/extensions/v1beta1"
defaultIngress = "/ingresses"
namespaces = "/namespaces/"
)
// Client is a client for the Kubernetes master.
type Client interface {
GetIngresses(labelSelector string, predicate func(Ingress) bool) ([]Ingress, error)
GetService(name, namespace string) (Service, error)
GetEndpoints(name, namespace string) (Endpoints, error)
WatchAll(labelSelector string, stopCh <-chan bool) (chan interface{}, chan error, error)
}
type clientImpl struct {
endpointURL string
tls *tls.Config
token string
caCert []byte
}
// NewClient returns a new Kubernetes client.
// The provided baseURL is a URL (scheme://hostname[:port]) of a
// Kubernetes master without any path.
// The provided caCert and token are used to secure and authenticate requests to the Kubernetes API master.
func NewClient(baseURL string, caCert []byte, token string) (Client, error) {
validURL, err := url.Parse(baseURL)
if err != nil {
return nil, fmt.Errorf("failed to parse URL %q: %v", baseURL, err)
}
return &clientImpl{
endpointURL: strings.TrimSuffix(validURL.String(), "/"),
token: token,
caCert: caCert,
}, nil
}
func makeQueryString(baseParams map[string]string, labelSelector string) (string, error) {
if labelSelector != "" {
baseParams["labelSelector"] = labelSelector
}
queryData, err := json.Marshal(baseParams)
if err != nil {
return "", err
}
return string(queryData), nil
}
// GetIngresses returns all ingresses in the cluster matching the label selector and predicate
func (c *clientImpl) GetIngresses(labelSelector string, predicate func(Ingress) bool) ([]Ingress, error) {
getURL := c.endpointURL + extentionsEndpoint + defaultIngress
queryParams := map[string]string{}
queryData, err := makeQueryString(queryParams, labelSelector)
if err != nil {
return nil, fmt.Errorf("Had problems constructing query string %s : %v", queryParams, err)
}
body, err := c.do(c.request(getURL, queryData))
if err != nil {
return nil, fmt.Errorf("failed to create ingresses request: GET %q : %v", getURL, err)
}
var ingressList IngressList
if err := json.Unmarshal(body, &ingressList); err != nil {
return nil, fmt.Errorf("failed to decode list of ingress resources: %v", err)
}
ingresses := ingressList.Items[:0]
for _, ingress := range ingressList.Items {
if predicate(ingress) {
ingresses = append(ingresses, ingress)
}
}
return ingresses, nil
}
// WatchIngresses watches ingresses in the cluster
func (c *clientImpl) WatchIngresses(labelSelector string, stopCh <-chan bool) (chan interface{}, chan error, error) {
getURL := c.endpointURL + extentionsEndpoint + defaultIngress
return c.watch(getURL, labelSelector, stopCh)
}
// GetService returns the named service from the named namespace
func (c *clientImpl) GetService(name, namespace string) (Service, error) {
getURL := c.endpointURL + APIEndpoint + namespaces + namespace + "/services/" + name
body, err := c.do(c.request(getURL, ""))
if err != nil {
return Service{}, fmt.Errorf("failed to create services request: GET %q : %v", getURL, err)
}
var service Service
if err := json.Unmarshal(body, &service); err != nil {
return Service{}, fmt.Errorf("failed to decode service resource: %v", err)
}
return service, nil
}
// WatchServices watches services in the cluster
func (c *clientImpl) WatchServices(stopCh <-chan bool) (chan interface{}, chan error, error) {
getURL := c.endpointURL + APIEndpoint + "/services"
return c.watch(getURL, "", stopCh)
}
// GetEndpoints returns the named Endpoints
// Endpoints have the same name as the corresponding service
func (c *clientImpl) GetEndpoints(name, namespace string) (Endpoints, error) {
getURL := c.endpointURL + APIEndpoint + namespaces + namespace + "/endpoints/" + name
body, err := c.do(c.request(getURL, ""))
if err != nil {
return Endpoints{}, fmt.Errorf("failed to create endpoints request: GET %q : %v", getURL, err)
}
var endpoints Endpoints
if err := json.Unmarshal(body, &endpoints); err != nil {
return Endpoints{}, fmt.Errorf("failed to decode endpoints resources: %v", err)
}
return endpoints, nil
}
// WatchEndpoints watches endpoints in the cluster
func (c *clientImpl) WatchEndpoints(stopCh <-chan bool) (chan interface{}, chan error, error) {
getURL := c.endpointURL + APIEndpoint + "/endpoints"
return c.watch(getURL, "", stopCh)
}
// WatchAll watches ingress, service and endpoints events in the cluster
func (c *clientImpl) WatchAll(labelSelector string, stopCh <-chan bool) (chan interface{}, chan error, error) {
watchCh := make(chan interface{}, 10)
errCh := make(chan error, 10)
stopIngresses := make(chan bool)
chanIngresses, chanIngressesErr, err := c.WatchIngresses(labelSelector, stopIngresses)
if err != nil {
return watchCh, errCh, fmt.Errorf("failed to create watch: %v", err)
}
stopServices := make(chan bool)
chanServices, chanServicesErr, err := c.WatchServices(stopServices)
if err != nil {
return watchCh, errCh, fmt.Errorf("failed to create watch: %v", err)
}
stopEndpoints := make(chan bool)
chanEndpoints, chanEndpointsErr, err := c.WatchEndpoints(stopEndpoints)
if err != nil {
return watchCh, errCh, fmt.Errorf("failed to create watch: %v", err)
}
go func() {
defer close(watchCh)
defer close(errCh)
defer close(stopIngresses)
defer close(stopServices)
defer close(stopEndpoints)
for {
select {
case <-stopCh:
stopIngresses <- true
stopServices <- true
stopEndpoints <- true
return
case err := <-chanIngressesErr:
errCh <- err
case err := <-chanServicesErr:
errCh <- err
case err := <-chanEndpointsErr:
errCh <- err
case event := <-chanIngresses:
watchCh <- event
case event := <-chanServices:
watchCh <- event
case event := <-chanEndpoints:
watchCh <- event
}
}
}()
return watchCh, errCh, nil
}
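// exampleWatchAll is an illustrative sketch, not part of the upstream file: it
// shows how a caller of the package's Client interface consumes the fan-in
// channels returned by WatchAll. Sending on stopCh (done elsewhere) ends the
// three underlying watches and closes both returned channels.
func exampleWatchAll(c Client, stopCh chan bool) error {
eventCh, errCh, err := c.WatchAll("", stopCh)
if err != nil {
return err
}
for {
select {
case event, ok := <-eventCh:
if !ok {
return nil // the watch was stopped
}
_ = event // typically triggers a configuration reload
case watchErr, ok := <-errCh:
if !ok {
return nil // the watch was stopped
}
_ = watchErr // typically logged; the caller may re-establish the watch
}
}
}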
func (c *clientImpl) do(request *gorequest.SuperAgent) ([]byte, error) {
res, body, errs := request.EndBytes()
if errs != nil {
return nil, fmt.Errorf("failed to create request: GET %q : %v", request.Url, errs)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("http error %d GET %q: %q", res.StatusCode, request.Url, string(body))
}
return body, nil
}
func (c *clientImpl) request(reqURL string, queryContent interface{}) *gorequest.SuperAgent {
// Make request to Kubernetes API
parsedURL, parseErr := url.Parse(reqURL)
if parseErr != nil {
log.Errorf("Had issues parsing url %s. Trying anyway.", reqURL)
}
request := gorequest.New().Get(reqURL)
request.Transport.DisableKeepAlives = true
if parsedURL.Scheme == "https" {
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(c.caCert)
c.tls = &tls.Config{RootCAs: pool}
request.TLSClientConfig(c.tls)
}
if len(c.token) > 0 {
request.Header["Authorization"] = "Bearer " + c.token
}
request.Query(queryContent)
return request
}
// GenericObject generic object
type GenericObject struct {
TypeMeta `json:",inline"`
ListMeta `json:"metadata,omitempty"`
}
func (c *clientImpl) watch(url string, labelSelector string, stopCh <-chan bool) (chan interface{}, chan error, error) {
watchCh := make(chan interface{}, 10)
errCh := make(chan error, 10)
// get version
body, err := c.do(c.request(url, ""))
if err != nil {
return watchCh, errCh, fmt.Errorf("failed to do version request: GET %q : %v", url, err)
}
var generic GenericObject
if err := json.Unmarshal(body, &generic); err != nil {
return watchCh, errCh, fmt.Errorf("failed to decode version %v", err)
}
resourceVersion := generic.ResourceVersion
queryParams := map[string]string{"watch": "", "resourceVersion": resourceVersion}
queryData, err := makeQueryString(queryParams, labelSelector)
if err != nil {
return watchCh, errCh, fmt.Errorf("Unable to construct query args")
}
request := c.request(url, queryData)
req, err := request.MakeRequest()
if err != nil {
return watchCh, errCh, fmt.Errorf("failed to make watch request: GET %q : %v", url, err)
}
request.Client.Transport = request.Transport
res, err := request.Client.Do(req)
if err != nil {
return watchCh, errCh, fmt.Errorf("failed to do watch request: GET %q: %v", url, err)
}
go func() {
finishCh := make(chan bool)
defer close(finishCh)
defer close(watchCh)
defer close(errCh)
go func() {
defer res.Body.Close()
for {
var eventList interface{}
if err := json.NewDecoder(res.Body).Decode(&eventList); err != nil {
if !strings.Contains(err.Error(), "net/http: request canceled") {
errCh <- fmt.Errorf("failed to decode watch event: GET %q : %v", url, err)
}
finishCh <- true
return
}
watchCh <- eventList
}
}()
select {
case <-stopCh:
go func() {
request.Transport.CancelRequest(req)
}()
<-finishCh
return
}
}()
return watchCh, errCh, nil
}

View file

@@ -0,0 +1,84 @@
package k8s
// Endpoints is a collection of endpoints that implement the actual service. Example:
// Name: "mysvc",
// Subsets: [
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// },
// {
// Addresses: [{"ip": "10.10.3.3"}],
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
type Endpoints struct {
TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
// The set of all endpoints is the union of all subsets.
Subsets []EndpointSubset
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// }
// The resulting set of endpoints can be viewed as:
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
Addresses []EndpointAddress
NotReadyAddresses []EndpointAddress
Ports []EndpointPort
}
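// exampleExpandSubset is an illustrative sketch, not part of the upstream
// file: it expands a single EndpointSubset into the Cartesian product of
// Addresses x Ports described above. The hostPort helper type is hypothetical.
type hostPort struct {
IP   string
Port int32
}
func exampleExpandSubset(subset EndpointSubset) []hostPort {
var expanded []hostPort
for _, address := range subset.Addresses {
for _, port := range subset.Ports {
expanded = append(expanded, hostPort{IP: address.IP, Port: port.Port})
}
}
return expanded
}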
// EndpointAddress is a tuple that describes a single IP address.
type EndpointAddress struct {
// The IP of this endpoint.
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, see #4447.
IP string
// Optional: Hostname of this endpoint
// Meant to be used by DNS servers etc.
Hostname string `json:"hostname,omitempty"`
// Optional: The kubernetes object related to the entry point.
TargetRef *ObjectReference
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
// The name of this port (corresponds to ServicePort.Name). Optional
// if only one port is defined. Must be a DNS_LABEL.
Name string
// The port number.
Port int32
// The IP protocol for this port.
Protocol Protocol
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
Kind string `json:"kind,omitempty"`
Namespace string `json:"namespace,omitempty"`
Name string `json:"name,omitempty"`
UID UID `json:"uid,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty"`
// Optional. If referring to a piece of an object instead of an entire object, this string
// should contain information to identify the sub-object. For example, if the object
// reference is to a container within a pod, this would take on a value like:
// "spec.containers{name}" (where "name" refers to the name of the container that triggered
// the event) or if no container name is specified "spec.containers[2]" (container with
// index 2 in this pod). This syntax is chosen only to have some well-defined way of
// referencing a part of an object.
// TODO: this design is not final and this field is subject to change in the future.
FieldPath string `json:"fieldPath,omitempty"`
}

View file

@@ -0,0 +1,151 @@
package k8s
// Ingress is a collection of rules that allow inbound connections to reach the
// endpoints defined by a backend. An Ingress can be configured to give services
// externally-reachable urls, load balance traffic, terminate SSL, offer name
// based virtual hosting etc.
type Ingress struct {
TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
ObjectMeta `json:"metadata,omitempty"`
// Spec is the desired state of the Ingress.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Spec IngressSpec `json:"spec,omitempty"`
// Status is the current state of the Ingress.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Status IngressStatus `json:"status,omitempty"`
}
// IngressList is a collection of Ingress.
type IngressList struct {
TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
ListMeta `json:"metadata,omitempty"`
// Items is the list of Ingress.
Items []Ingress `json:"items"`
}
// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpec struct {
// A default backend capable of servicing requests that don't match any
// rule. At least one of 'backend' or 'rules' must be specified. This field
// is optional to allow the loadbalancer controller or defaulting logic to
// specify a global default.
Backend *IngressBackend `json:"backend,omitempty"`
// TLS configuration. Currently the Ingress only supports a single TLS
// port, 443. If multiple members of this list specify different hosts, they
// will be multiplexed on the same port according to the hostname specified
// through the SNI TLS extension, if the ingress controller fulfilling the
// ingress supports SNI.
TLS []IngressTLS `json:"tls,omitempty"`
// A list of host rules used to configure the Ingress. If unspecified, or
// no rule matches, all traffic is sent to the default backend.
Rules []IngressRule `json:"rules,omitempty"`
// TODO: Add the ability to specify load-balancer IP through claims
}
// IngressTLS describes the transport layer security associated with an Ingress.
type IngressTLS struct {
// Hosts are a list of hosts included in the TLS certificate. The values in
// this list must match the name/s used in the tlsSecret. Defaults to the
// wildcard host setting for the loadbalancer controller fulfilling this
// Ingress, if left unspecified.
Hosts []string `json:"hosts,omitempty"`
// SecretName is the name of the secret used to terminate SSL traffic on 443.
// Field is left optional to allow SSL routing based on SNI hostname alone.
// If the SNI host in a listener conflicts with the "Host" header field used
// by an IngressRule, the SNI host is used for termination and value of the
// Host header is used for routing.
SecretName string `json:"secretName,omitempty"`
// TODO: Consider specifying different modes of termination, protocols etc.
}
// IngressStatus describe the current state of the Ingress.
type IngressStatus struct {
// LoadBalancer contains the current status of the load-balancer.
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
}
// IngressRule represents the rules mapping the paths under a specified host to
// the related backend services. Incoming requests are first evaluated for a host
// match, then routed to the backend associated with the matching IngressRuleValue.
type IngressRule struct {
// Host is the fully qualified domain name of a network host, as defined
// by RFC 3986. Note the following deviations from the "host" part of the
// URI as defined in the RFC:
// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
// IP in the Spec of the parent Ingress.
// 2. The `:` delimiter is not respected because ports are not allowed.
// Currently the port of an Ingress is implicitly :80 for http and
// :443 for https.
// Both these may change in the future.
// Incoming requests are matched against the host before the IngressRuleValue.
// If the host is unspecified, the Ingress routes all traffic based on the
// specified IngressRuleValue.
Host string `json:"host,omitempty"`
// IngressRuleValue represents a rule to route requests for this IngressRule.
// If unspecified, the rule defaults to a http catch-all. Whether that sends
// just traffic matching the host to the default backend or all traffic to the
// default backend, is left to the controller fulfilling the Ingress. Http is
// currently the only supported IngressRuleValue.
IngressRuleValue `json:",inline,omitempty"`
}
// IngressRuleValue represents a rule to apply against incoming requests. If the
// rule is satisfied, the request is routed to the specified backend. Currently
// mixing different types of rules in a single Ingress is disallowed, so exactly
// one of the following must be set.
type IngressRuleValue struct {
//TODO:
// 1. Consider renaming this resource and the associated rules so they
// aren't tied to Ingress. They can be used to route intra-cluster traffic.
// 2. Consider adding fields for ingress-type specific global options
// usable by a loadbalancer, like http keep-alive.
HTTP *HTTPIngressRuleValue `json:"http,omitempty"`
}
// HTTPIngressRuleValue is a list of http selectors pointing to backends.
// In the example: http://<host>/<path>?<searchpart> -> backend, where parts
// of the url correspond to RFC 3986, this resource will be used
// to match against everything after the last '/' and before the first '?'
// or '#'.
type HTTPIngressRuleValue struct {
// A collection of paths that map requests to backends.
Paths []HTTPIngressPath `json:"paths"`
// TODO: Consider adding fields for ingress-type specific global
// options usable by a loadbalancer, like http keep-alive.
}
// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
// the path are forwarded to the backend.
type HTTPIngressPath struct {
// Path is an extended POSIX regex as defined by IEEE Std 1003.1,
// (i.e this follows the egrep/unix syntax, not the perl syntax)
// matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path"
// part of a URL as defined by RFC 3986. Paths must begin with
// a '/'. If unspecified, the path defaults to a catch all sending
// traffic to the backend.
Path string `json:"path,omitempty"`
// Backend defines the referenced service endpoint to which the traffic
// will be forwarded to.
Backend IngressBackend `json:"backend"`
}
// IngressBackend describes all endpoints for a given service and port.
type IngressBackend struct {
// Specifies the name of the referenced service.
ServiceName string `json:"serviceName"`
// Specifies the port of the referenced service.
ServicePort IntOrString `json:"servicePort"`
}

View file

@@ -0,0 +1,326 @@
package k8s
import (
"encoding/json"
"strconv"
"time"
)
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
type TypeMeta struct {
// Kind is a string value representing the REST resource this object represents.
// Servers may infer this from the endpoint the client submits requests to.
// Cannot be updated.
// In CamelCase.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
Kind string `json:"kind,omitempty"`
// APIVersion defines the versioned schema of this representation of an object.
// Servers should convert recognized schemas to the latest internal value, and
// may reject unrecognized values.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
APIVersion string `json:"apiVersion,omitempty"`
}
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
type ObjectMeta struct {
// Name is unique within a namespace. Name is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
Name string `json:"name,omitempty"`
// GenerateName indicates that the name should be made unique by the server prior to persisting
// it. A non-empty value for the field indicates the name will be made unique (and the name
// returned to the client will be different than the name passed). The value of this field will
// be combined with a unique suffix on the server if the Name field has not been provided.
// The provided value must be valid within the rules for Name, and may be truncated by the length
// of the suffix required to make the value unique on the server.
//
// If this field is specified, and Name is not present, the server will NOT return a 409 if the
// generated name exists - instead, it will either return 201 Created or 500 with Reason
// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
// should retry (optionally after the time indicated in the Retry-After header).
GenerateName string `json:"generateName,omitempty"`
// Namespace defines the space within which name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
Namespace string `json:"namespace,omitempty"`
// SelfLink is a URL representing this object.
SelfLink string `json:"selfLink,omitempty"`
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
UID UID `json:"uid,omitempty"`
// An opaque value that represents the version of this resource. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
// Clients must treat these values as opaque and values may only be valid for a particular
// resource or set of resources. Only servers will generate resource versions.
ResourceVersion string `json:"resourceVersion,omitempty"`
// A sequence number representing a specific generation of the desired state.
// Populated by the system. Read-only.
Generation int64 `json:"generation,omitempty"`
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
CreationTimestamp Time `json:"creationTimestamp,omitempty"`
// DeletionTimestamp is the time after which this resource will be deleted. This
// field is set by the server when a graceful deletion is requested by the user, and is not
// directly settable by a client. The resource will be deleted (no longer visible from
// resource lists, and not reachable by name) after the time in this field. Once set, this
// value may not be unset or be set further into the future, although it may be shortened
// or the resource may be deleted prior to this time. For example, a user may request that
// a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
// signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet
// will send a hard termination signal to the container.
DeletionTimestamp *Time `json:"deletionTimestamp,omitempty"`
// DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion
// was requested. Represents the most recent grace period, and may only be shortened once set.
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
// Labels are key value pairs that may be used to scope and select individual resources.
// Label keys are of the form:
// label-key ::= prefixed-name | name
// prefixed-name ::= prefix '/' name
// prefix ::= DNS_SUBDOMAIN
// name ::= DNS_LABEL
// The prefix is optional. If the prefix is not specified, the key is assumed to be private
// to the user. Other system components that wish to use labels must specify a prefix. The
// "kubernetes.io/" prefix is reserved for use by kubernetes components.
// TODO: replace map[string]string with labels.LabelSet type
Labels map[string]string `json:"labels,omitempty"`
// Annotations are unstructured key value data stored with a resource that may be set by
// external tooling. They are not queryable and should be preserved when modifying
// objects. Annotation keys have the same formatting restrictions as Label keys. See the
// comments on Labels for details.
Annotations map[string]string `json:"annotations,omitempty"`
}
// UID is a type that holds unique ID values, including UUIDs. Because we
// don't ONLY use UUIDs, this is an alias to string. Being a type captures
// intent and helps make sure that UIDs and names do not get conflated.
type UID string
// Time is a wrapper around time.Time which supports correct
// marshaling to YAML and JSON. Wrappers are provided for many
// of the factory methods that the time package offers.
//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
type Time struct {
time.Time `protobuf:"-"`
}
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
// Spec defines the behavior of a service.
Spec ServiceSpec `json:"spec,omitempty"`
// Status represents the current status of a service.
Status ServiceStatus `json:"status,omitempty"`
}
// ServiceSpec describes the attributes that a user creates on a service
type ServiceSpec struct {
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`
// Required: The list of ports that are exposed by this service.
Ports []ServicePort `json:"ports"`
// This service will route traffic to pods having labels matching this selector. If empty or not present,
// the service is assumed to have endpoints set by an external process and Kubernetes will not modify
// those endpoints.
Selector map[string]string `json:"selector"`
// ClusterIP is usually assigned by the master. If specified by the user
// we will try to respect it or else fail the request. This field can
// not be changed by updates.
// Valid values are None, empty string (""), or a valid IP address
// None can be specified for headless services when proxying is not required
ClusterIP string `json:"clusterIP,omitempty"`
// ExternalIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
ExternalIPs []string `json:"externalIPs,omitempty"`
// Only applies to Service Type: LoadBalancer
// LoadBalancer will get created with the IP specified in this field.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
// Required: Supports "ClientIP" and "None". Used to maintain session affinity.
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
}
// ServicePort service port
type ServicePort struct {
// Optional if only one ServicePort is defined on this service: The
// name of this port within the service. This must be a DNS_LABEL.
// All ports within a ServiceSpec must have unique names. This maps to
// the 'Name' field in EndpointPort objects.
Name string `json:"name"`
// The IP protocol for this port. Supports "TCP" and "UDP".
Protocol Protocol `json:"protocol"`
// The port that will be exposed on the service.
Port int `json:"port"`
// Optional: The target port on pods selected by this service. If this
// is a string, it will be looked up as a named port in the target
// Pod's container ports. If this is not specified, the value
// of the 'port' field is used (an identity map).
// This field is ignored for services with clusterIP=None, and should be
// omitted or set equal to the 'port' field.
TargetPort IntOrString `json:"targetPort"`
// The port on each node on which this service is exposed.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
NodePort int `json:"nodePort"`
}
// ServiceStatus represents the current status of a service
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
}
// LoadBalancerStatus represents the status of a load-balancer
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer;
// traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngress `json:"ingress,omitempty"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
IP string `json:"ip,omitempty"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
Hostname string `json:"hostname,omitempty"`
}
// ServiceAffinity Session Affinity Type string
type ServiceAffinity string
// ServiceType Service Type string describes ingress methods for a service
type ServiceType string
// Protocol defines network protocols supported for things like container ports.
type Protocol string
// IntOrString is a type that can hold an int32 or a string. When used in
// JSON or YAML marshalling and unmarshalling, it produces or consumes the
// inner type. This allows you to have, for example, a JSON field that can
// accept a name or number.
// TODO: Rename to Int32OrString
//
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type IntOrString struct {
Type Type
IntVal int32
StrVal string
}
// FromInt creates an IntOrString object with an int32 value. It is
// your responsibility not to call this method with a value greater
// than int32.
// TODO: convert to (val int32)
func FromInt(val int) IntOrString {
return IntOrString{Type: Int, IntVal: int32(val)}
}
// FromString creates an IntOrString object with a string value.
func FromString(val string) IntOrString {
return IntOrString{Type: String, StrVal: val}
}
// String returns the string value, or the Itoa of the int value.
func (intstr *IntOrString) String() string {
if intstr.Type == String {
return intstr.StrVal
}
return strconv.Itoa(intstr.IntValue())
}
// IntValue returns the IntVal if type Int, or if
// it is a String, will attempt a conversion to int.
func (intstr *IntOrString) IntValue() int {
if intstr.Type == String {
i, _ := strconv.Atoi(intstr.StrVal)
return i
}
return int(intstr.IntVal)
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
if value[0] == '"' {
intstr.Type = String
return json.Unmarshal(value, &intstr.StrVal)
}
intstr.Type = Int
return json.Unmarshal(value, &intstr.IntVal)
}
// Type represents the stored type of IntOrString.
type Type int
const (
// Int int
Int Type = iota // The IntOrString holds an int.
//String string
String // The IntOrString holds a string.
)
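// exampleIntOrString is an illustrative sketch, not part of the upstream file:
// it shows how the same JSON field decodes into either branch of IntOrString,
// e.g. a service port given as 80 or as "http".
func exampleIntOrString() (IntOrString, IntOrString, error) {
var byNumber, byName IntOrString
if err := json.Unmarshal([]byte(`80`), &byNumber); err != nil {
return byNumber, byName, err
}
// byNumber.Type == Int, byNumber.IntValue() == 80
if err := json.Unmarshal([]byte(`"http"`), &byName); err != nil {
return byNumber, byName, err
}
// byName.Type == String, byName.String() == "http"
return byNumber, byName, nil
}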
// ServiceList holds a list of services.
type ServiceList struct {
TypeMeta `json:",inline"`
ListMeta `json:"metadata,omitempty"`
Items []Service `json:"items"`
}
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
type ListMeta struct {
// SelfLink is a URL representing this object.
// Populated by the system.
// Read-only.
SelfLink string `json:"selfLink,omitempty"`
// String that identifies the server's internal version of this object that
// can be used by clients to determine when objects have changed.
// Value must be treated as opaque by clients and passed unmodified back to the server.
// Populated by the system.
// Read-only.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
ResourceVersion string `json:"resourceVersion,omitempty"`
}

View file

@@ -0,0 +1,328 @@
package provider
import (
"fmt"
"github.com/containous/traefik/log"
"github.com/containous/traefik/provider/k8s"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"text/template"
"time"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
)
const (
serviceAccountToken = "/var/run/secrets/kubernetes.io/serviceaccount/token"
serviceAccountCACert = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
defaultKubeEndpoint = "http://127.0.0.1:8080"
)
// Namespaces holds kubernetes namespaces
type Namespaces []string
// Set adds strings to the Namespaces parser.
// It splits str on ',' and ';'.
func (ns *Namespaces) Set(str string) error {
fargs := func(c rune) bool {
return c == ',' || c == ';'
}
// get function
slice := strings.FieldsFunc(str, fargs)
*ns = append(*ns, slice...)
return nil
}
// Get returns the Namespaces value.
func (ns *Namespaces) Get() interface{} { return Namespaces(*ns) }
// String returns the slice formatted as a string.
func (ns *Namespaces) String() string { return fmt.Sprintf("%v", *ns) }
// SetValue sets the given Namespaces value into the parser.
func (ns *Namespaces) SetValue(val interface{}) {
*ns = Namespaces(val.(Namespaces))
}
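// exampleNamespaces is an illustrative sketch, not part of the upstream file:
// it shows that Set splits a single flag value on both ',' and ';', so
// "default,kube-system;monitoring" yields three namespaces.
func exampleNamespaces() Namespaces {
var ns Namespaces
_ = ns.Set("default,kube-system;monitoring")
// ns now holds ["default", "kube-system", "monitoring"]
return ns
}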
var _ Provider = (*Kubernetes)(nil)
// Kubernetes holds configurations of the Kubernetes provider.
type Kubernetes struct {
BaseProvider `mapstructure:",squash"`
Endpoint string `description:"Kubernetes server endpoint"`
DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers"`
Namespaces Namespaces `description:"Kubernetes namespaces"`
LabelSelector string `description:"Kubernetes api label selector to use"`
lastConfiguration safe.Safe
}
func (provider *Kubernetes) createClient() (k8s.Client, error) {
var token string
tokenBytes, err := ioutil.ReadFile(serviceAccountToken)
if err == nil {
token = string(tokenBytes)
log.Debugf("Kubernetes token: %s", token)
} else {
log.Errorf("Kubernetes load token error: %s", err)
}
caCert, err := ioutil.ReadFile(serviceAccountCACert)
if err == nil {
log.Debugf("Kubernetes CA cert: %s", serviceAccountCACert)
} else {
log.Errorf("Kubernetes load token error: %s", err)
}
kubernetesHost := os.Getenv("KUBERNETES_SERVICE_HOST")
kubernetesPort := os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")
// Prefer the user-provided endpoint; fall back to the in-cluster environment
// variables (almost always set inside a pod) only when no endpoint was given
if provider.Endpoint == "" && len(kubernetesPort) > 0 && len(kubernetesHost) > 0 {
log.Debugf("Using environment provided kubernetes endpoint")
provider.Endpoint = "https://" + kubernetesHost + ":" + kubernetesPort
}
if provider.Endpoint == "" {
log.Debugf("Using default kubernetes api endpoint")
provider.Endpoint = defaultKubeEndpoint
}
log.Debugf("Kubernetes endpoint: %s", provider.Endpoint)
return k8s.NewClient(provider.Endpoint, caCert, token)
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
k8sClient, err := provider.createClient()
if err != nil {
return err
}
provider.Constraints = append(provider.Constraints, constraints...)
pool.Go(func(stop chan bool) {
operation := func() error {
for {
stopWatch := make(chan bool, 5)
defer close(stopWatch)
log.Debugf("Using label selector: '%s'", provider.LabelSelector)
eventsChan, errEventsChan, err := k8sClient.WatchAll(provider.LabelSelector, stopWatch)
if err != nil {
log.Errorf("Error watching kubernetes events: %v", err)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
return err
case <-stop:
return nil
}
}
for {
select {
case <-stop:
stopWatch <- true
return nil
case err := <-errEventsChan:
stopWatch <- true
return err
case event := <-eventsChan:
log.Debugf("Received event from kubernetes %+v", event)
templateObjects, err := provider.loadIngresses(k8sClient)
if err != nil {
return err
}
if reflect.DeepEqual(provider.lastConfiguration.Get(), templateObjects) {
log.Debugf("Skipping event from kubernetes %+v", event)
} else {
provider.lastConfiguration.Set(templateObjects)
configurationChan <- types.ConfigMessage{
ProviderName: "kubernetes",
Configuration: provider.loadConfig(*templateObjects),
}
}
}
}
}
}
notify := func(err error, time time.Duration) {
log.Errorf("Kubernetes connection error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Cannot connect to Kubernetes server %+v", err)
}
})
templateObjects, err := provider.loadIngresses(k8sClient)
if err != nil {
return err
}
if reflect.DeepEqual(provider.lastConfiguration.Get(), templateObjects) {
log.Debugf("Skipping configuration from kubernetes %+v", templateObjects)
} else {
provider.lastConfiguration.Set(templateObjects)
configurationChan <- types.ConfigMessage{
ProviderName: "kubernetes",
Configuration: provider.loadConfig(*templateObjects),
}
}
return nil
}
func (provider *Kubernetes) loadIngresses(k8sClient k8s.Client) (*types.Configuration, error) {
ingresses, err := k8sClient.GetIngresses(provider.LabelSelector, func(ingress k8s.Ingress) bool {
if len(provider.Namespaces) == 0 {
return true
}
for _, n := range provider.Namespaces {
if ingress.ObjectMeta.Namespace == n {
return true
}
}
return false
})
if err != nil {
log.Errorf("Error retrieving ingresses: %+v", err)
return nil, err
}
templateObjects := types.Configuration{
map[string]*types.Backend{},
map[string]*types.Frontend{},
}
PassHostHeader := provider.getPassHostHeader()
for _, i := range ingresses {
for _, r := range i.Spec.Rules {
for _, pa := range r.HTTP.Paths {
if _, exists := templateObjects.Backends[r.Host+pa.Path]; !exists {
templateObjects.Backends[r.Host+pa.Path] = &types.Backend{
Servers: make(map[string]types.Server),
}
}
if _, exists := templateObjects.Frontends[r.Host+pa.Path]; !exists {
templateObjects.Frontends[r.Host+pa.Path] = &types.Frontend{
Backend: r.Host + pa.Path,
PassHostHeader: PassHostHeader,
Routes: make(map[string]types.Route),
Priority: len(pa.Path),
}
}
if len(r.Host) > 0 {
if _, exists := templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host]; !exists {
templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host] = types.Route{
Rule: "Host:" + r.Host,
}
}
}
if len(pa.Path) > 0 {
ruleType := i.Annotations["traefik.frontend.rule.type"]
switch strings.ToLower(ruleType) {
case "pathprefixstrip":
ruleType = "PathPrefixStrip"
case "pathstrip":
ruleType = "PathStrip"
case "path":
ruleType = "Path"
case "pathprefix":
ruleType = "PathPrefix"
case "":
ruleType = "PathPrefix"
default:
log.Warnf("Unknown RuleType %s for %s/%s, falling back to PathPrefix", ruleType, i.ObjectMeta.Namespace, i.ObjectMeta.Name)
ruleType = "PathPrefix"
}
templateObjects.Frontends[r.Host+pa.Path].Routes[pa.Path] = types.Route{
Rule: ruleType + ":" + pa.Path,
}
}
service, err := k8sClient.GetService(pa.Backend.ServiceName, i.ObjectMeta.Namespace)
if err != nil {
log.Warnf("Error retrieving services: %v", err)
delete(templateObjects.Frontends, r.Host+pa.Path)
log.Warnf("Error retrieving services %s", pa.Backend.ServiceName)
continue
}
protocol := "http"
for _, port := range service.Spec.Ports {
if equalPorts(port, pa.Backend.ServicePort) {
if port.Port == 443 {
protocol = "https"
}
endpoints, err := k8sClient.GetEndpoints(service.ObjectMeta.Name, service.ObjectMeta.Namespace)
if err != nil {
log.Errorf("Error retrieving endpoints: %v", err)
continue
}
if len(endpoints.Subsets) == 0 {
log.Warnf("Endpoints not found for %s/%s, falling back to Service ClusterIP", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
URL: protocol + "://" + service.Spec.ClusterIP + ":" + strconv.Itoa(port.Port),
Weight: 1,
}
} else {
for _, subset := range endpoints.Subsets {
for _, address := range subset.Addresses {
url := protocol + "://" + address.IP + ":" + strconv.Itoa(endpointPortNumber(port, subset.Ports))
name := url
if address.TargetRef != nil && address.TargetRef.Name != "" {
name = address.TargetRef.Name
}
templateObjects.Backends[r.Host+pa.Path].Servers[name] = types.Server{
URL: url,
Weight: 1,
}
}
}
}
break
}
}
}
}
}
return &templateObjects, nil
}
func endpointPortNumber(servicePort k8s.ServicePort, endpointPorts []k8s.EndpointPort) int {
if len(endpointPorts) > 0 {
//name is optional if there is only one port
port := endpointPorts[0]
for _, endpointPort := range endpointPorts {
if servicePort.Name == endpointPort.Name {
port = endpointPort
}
}
return int(port.Port)
}
return servicePort.Port
}
func equalPorts(servicePort k8s.ServicePort, ingressPort k8s.IntOrString) bool {
if servicePort.Port == ingressPort.IntValue() {
return true
}
if servicePort.Name != "" && servicePort.Name == ingressPort.String() {
return true
}
return false
}
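// exampleEqualPorts is an illustrative sketch, not part of the upstream file:
// an ingress backend may reference a service port either by number or by
// name, and equalPorts accepts both forms for the same ServicePort.
func exampleEqualPorts() (bool, bool) {
servicePort := k8s.ServicePort{Name: "web", Port: 8080}
byNumber := equalPorts(servicePort, k8s.FromInt(8080))   // true: numeric match
byName := equalPorts(servicePort, k8s.FromString("web")) // true: named match
return byNumber, byName
}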
func (provider *Kubernetes) getPassHostHeader() bool {
if provider.DisablePassHostHeaders {
return false
}
return true
}
func (provider *Kubernetes) loadConfig(templateObjects types.Configuration) *types.Configuration {
var FuncMap = template.FuncMap{}
configuration, err := provider.getConfiguration("templates/kubernetes.tmpl", FuncMap, templateObjects)
if err != nil {
log.Error(err)
}
return configuration
}

View file

@@ -0,0 +1,218 @@
// Package provider holds the different provider implementation.
package provider
import (
"fmt"
"strings"
"text/template"
"time"
"errors"
"github.com/BurntSushi/ty/fun"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
)
// Kv holds common configurations of key-value providers.
type Kv struct {
BaseProvider `mapstructure:",squash"`
Endpoint string `description:"Comma separated server endpoints"`
Prefix string `description:"Prefix used for KV store"`
TLS *ClientTLS `description:"Enable TLS support"`
storeType store.Backend
kvclient store.Store
}
func (provider *Kv) createStore() (store.Store, error) {
storeConfig := &store.Config{
ConnectionTimeout: 30 * time.Second,
Bucket: "traefik",
}
if provider.TLS != nil {
var err error
storeConfig.TLS, err = provider.TLS.CreateTLSConfig()
if err != nil {
return nil, err
}
}
return libkv.NewStore(
provider.storeType,
strings.Split(provider.Endpoint, ","),
storeConfig,
)
}
func (provider *Kv) watchKv(configurationChan chan<- types.ConfigMessage, prefix string, stop chan bool) error {
operation := func() error {
events, err := provider.kvclient.WatchTree(provider.Prefix, make(chan struct{}))
if err != nil {
return fmt.Errorf("Failed to KV WatchTree: %v", err)
}
for {
select {
case <-stop:
return nil
case _, ok := <-events:
if !ok {
return errors.New("watchtree channel closed")
}
configuration := provider.loadConfig()
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: string(provider.storeType),
Configuration: configuration,
}
}
}
}
}
notify := func(err error, time time.Duration) {
log.Errorf("KV connection error: %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
return fmt.Errorf("Cannot connect to KV server: %v", err)
}
return nil
}
func (provider *Kv) provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
provider.Constraints = append(provider.Constraints, constraints...)
operation := func() error {
if _, err := provider.kvclient.Exists("qmslkjdfmqlskdjfmqlksjazçueznbvbwzlkajzebvkwjdcqmlsfj"); err != nil {
return fmt.Errorf("Failed to test KV store connection: %v", err)
}
if provider.Watch {
pool.Go(func(stop chan bool) {
err := provider.watchKv(configurationChan, provider.Prefix, stop)
if err != nil {
log.Errorf("Cannot watch KV store: %v", err)
}
})
}
configuration := provider.loadConfig()
configurationChan <- types.ConfigMessage{
ProviderName: string(provider.storeType),
Configuration: configuration,
}
return nil
}
notify := func(err error, time time.Duration) {
log.Errorf("KV connection error: %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
return fmt.Errorf("Cannot connect to KV server: %v", err)
}
return nil
}
func (provider *Kv) loadConfig() *types.Configuration {
templateObjects := struct {
Prefix string
}{
// Allow `/traefik/alias` to supersede `provider.Prefix`
strings.TrimSuffix(provider.get(provider.Prefix, provider.Prefix+"/alias"), "/"),
}
var KvFuncMap = template.FuncMap{
"List": provider.list,
"ListServers": provider.listServers,
"Get": provider.get,
"SplitGet": provider.splitGet,
"Last": provider.last,
}
configuration, err := provider.getConfiguration("templates/kv.tmpl", KvFuncMap, templateObjects)
if err != nil {
log.Error(err)
}
for key, frontend := range configuration.Frontends {
if _, ok := configuration.Backends[frontend.Backend]; !ok {
delete(configuration.Frontends, key)
}
}
return configuration
}
func (provider *Kv) list(keys ...string) []string {
joinedKeys := strings.Join(keys, "")
keysPairs, err := provider.kvclient.List(joinedKeys)
if err != nil {
log.Debugf("Cannot get keys %s %s ", joinedKeys, err)
return nil
}
directoryKeys := make(map[string]string)
for _, key := range keysPairs {
directory := strings.Split(strings.TrimPrefix(key.Key, joinedKeys), "/")[0]
directoryKeys[directory] = joinedKeys + directory
}
return fun.Values(directoryKeys).([]string)
}
func (provider *Kv) listServers(backend string) []string {
serverNames := provider.list(backend, "/servers/")
return fun.Filter(func(serverName string) bool {
return provider.checkConstraints(serverName, "/tags")
}, serverNames).([]string)
}
func (provider *Kv) get(defaultValue string, keys ...string) string {
joinedKeys := strings.Join(keys, "")
keyPair, err := provider.kvclient.Get(strings.TrimPrefix(joinedKeys, "/"))
if err != nil {
log.Debugf("Cannot get key %s %s, setting default %s", joinedKeys, err, defaultValue)
return defaultValue
} else if keyPair == nil {
log.Debugf("Cannot get key %s, setting default %s", joinedKeys, defaultValue)
return defaultValue
}
return string(keyPair.Value)
}
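// exampleKvGet is an illustrative sketch, not part of the upstream file: the
// variadic keys are joined before the lookup and the first argument is only a
// fallback, so a missing "<prefix>/frontends/front1/backend" key (the
// frontend name here is hypothetical) yields "default-backend".
func exampleKvGet(provider *Kv) string {
return provider.get("default-backend", provider.Prefix, "/frontends/front1/backend")
}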
func (provider *Kv) splitGet(keys ...string) []string {
joinedKeys := strings.Join(keys, "")
keyPair, err := provider.kvclient.Get(joinedKeys)
if err != nil {
log.Debugf("Cannot get key %s %s, setting default empty", joinedKeys, err)
return []string{}
} else if keyPair == nil {
log.Debugf("Cannot get key %s, setting default %empty", joinedKeys)
return []string{}
}
return strings.Split(string(keyPair.Value), ",")
}
func (provider *Kv) last(key string) string {
splittedKey := strings.Split(key, "/")
return splittedKey[len(splittedKey)-1]
}
func (provider *Kv) checkConstraints(keys ...string) bool {
joinedKeys := strings.Join(keys, "")
keyPair, err := provider.kvclient.Get(joinedKeys)
value := ""
if err == nil && keyPair != nil && keyPair.Value != nil {
value = string(keyPair.Value)
}
constraintTags := strings.Split(value, ",")
ok, failingConstraint := provider.MatchConstraints(constraintTags)
if !ok {
if failingConstraint != nil {
log.Debugf("Constraint %v not matching with following tags: %v", failingConstraint.String(), value)
}
return false
}
return true
}

View file

@@ -0,0 +1,494 @@
package provider
import (
"errors"
"net"
"net/url"
"sort"
"strconv"
"strings"
"text/template"
"math"
"net/http"
"time"
"github.com/BurntSushi/ty/fun"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/gambol99/go-marathon"
)
var _ Provider = (*Marathon)(nil)
// Marathon holds configuration of the Marathon provider.
type Marathon struct {
BaseProvider
Endpoint string `description:"Marathon server endpoint. You can also specify multiple endpoints for Marathon"`
Domain string `description:"Default domain used"`
ExposedByDefault bool `description:"Expose Marathon apps by default"`
GroupsAsSubDomains bool `description:"Convert Marathon groups to subdomains"`
DCOSToken string `description:"DCOSToken for DCOS environment; this will override the Authorization header"`
MarathonLBCompatibility bool `description:"Add compatibility with marathon-lb labels"`
TLS *ClientTLS `description:"Enable TLS support"`
DialerTimeout time.Duration `description:"Set a non-default connection timeout for Marathon"`
Basic *MarathonBasic
marathonClient marathon.Marathon
}
// MarathonBasic holds basic authentication specific configurations
type MarathonBasic struct {
HTTPBasicAuthUser string
HTTPBasicPassword string
}
type lightMarathonClient interface {
AllTasks(v url.Values) (*marathon.Tasks, error)
Applications(url.Values) (*marathon.Applications, error)
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Marathon) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
provider.Constraints = append(provider.Constraints, constraints...)
operation := func() error {
config := marathon.NewDefaultConfig()
config.URL = provider.Endpoint
config.EventsTransport = marathon.EventsTransportSSE
if provider.Basic != nil {
config.HTTPBasicAuthUser = provider.Basic.HTTPBasicAuthUser
config.HTTPBasicPassword = provider.Basic.HTTPBasicPassword
}
if len(provider.DCOSToken) > 0 {
config.DCOSToken = provider.DCOSToken
}
TLSConfig, err := provider.TLS.CreateTLSConfig()
if err != nil {
return err
}
config.HTTPClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: TLSConfig,
DialContext: (&net.Dialer{
Timeout: time.Second * provider.DialerTimeout,
}).DialContext,
},
}
client, err := marathon.NewClient(config)
if err != nil {
log.Errorf("Failed to create a client for marathon, error: %s", err)
return err
}
provider.marathonClient = client
update := make(marathon.EventsChannel, 5)
if provider.Watch {
if err := client.AddEventsListener(update, marathon.EventIDApplications); err != nil {
log.Errorf("Failed to register for events, %s", err)
return err
}
pool.Go(func(stop chan bool) {
defer close(update)
for {
select {
case <-stop:
return
case event := <-update:
log.Debug("Marathon event receveived", event)
configuration := provider.loadMarathonConfig()
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: "marathon",
Configuration: configuration,
}
}
}
}
})
}
configuration := provider.loadMarathonConfig()
configurationChan <- types.ConfigMessage{
ProviderName: "marathon",
Configuration: configuration,
}
return nil
}
notify := func(err error, time time.Duration) {
log.Errorf("Marathon connection error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Cannot connect to Marathon server %+v", err)
}
return nil
}
func (provider *Marathon) loadMarathonConfig() *types.Configuration {
var MarathonFuncMap = template.FuncMap{
"getBackend": provider.getBackend,
"getPort": provider.getPort,
"getWeight": provider.getWeight,
"getDomain": provider.getDomain,
"getProtocol": provider.getProtocol,
"getPassHostHeader": provider.getPassHostHeader,
"getPriority": provider.getPriority,
"getEntryPoints": provider.getEntryPoints,
"getFrontendRule": provider.getFrontendRule,
"getFrontendBackend": provider.getFrontendBackend,
"replace": replace,
"hasCircuitBreakerLabels": provider.hasCircuitBreakerLabels,
"hasLoadBalancerLabels": provider.hasLoadBalancerLabels,
"hasMaxConnLabels": provider.hasMaxConnLabels,
"getMaxConnExtractorFunc": provider.getMaxConnExtractorFunc,
"getMaxConnAmount": provider.getMaxConnAmount,
"getLoadBalancerMethod": provider.getLoadBalancerMethod,
"getCircuitBreakerExpression": provider.getCircuitBreakerExpression,
"getSticky": provider.getSticky,
}
applications, err := provider.marathonClient.Applications(nil)
if err != nil {
log.Errorf("Failed to create a client for marathon, error: %s", err)
return nil
}
tasks, err := provider.marathonClient.AllTasks(&marathon.AllTasksOpts{Status: "running"})
if err != nil {
log.Errorf("Failed to create a client for marathon, error: %s", err)
return nil
}
//filter tasks
filteredTasks := fun.Filter(func(task marathon.Task) bool {
return provider.taskFilter(task, applications, provider.ExposedByDefault)
}, tasks.Tasks).([]marathon.Task)
//filter apps
filteredApps := fun.Filter(func(app marathon.Application) bool {
return provider.applicationFilter(app, filteredTasks)
}, applications.Apps).([]marathon.Application)
templateObjects := struct {
Applications []marathon.Application
Tasks []marathon.Task
Domain string
}{
filteredApps,
filteredTasks,
provider.Domain,
}
configuration, err := provider.getConfiguration("templates/marathon.tmpl", MarathonFuncMap, templateObjects)
if err != nil {
log.Error(err)
}
return configuration
}
func (provider *Marathon) taskFilter(task marathon.Task, applications *marathon.Applications, exposedByDefaultFlag bool) bool {
if len(task.Ports) == 0 {
log.Debug("Filtering marathon task without port %s", task.AppID)
return false
}
application, err := getApplication(task, applications.Apps)
if err != nil {
log.Errorf("Unable to get marathon application from task %s", task.AppID)
return false
}
label, _ := provider.getLabel(application, "traefik.tags")
constraintTags := strings.Split(label, ",")
if provider.MarathonLBCompatibility {
if label, err := provider.getLabel(application, "HAPROXY_GROUP"); err == nil {
constraintTags = append(constraintTags, label)
}
}
if ok, failingConstraint := provider.MatchConstraints(constraintTags); !ok {
if failingConstraint != nil {
log.Debugf("Application %v pruned by '%v' constraint", application.ID, failingConstraint.String())
}
return false
}
if !isApplicationEnabled(application, exposedByDefaultFlag) {
log.Debugf("Filtering disabled marathon task %s", task.AppID)
return false
}
//filter indeterminable task port
portIndexLabel := (*application.Labels)["traefik.portIndex"]
portValueLabel := (*application.Labels)["traefik.port"]
if portIndexLabel != "" && portValueLabel != "" {
log.Debugf("Filtering marathon task %s specifying both traefik.portIndex and traefik.port labels", task.AppID)
return false
}
if portIndexLabel == "" && portValueLabel == "" && len(application.Ports) > 1 {
log.Debugf("Filtering marathon task %s with more than 1 port and no traefik.portIndex or traefik.port label", task.AppID)
return false
}
if portIndexLabel != "" {
index, err := strconv.Atoi((*application.Labels)["traefik.portIndex"])
if err != nil || index < 0 || index > len(application.Ports)-1 {
log.Debugf("Filtering marathon task %s with unexpected value for traefik.portIndex label", task.AppID)
return false
}
}
if portValueLabel != "" {
port, err := strconv.Atoi((*application.Labels)["traefik.port"])
if err != nil {
log.Debugf("Filtering marathon task %s with unexpected value for traefik.port label", task.AppID)
return false
}
var foundPort bool
for _, exposedPort := range task.Ports {
if port == exposedPort {
foundPort = true
break
}
}
if !foundPort {
log.Debugf("Filtering marathon task %s without a matching port for traefik.port label", task.AppID)
return false
}
}
//filter healthchecks
if application.HasHealthChecks() {
if task.HasHealthCheckResults() {
for _, healthcheck := range task.HealthCheckResults {
// found one bad healthcheck, return false
if !healthcheck.Alive {
log.Debugf("Filtering marathon task %s with bad healthcheck", task.AppID)
return false
}
}
}
}
return true
}
func (provider *Marathon) applicationFilter(app marathon.Application, filteredTasks []marathon.Task) bool {
label, _ := provider.getLabel(app, "traefik.tags")
constraintTags := strings.Split(label, ",")
if provider.MarathonLBCompatibility {
if label, err := provider.getLabel(app, "HAPROXY_GROUP"); err == nil {
constraintTags = append(constraintTags, label)
}
}
if ok, failingConstraint := provider.MatchConstraints(constraintTags); !ok {
if failingConstraint != nil {
log.Debugf("Application %v pruned by '%v' constraint", app.ID, failingConstraint.String())
}
return false
}
return fun.Exists(func(task marathon.Task) bool {
return task.AppID == app.ID
}, filteredTasks)
}
func getApplication(task marathon.Task, apps []marathon.Application) (marathon.Application, error) {
for _, application := range apps {
if application.ID == task.AppID {
return application, nil
}
}
return marathon.Application{}, errors.New("Application not found: " + task.AppID)
}
func isApplicationEnabled(application marathon.Application, exposedByDefault bool) bool {
return exposedByDefault && (*application.Labels)["traefik.enable"] != "false" || (*application.Labels)["traefik.enable"] == "true"
}
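// exampleIsApplicationEnabled is an illustrative sketch, not part of the
// upstream file: with exposedByDefault=true an application is enabled unless
// it sets traefik.enable=false, while with exposedByDefault=false it must opt
// in with traefik.enable=true.
func exampleIsApplicationEnabled() (bool, bool) {
optOut := marathon.Application{Labels: &map[string]string{"traefik.enable": "false"}}
optIn := marathon.Application{Labels: &map[string]string{"traefik.enable": "true"}}
return isApplicationEnabled(optOut, true), isApplicationEnabled(optIn, false) // false, true
}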
func (provider *Marathon) getLabel(application marathon.Application, label string) (string, error) {
for key, value := range *application.Labels {
if key == label {
return value, nil
}
}
return "", errors.New("Label not found:" + label)
}
func (provider *Marathon) getPort(task marathon.Task, applications []marathon.Application) string {
application, err := getApplication(task, applications)
if err != nil {
log.Errorf("Unable to get marathon application from task %s", task.AppID)
return ""
}
if portIndexLabel, err := provider.getLabel(application, "traefik.portIndex"); err == nil {
if index, err := strconv.Atoi(portIndexLabel); err == nil {
return strconv.Itoa(task.Ports[index])
}
}
if portValueLabel, err := provider.getLabel(application, "traefik.port"); err == nil {
return portValueLabel
}
for _, port := range task.Ports {
return strconv.Itoa(port)
}
return ""
}
func (provider *Marathon) getWeight(task marathon.Task, applications []marathon.Application) string {
application, errApp := getApplication(task, applications)
if errApp != nil {
log.Errorf("Unable to get marathon application from task %s", task.AppID)
return "0"
}
if label, err := provider.getLabel(application, "traefik.weight"); err == nil {
return label
}
return "0"
}
func (provider *Marathon) getDomain(application marathon.Application) string {
if label, err := provider.getLabel(application, "traefik.domain"); err == nil {
return label
}
return provider.Domain
}
func (provider *Marathon) getProtocol(task marathon.Task, applications []marathon.Application) string {
application, errApp := getApplication(task, applications)
if errApp != nil {
log.Errorf("Unable to get marathon application from task %s", task.AppID)
return "http"
}
if label, err := provider.getLabel(application, "traefik.protocol"); err == nil {
return label
}
return "http"
}
func (provider *Marathon) getSticky(application marathon.Application) string {
if sticky, err := provider.getLabel(application, "traefik.backend.loadbalancer.sticky"); err == nil {
return sticky
}
return "false"
}
func (provider *Marathon) getPassHostHeader(application marathon.Application) string {
if passHostHeader, err := provider.getLabel(application, "traefik.frontend.passHostHeader"); err == nil {
return passHostHeader
}
return "true"
}
func (provider *Marathon) getPriority(application marathon.Application) string {
if priority, err := provider.getLabel(application, "traefik.frontend.priority"); err == nil {
return priority
}
return "0"
}
func (provider *Marathon) getEntryPoints(application marathon.Application) []string {
if entryPoints, err := provider.getLabel(application, "traefik.frontend.entryPoints"); err == nil {
return strings.Split(entryPoints, ",")
}
return []string{}
}
// getFrontendRule returns the frontend rule for the specified application, using
// its label. It returns a default one (Host) if the label is not present.
func (provider *Marathon) getFrontendRule(application marathon.Application) string {
if label, err := provider.getLabel(application, "traefik.frontend.rule"); err == nil {
return label
}
if provider.MarathonLBCompatibility {
if label, err := provider.getLabel(application, "HAPROXY_0_VHOST"); err == nil {
return "Host:" + label
}
}
return "Host:" + provider.getSubDomain(application.ID) + "." + provider.Domain
}
func (provider *Marathon) getBackend(task marathon.Task, applications []marathon.Application) string {
application, errApp := getApplication(task, applications)
if errApp != nil {
log.Errorf("Unable to get marathon application from task %s", task.AppID)
return ""
}
return provider.getFrontendBackend(application)
}
func (provider *Marathon) getFrontendBackend(application marathon.Application) string {
if label, err := provider.getLabel(application, "traefik.backend"); err == nil {
return label
}
return replace("/", "-", application.ID)
}
func (provider *Marathon) getSubDomain(name string) string {
if provider.GroupsAsSubDomains {
splitedName := strings.Split(strings.TrimPrefix(name, "/"), "/")
sort.Sort(sort.Reverse(sort.StringSlice(splitedName)))
reverseName := strings.Join(splitedName, ".")
return reverseName
}
return strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1)
}
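// exampleGetSubDomain is an illustrative sketch, not part of the upstream
// file: with GroupsAsSubDomains an application ID such as "/dev/web" becomes
// the dot-separated sub-domain "web.dev", otherwise it is flattened to
// "dev-web" before the provider domain is appended.
func exampleGetSubDomain() (string, string) {
grouped := &Marathon{GroupsAsSubDomains: true}
flat := &Marathon{}
return grouped.getSubDomain("/dev/web"), flat.getSubDomain("/dev/web") // "web.dev", "dev-web"
}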
func (provider *Marathon) hasCircuitBreakerLabels(application marathon.Application) bool {
if _, err := provider.getLabel(application, "traefik.backend.circuitbreaker.expression"); err != nil {
return false
}
return true
}
func (provider *Marathon) hasLoadBalancerLabels(application marathon.Application) bool {
_, errMethod := provider.getLabel(application, "traefik.backend.loadbalancer.method")
_, errSticky := provider.getLabel(application, "traefik.backend.loadbalancer.sticky")
if errMethod != nil && errSticky != nil {
return false
}
return true
}
func (provider *Marathon) hasMaxConnLabels(application marathon.Application) bool {
if _, err := provider.getLabel(application, "traefik.backend.maxconn.amount"); err != nil {
return false
}
if _, err := provider.getLabel(application, "traefik.backend.maxconn.extractorfunc"); err != nil {
return false
}
return true
}
func (provider *Marathon) getMaxConnAmount(application marathon.Application) int64 {
if label, err := provider.getLabel(application, "traefik.backend.maxconn.amount"); err == nil {
i, errConv := strconv.ParseInt(label, 10, 64)
if errConv != nil {
log.Errorf("Unable to parse traefik.backend.maxconn.amount %s", label)
return math.MaxInt64
}
return i
}
return math.MaxInt64
}
func (provider *Marathon) getMaxConnExtractorFunc(application marathon.Application) string {
if label, err := provider.getLabel(application, "traefik.backend.maxconn.extractorfunc"); err == nil {
return label
}
return "request.host"
}
func (provider *Marathon) getLoadBalancerMethod(application marathon.Application) string {
if label, err := provider.getLabel(application, "traefik.backend.loadbalancer.method"); err == nil {
return label
}
return "wrr"
}
func (provider *Marathon) getCircuitBreakerExpression(application marathon.Application) string {
if label, err := provider.getLabel(application, "traefik.backend.circuitbreaker.expression"); err == nil {
return label
}
return "NetworkErrorRatio() > 1"
}
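// When the corresponding label is absent, the getters above fall back to:
//
//   traefik.protocol                           "http"
//   traefik.backend.loadbalancer.sticky        "false"
//   traefik.frontend.passHostHeader            "true"
//   traefik.frontend.priority                  "0"
//   traefik.frontend.entryPoints               (empty list)
//   traefik.backend.maxconn.amount             math.MaxInt64
//   traefik.backend.maxconn.extractorfunc      "request.host"
//   traefik.backend.loadbalancer.method        "wrr"
//   traefik.backend.circuitbreaker.expression  "NetworkErrorRatio() > 1"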

View file

@ -0,0 +1,447 @@
package provider
import (
"errors"
"strconv"
"strings"
"text/template"
"fmt"
"github.com/BurntSushi/ty/fun"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/mesos/mesos-go/detector"
_ "github.com/mesos/mesos-go/detector/zoo" // Registers the ZK detector
"github.com/mesosphere/mesos-dns/detect"
"github.com/mesosphere/mesos-dns/logging"
"github.com/mesosphere/mesos-dns/records"
"github.com/mesosphere/mesos-dns/records/state"
"github.com/mesosphere/mesos-dns/util"
"sort"
"time"
)
var _ Provider = (*Mesos)(nil)
//Mesos holds configuration of the mesos provider.
type Mesos struct {
BaseProvider
Endpoint string `description:"Mesos server endpoint. You can also specify multiple endpoints for Mesos"`
Domain string `description:"Default domain used"`
ExposedByDefault bool `description:"Expose Mesos apps by default"`
GroupsAsSubDomains bool `description:"Convert Mesos groups to subdomains"`
ZkDetectionTimeout int `description:"Zookeeper timeout (in seconds)"`
RefreshSeconds int `description:"Polling interval (in seconds)"`
IPSources string `description:"IPSources (e.g. host, docker, mesos, rkt)"` // e.g. "host", "docker", "mesos", "rkt"
StateTimeoutSecond int `description:"HTTP Timeout (in seconds)"`
Masters []string
}
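// Illustrative static configuration for this provider (TOML; the keys map to
// the struct fields above, the values are placeholders):
//
//   [mesos]
//   endpoint = "zk://zk1.example:2181,zk2.example:2181/mesos"
//   domain = "mesos.localhost"
//   exposedByDefault = true
//   watch = true
//   refreshSeconds = 30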
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Mesos) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
operation := func() error {
// initialize logging
logging.SetupLogs()
log.Debugf("%s", provider.IPSources)
var zk string
var masters []string
if strings.HasPrefix(provider.Endpoint, "zk://") {
zk = provider.Endpoint
} else {
masters = strings.Split(provider.Endpoint, ",")
}
errch := make(chan error)
changed := detectMasters(zk, masters)
reload := time.NewTicker(time.Second * time.Duration(provider.RefreshSeconds))
zkTimeout := time.Second * time.Duration(provider.ZkDetectionTimeout)
timeout := time.AfterFunc(zkTimeout, func() {
if zkTimeout > 0 {
errch <- fmt.Errorf("master detection timed out after %s", zkTimeout)
}
})
defer reload.Stop()
defer util.HandleCrash()
if !provider.Watch {
reload.Stop()
timeout.Stop()
}
for {
select {
case <-reload.C:
configuration := provider.loadMesosConfig()
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: "mesos",
Configuration: configuration,
}
}
case masters := <-changed:
if len(masters) == 0 || masters[0] == "" {
// no leader
timeout.Reset(zkTimeout)
} else {
timeout.Stop()
}
log.Debugf("new masters detected: %v", masters)
provider.Masters = masters
configuration := provider.loadMesosConfig()
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: "mesos",
Configuration: configuration,
}
}
case err := <-errch:
log.Errorf("%s", err)
}
}
}
notify := func(err error, time time.Duration) {
log.Errorf("mesos connection error %+v, retrying in %s", err, time)
}
err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
log.Errorf("Cannot connect to mesos server %+v", err)
}
return nil
}
func (provider *Mesos) loadMesosConfig() *types.Configuration {
var mesosFuncMap = template.FuncMap{
"getBackend": provider.getBackend,
"getPort": provider.getPort,
"getHost": provider.getHost,
"getWeight": provider.getWeight,
"getDomain": provider.getDomain,
"getProtocol": provider.getProtocol,
"getPassHostHeader": provider.getPassHostHeader,
"getPriority": provider.getPriority,
"getEntryPoints": provider.getEntryPoints,
"getFrontendRule": provider.getFrontendRule,
"getFrontendBackend": provider.getFrontendBackend,
"getID": provider.getID,
"getFrontEndName": provider.getFrontEndName,
"replace": replace,
}
t := records.NewRecordGenerator(time.Duration(provider.StateTimeoutSecond) * time.Second)
sj, err := t.FindMaster(provider.Masters...)
if err != nil {
log.Errorf("Failed to create a client for mesos, error: %s", err)
return nil
}
tasks := provider.taskRecords(sj)
//filter tasks
filteredTasks := fun.Filter(func(task state.Task) bool {
return mesosTaskFilter(task, provider.ExposedByDefault)
}, tasks).([]state.Task)
filteredApps := []state.Task{}
for _, value := range filteredTasks {
if !taskInSlice(value, filteredApps) {
filteredApps = append(filteredApps, value)
}
}
templateObjects := struct {
Applications []state.Task
Tasks []state.Task
Domain string
}{
filteredApps,
filteredTasks,
provider.Domain,
}
configuration, err := provider.getConfiguration("templates/mesos.tmpl", mesosFuncMap, templateObjects)
if err != nil {
log.Error(err)
}
return configuration
}
func taskInSlice(a state.Task, list []state.Task) bool {
for _, b := range list {
if b.DiscoveryInfo.Name == a.DiscoveryInfo.Name {
return true
}
}
return false
}
// labels returns the value of the first task label whose key equals the given
// key, or an empty string if no such label exists
func labels(task state.Task, key string) string {
for _, l := range task.Labels {
if l.Key == key {
return l.Value
}
}
return ""
}
func mesosTaskFilter(task state.Task, exposedByDefaultFlag bool) bool {
if len(task.DiscoveryInfo.Ports.DiscoveryPorts) == 0 {
log.Debugf("Filtering mesos task without port %s", task.Name)
return false
}
if !isMesosApplicationEnabled(task, exposedByDefaultFlag) {
log.Debugf("Filtering disabled mesos task %s", task.DiscoveryInfo.Name)
return false
}
//filter indeterminable task port
portIndexLabel := labels(task, "traefik.portIndex")
portValueLabel := labels(task, "traefik.port")
if portIndexLabel != "" && portValueLabel != "" {
log.Debugf("Filtering mesos task %s specifying both traefik.portIndex and traefik.port labels", task.Name)
return false
}
if portIndexLabel == "" && portValueLabel == "" && len(task.DiscoveryInfo.Ports.DiscoveryPorts) > 1 {
log.Debugf("Filtering mesos task %s with more than 1 port and no traefik.portIndex or traefik.port label", task.Name)
return false
}
if portIndexLabel != "" {
index, err := strconv.Atoi(labels(task, "traefik.portIndex"))
if err != nil || index < 0 || index > len(task.DiscoveryInfo.Ports.DiscoveryPorts)-1 {
log.Debugf("Filtering mesos task %s with unexpected value for traefik.portIndex label", task.Name)
return false
}
}
if portValueLabel != "" {
port, err := strconv.Atoi(labels(task, "traefik.port"))
if err != nil {
log.Debugf("Filtering mesos task %s with unexpected value for traefik.port label", task.Name)
return false
}
var foundPort bool
for _, exposedPort := range task.DiscoveryInfo.Ports.DiscoveryPorts {
if port == exposedPort.Number {
foundPort = true
break
}
}
if !foundPort {
log.Debugf("Filtering mesos task %s without a matching port for traefik.port label", task.Name)
return false
}
}
//filter healthchecks
if task.Statuses != nil && len(task.Statuses) > 0 && task.Statuses[0].Healthy != nil && !*task.Statuses[0].Healthy {
log.Debugf("Filtering mesos task %s with bad healthcheck", task.DiscoveryInfo.Name)
return false
}
return true
}
func getMesos(task state.Task, apps []state.Task) (state.Task, error) {
for _, application := range apps {
if application.DiscoveryInfo.Name == task.DiscoveryInfo.Name {
return application, nil
}
}
return state.Task{}, errors.New("Application not found: " + task.DiscoveryInfo.Name)
}
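// isMesosApplicationEnabled reports whether a task should be exposed, based on
// the exposedByDefault flag and the traefik.enable label:
//
//   exposedByDefault  traefik.enable  exposed
//   true              unset/other     true
//   true              "false"         false
//   false             "true"          true
//   false             unset/other     false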
func isMesosApplicationEnabled(task state.Task, exposedByDefault bool) bool {
return exposedByDefault && labels(task, "traefik.enable") != "false" || labels(task, "traefik.enable") == "true"
}
func (provider *Mesos) getLabel(task state.Task, label string) (string, error) {
for _, tmpLabel := range task.Labels {
if tmpLabel.Key == label {
return tmpLabel.Value, nil
}
}
return "", errors.New("Label not found: " + label)
}
func (provider *Mesos) getPort(task state.Task, applications []state.Task) string {
application, err := getMesos(task, applications)
if err != nil {
log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
return ""
}
if portIndexLabel, err := provider.getLabel(application, "traefik.portIndex"); err == nil {
if index, err := strconv.Atoi(portIndexLabel); err == nil {
return strconv.Itoa(task.DiscoveryInfo.Ports.DiscoveryPorts[index].Number)
}
}
if portValueLabel, err := provider.getLabel(application, "traefik.port"); err == nil {
return portValueLabel
}
for _, port := range task.DiscoveryInfo.Ports.DiscoveryPorts {
return strconv.Itoa(port.Number)
}
return ""
}
func (provider *Mesos) getWeight(task state.Task, applications []state.Task) string {
application, errApp := getMesos(task, applications)
if errApp != nil {
log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
return "0"
}
if label, err := provider.getLabel(application, "traefik.weight"); err == nil {
return label
}
return "0"
}
func (provider *Mesos) getDomain(task state.Task) string {
if label, err := provider.getLabel(task, "traefik.domain"); err == nil {
return label
}
return provider.Domain
}
func (provider *Mesos) getProtocol(task state.Task, applications []state.Task) string {
application, errApp := getMesos(task, applications)
if errApp != nil {
log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
return "http"
}
if label, err := provider.getLabel(application, "traefik.protocol"); err == nil {
return label
}
return "http"
}
func (provider *Mesos) getPassHostHeader(task state.Task) string {
if passHostHeader, err := provider.getLabel(task, "traefik.frontend.passHostHeader"); err == nil {
return passHostHeader
}
return "false"
}
func (provider *Mesos) getPriority(task state.Task) string {
if priority, err := provider.getLabel(task, "traefik.frontend.priority"); err == nil {
return priority
}
return "0"
}
func (provider *Mesos) getEntryPoints(task state.Task) []string {
if entryPoints, err := provider.getLabel(task, "traefik.frontend.entryPoints"); err == nil {
return strings.Split(entryPoints, ",")
}
return []string{}
}
// getFrontendRule returns the frontend rule for the specified application, using
// its label. It returns a default one (Host) if the label is not present.
func (provider *Mesos) getFrontendRule(task state.Task) string {
if label, err := provider.getLabel(task, "traefik.frontend.rule"); err == nil {
return label
}
return "Host:" + strings.ToLower(strings.Replace(provider.getSubDomain(task.DiscoveryInfo.Name), "_", "-", -1)) + "." + provider.Domain
}
func (provider *Mesos) getBackend(task state.Task, applications []state.Task) string {
application, errApp := getMesos(task, applications)
if errApp != nil {
log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
return ""
}
return provider.getFrontendBackend(application)
}
func (provider *Mesos) getFrontendBackend(task state.Task) string {
if label, err := provider.getLabel(task, "traefik.backend"); err == nil {
return label
}
return "-" + cleanupSpecialChars(task.DiscoveryInfo.Name)
}
func (provider *Mesos) getHost(task state.Task) string {
return task.IP(strings.Split(provider.IPSources, ",")...)
}
func (provider *Mesos) getID(task state.Task) string {
return cleanupSpecialChars(task.ID)
}
func (provider *Mesos) getFrontEndName(task state.Task) string {
return strings.Replace(cleanupSpecialChars(task.ID), "/", "-", -1)
}
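// cleanupSpecialChars replaces ".", ":" and "_" with dashes, so a task ID such
// as "app_web.1:container" becomes "app-web-1-container".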
func cleanupSpecialChars(s string) string {
return strings.Replace(strings.Replace(strings.Replace(s, ".", "-", -1), ":", "-", -1), "_", "-", -1)
}
func detectMasters(zk string, masters []string) <-chan []string {
changed := make(chan []string, 1)
if zk != "" {
log.Debugf("Starting master detector for ZK %s", zk)
if md, err := detector.New(zk); err != nil {
log.Errorf("failed to create master detector: %v", err)
} else if err := md.Detect(detect.NewMasters(masters, changed)); err != nil {
log.Errorf("failed to initialize master detector: %v", err)
}
} else {
changed <- masters
}
return changed
}
func (provider *Mesos) taskRecords(sj state.State) []state.Task {
var p []state.Task // == nil
for _, f := range sj.Frameworks {
for _, task := range f.Tasks {
for _, slave := range sj.Slaves {
if task.SlaveID == slave.ID {
task.SlaveIP = slave.Hostname
}
}
// only do running and discoverable tasks
if task.State == "TASK_RUNNING" {
p = append(p, task)
}
}
}
return p
}
// ErrorFunction is a function definition that returns an error,
// to be passed to the Ignore or Panic error handler
type ErrorFunction func() error
// Ignore calls an ErrorFunction and ignores the result.
// This allows us to be more explicit when there is no error
// handling to be done, for example in defers
func Ignore(f ErrorFunction) {
_ = f()
}
func (provider *Mesos) getSubDomain(name string) string {
if provider.GroupsAsSubDomains {
splitedName := strings.Split(strings.TrimPrefix(name, "/"), "/")
sort.Sort(sort.Reverse(sort.StringSlice(splitedName)))
reverseName := strings.Join(splitedName, ".")
return reverseName
}
return strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1)
}

View file

@ -0,0 +1,164 @@
package provider
import (
"bytes"
"io/ioutil"
"strings"
"text/template"
"unicode"
"crypto/tls"
"crypto/x509"
"fmt"
"os"
"github.com/BurntSushi/toml"
"github.com/containous/traefik/autogen"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
)
// Provider defines methods of a provider.
type Provider interface {
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error
}
// BaseProvider should be inherited by providers
type BaseProvider struct {
Watch bool `description:"Watch provider"`
Filename string `description:"Override default configuration template. For advanced users :)"`
Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags."`
}
// MatchConstraints checks whether the given tags satisfy EVERY single constraint.
// It returns the first constraint that does not match, or nil.
func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) {
// if there are no tags and no constraints, filtering is disabled
if len(tags) == 0 && len(p.Constraints) == 0 {
return true, nil
}
for _, constraint := range p.Constraints {
// xor: if ok and constraint.MustMatch differ, the constraint is not satisfied
if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch {
return false, &constraint
}
}
// Either there is no constraint, or every constraint matches
return true, nil
}
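// Illustrative example (the constraint contents are hypothetical): with tags
// ["api", "us-east-1"] and a single constraint matching "api" with MustMatch
// set to true, MatchConstraints returns (true, nil); with MustMatch set to
// false on that same constraint it returns (false, &constraint), because a tag
// matches a constraint that must not match.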
func (p *BaseProvider) getConfiguration(defaultTemplateFile string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) {
var (
buf []byte
err error
)
configuration := new(types.Configuration)
tmpl := template.New(p.Filename).Funcs(funcMap)
if len(p.Filename) > 0 {
buf, err = ioutil.ReadFile(p.Filename)
if err != nil {
return nil, err
}
} else {
buf, err = autogen.Asset(defaultTemplateFile)
if err != nil {
return nil, err
}
}
_, err = tmpl.Parse(string(buf))
if err != nil {
return nil, err
}
var buffer bytes.Buffer
err = tmpl.Execute(&buffer, templateObjects)
if err != nil {
return nil, err
}
var renderedTemplate = buffer.String()
// log.Debugf("Rendering results of %s:\n%s", defaultTemplateFile, renderedTemplate)
if _, err := toml.Decode(renderedTemplate, configuration); err != nil {
return nil, err
}
return configuration, nil
}
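// The rendered template above must decode into types.Configuration, i.e. it
// must be valid TOML along the lines of this illustrative sketch:
//
//   [backends.backend1.servers.server1]
//   url = "http://10.0.0.1:80"
//
//   [frontends.frontend1]
//   backend = "backend1"
//   [frontends.frontend1.routes.route1]
//   rule = "Host:example.com"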
func replace(s1 string, s2 string, s3 string) string {
return strings.Replace(s3, s1, s2, -1)
}
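// normalize collapses every run of non-alphanumeric characters into a single
// dash, e.g. "foo.bar_baz/1" becomes "foo-bar-baz-1".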
func normalize(name string) string {
fargs := func(c rune) bool {
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
}
return strings.Join(strings.FieldsFunc(name, fargs), "-")
}
// ClientTLS holds the TLS configuration used as a client.
// CA, Cert and Key can each be either a file path or the file contents.
type ClientTLS struct {
CA string `description:"TLS CA"`
Cert string `description:"TLS cert"`
Key string `description:"TLS key"`
InsecureSkipVerify bool `description:"TLS insecure skip verify"`
}
// CreateTLSConfig creates a TLS config from ClientTLS structures
func (clientTLS *ClientTLS) CreateTLSConfig() (*tls.Config, error) {
var err error
if clientTLS == nil {
log.Warnf("clientTLS is nil")
return nil, nil
}
caPool := x509.NewCertPool()
if clientTLS.CA != "" {
var ca []byte
if _, errCA := os.Stat(clientTLS.CA); errCA == nil {
ca, err = ioutil.ReadFile(clientTLS.CA)
if err != nil {
return nil, fmt.Errorf("Failed to read CA. %s", err)
}
} else {
ca = []byte(clientTLS.CA)
}
caPool.AppendCertsFromPEM(ca)
}
cert := tls.Certificate{}
_, errKeyIsFile := os.Stat(clientTLS.Key)
if _, errCertIsFile := os.Stat(clientTLS.Cert); errCertIsFile == nil {
if errKeyIsFile == nil {
cert, err = tls.LoadX509KeyPair(clientTLS.Cert, clientTLS.Key)
if err != nil {
return nil, fmt.Errorf("Failed to load TLS keypair: %v", err)
}
} else {
return nil, fmt.Errorf("tls cert is a file, but tls key is not")
}
} else {
if errKeyIsFile != nil {
cert, err = tls.X509KeyPair([]byte(clientTLS.Cert), []byte(clientTLS.Key))
if err != nil {
return nil, fmt.Errorf("Failed to load TLS keypair: %v", err)
}
} else {
return nil, fmt.Errorf("tls key is a file, but tls cert is not")
}
}
TLSConfig := &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caPool,
InsecureSkipVerify: clientTLS.InsecureSkipVerify,
}
return TLSConfig, nil
}
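// exampleClientTLSUsage is an illustrative sketch (not part of the original
// commit) of how CreateTLSConfig is typically used; the file paths below are
// placeholders.
func exampleClientTLSUsage() (*tls.Config, error) {
clientTLS := &ClientTLS{
CA:                 "/etc/ssl/ca.pem", // may also hold the PEM contents inline
Cert:               "/etc/ssl/client.crt",
Key:                "/etc/ssl/client.key",
InsecureSkipVerify: false,
}
// The resulting *tls.Config can then be plugged into a KV store client or an HTTP transport.
return clientTLS.CreateTLSConfig()
}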

View file

@ -0,0 +1,34 @@
package provider
import (
"fmt"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/zookeeper"
)
var _ Provider = (*Zookepper)(nil)
// Zookepper holds the configuration of the Zookeeper provider.
type Zookepper struct {
Kv `mapstructure:",squash"`
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Zookepper) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints []types.Constraint) error {
store, err := provider.CreateStore()
if err != nil {
return fmt.Errorf("Failed to Connect to KV store: %v", err)
}
provider.kvclient = store
return provider.provide(configurationChan, pool, constraints)
}
// CreateStore creates the KV store
func (provider *Zookepper) CreateStore() (store.Store, error) {
provider.storeType = store.ZK
zookeeper.Register()
return provider.createStore()
}

View file

@ -0,0 +1,202 @@
package main
import (
"errors"
"fmt"
"github.com/BurntSushi/ty/fun"
"github.com/containous/mux"
"github.com/containous/traefik/types"
"net"
"net/http"
"reflect"
"sort"
"strings"
)
// Rules holds rule parsing and configuration
type Rules struct {
route *serverRoute
err error
}
func (r *Rules) host(hosts ...string) *mux.Route {
return r.route.route.MatcherFunc(func(req *http.Request, route *mux.RouteMatch) bool {
reqHost, _, err := net.SplitHostPort(req.Host)
if err != nil {
reqHost = req.Host
}
for _, host := range hosts {
if types.CanonicalDomain(reqHost) == types.CanonicalDomain(host) {
return true
}
}
return false
})
}
func (r *Rules) hostRegexp(hosts ...string) *mux.Route {
router := r.route.route.Subrouter()
for _, host := range hosts {
router.Host(types.CanonicalDomain(host))
}
return r.route.route
}
func (r *Rules) path(paths ...string) *mux.Route {
router := r.route.route.Subrouter()
for _, path := range paths {
router.Path(strings.TrimSpace(path))
}
return r.route.route
}
func (r *Rules) pathPrefix(paths ...string) *mux.Route {
router := r.route.route.Subrouter()
for _, path := range paths {
router.PathPrefix(strings.TrimSpace(path))
}
return r.route.route
}
type bySize []string
func (a bySize) Len() int { return len(a) }
func (a bySize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a bySize) Less(i, j int) bool { return len(a[i]) > len(a[j]) }
func (r *Rules) pathStrip(paths ...string) *mux.Route {
sort.Sort(bySize(paths))
r.route.stripPrefixes = paths
router := r.route.route.Subrouter()
for _, path := range paths {
router.Path(strings.TrimSpace(path))
}
return r.route.route
}
func (r *Rules) pathPrefixStrip(paths ...string) *mux.Route {
sort.Sort(bySize(paths))
r.route.stripPrefixes = paths
router := r.route.route.Subrouter()
for _, path := range paths {
router.PathPrefix(strings.TrimSpace(path))
}
return r.route.route
}
func (r *Rules) methods(methods ...string) *mux.Route {
return r.route.route.Methods(methods...)
}
func (r *Rules) headers(headers ...string) *mux.Route {
return r.route.route.Headers(headers...)
}
func (r *Rules) headersRegexp(headers ...string) *mux.Route {
return r.route.route.HeadersRegexp(headers...)
}
func (r *Rules) parseRules(expression string, onRule func(functionName string, function interface{}, arguments []string) error) error {
functions := map[string]interface{}{
"Host": r.host,
"HostRegexp": r.hostRegexp,
"Path": r.path,
"PathStrip": r.pathStrip,
"PathPrefix": r.pathPrefix,
"PathPrefixStrip": r.pathPrefixStrip,
"Method": r.methods,
"Headers": r.headers,
"HeadersRegexp": r.headersRegexp,
}
if len(expression) == 0 {
return errors.New("Empty rule")
}
f := func(c rune) bool {
return c == ':'
}
// Allow multiple rules separated by ;
splitRule := func(c rune) bool {
return c == ';'
}
parsedRules := strings.FieldsFunc(expression, splitRule)
for _, rule := range parsedRules {
// get function
parsedFunctions := strings.FieldsFunc(rule, f)
if len(parsedFunctions) == 0 {
return errors.New("Error parsing rule: '" + rule + "'")
}
functionName := strings.TrimSpace(parsedFunctions[0])
parsedFunction, ok := functions[functionName]
if !ok {
return errors.New("Error parsing rule: '" + rule + "'. Unknown function: '" + parsedFunctions[0] + "'")
}
parsedFunctions = append(parsedFunctions[:0], parsedFunctions[1:]...)
fargs := func(c rune) bool {
return c == ','
}
// get function arguments
parsedArgs := strings.FieldsFunc(strings.Join(parsedFunctions, ":"), fargs)
if len(parsedArgs) == 0 {
return errors.New("Error parsing args from rule: '" + rule + "'")
}
for i := range parsedArgs {
parsedArgs[i] = strings.TrimSpace(parsedArgs[i])
}
err := onRule(functionName, parsedFunction, parsedArgs)
if err != nil {
return fmt.Errorf("Parsing error on rule: %v", err)
}
}
return nil
}
// Parse parses rules expressions
func (r *Rules) Parse(expression string) (*mux.Route, error) {
var resultRoute *mux.Route
err := r.parseRules(expression, func(functionName string, function interface{}, arguments []string) error {
inputs := make([]reflect.Value, len(arguments))
for i := range arguments {
inputs[i] = reflect.ValueOf(arguments[i])
}
method := reflect.ValueOf(function)
if method.IsValid() {
resultRoute = method.Call(inputs)[0].Interface().(*mux.Route)
if r.err != nil {
return r.err
}
if resultRoute.GetError() != nil {
return resultRoute.GetError()
}
} else {
return errors.New("Method not found: '" + functionName + "'")
}
return nil
})
if err != nil {
return nil, fmt.Errorf("Error parsing rule: %v", err)
}
return resultRoute, nil
}
// ParseDomains parses rules expressions and returns domains
func (r *Rules) ParseDomains(expression string) ([]string, error) {
domains := []string{}
err := r.parseRules(expression, func(functionName string, function interface{}, arguments []string) error {
if functionName == "Host" {
domains = append(domains, arguments...)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("Error parsing domains: %v", err)
}
return fun.Map(types.CanonicalDomain, domains).([]string), nil
}
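// exampleParseDomains is an illustrative sketch (not part of the original
// commit): it extracts the domains referenced by a rule expression without
// building any route; only "Host" arguments are collected.
func exampleParseDomains() ([]string, error) {
rules := &Rules{}
return rules.ParseDomains("Host:example.com,www.example.com;PathPrefix:/api")
}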

View file

@ -0,0 +1,146 @@
package safe
import (
"context"
"github.com/containous/traefik/log"
"runtime/debug"
"sync"
)
type routine struct {
goroutine func(chan bool)
stop chan bool
}
type routineCtx func(ctx context.Context)
// Pool is a pool of go routines
type Pool struct {
routines []routine
routinesCtx []routineCtx
waitGroup sync.WaitGroup
lock sync.Mutex
baseCtx context.Context
baseCancel context.CancelFunc
ctx context.Context
cancel context.CancelFunc
}
// NewPool creates a Pool
func NewPool(parentCtx context.Context) *Pool {
baseCtx, baseCancel := context.WithCancel(parentCtx)
ctx, cancel := context.WithCancel(baseCtx)
return &Pool{
baseCtx: baseCtx,
baseCancel: baseCancel,
ctx: ctx,
cancel: cancel,
}
}
// Ctx returns main context
func (p *Pool) Ctx() context.Context {
return p.baseCtx
}
//AddGoCtx adds a recoverable goroutine with a context without starting it
func (p *Pool) AddGoCtx(goroutine routineCtx) {
p.lock.Lock()
p.routinesCtx = append(p.routinesCtx, goroutine)
p.lock.Unlock()
}
//GoCtx starts a recoverable goroutine with a context
func (p *Pool) GoCtx(goroutine routineCtx) {
p.lock.Lock()
p.routinesCtx = append(p.routinesCtx, goroutine)
p.waitGroup.Add(1)
Go(func() {
goroutine(p.ctx)
p.waitGroup.Done()
})
p.lock.Unlock()
}
// Go starts a recoverable goroutine, and can be stopped with stop chan
func (p *Pool) Go(goroutine func(stop chan bool)) {
p.lock.Lock()
newRoutine := routine{
goroutine: goroutine,
stop: make(chan bool, 1),
}
p.routines = append(p.routines, newRoutine)
p.waitGroup.Add(1)
Go(func() {
goroutine(newRoutine.stop)
p.waitGroup.Done()
})
p.lock.Unlock()
}
// Stop stops all started routines, waiting for their termination
func (p *Pool) Stop() {
p.lock.Lock()
defer p.lock.Unlock()
p.cancel()
for _, routine := range p.routines {
routine.stop <- true
}
p.waitGroup.Wait()
for _, routine := range p.routines {
close(routine.stop)
}
}
// Cleanup releases resources used by the pool, and should be called when the pool will no longer be used
func (p *Pool) Cleanup() {
p.Stop()
p.lock.Lock()
defer p.lock.Unlock()
p.baseCancel()
}
// Start starts all stopped routines
func (p *Pool) Start() {
p.lock.Lock()
defer p.lock.Unlock()
p.ctx, p.cancel = context.WithCancel(p.baseCtx)
for i := range p.routines {
p.waitGroup.Add(1)
// recreate the stop channel on the slice element itself (not on a copy),
// and capture the routine locally so each goroutine uses its own value
p.routines[i].stop = make(chan bool, 1)
routine := p.routines[i]
Go(func() {
routine.goroutine(routine.stop)
p.waitGroup.Done()
})
}
for _, routineCtx := range p.routinesCtx {
p.waitGroup.Add(1)
routineCtx := routineCtx // capture the loop variable for the goroutine below
Go(func() {
routineCtx(p.ctx)
p.waitGroup.Done()
})
}
}
// Go starts a recoverable goroutine
func Go(goroutine func()) {
GoWithRecover(goroutine, defaultRecoverGoroutine)
}
// GoWithRecover starts a recoverable goroutine using given customRecover() function
func GoWithRecover(goroutine func(), customRecover func(err interface{})) {
go func() {
defer func() {
if err := recover(); err != nil {
customRecover(err)
}
}()
goroutine()
}()
}
func defaultRecoverGoroutine(err interface{}) {
log.Errorf("Error in Go routine: %s", err)
debug.PrintStack()
}
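// examplePoolUsage is an illustrative sketch (not part of the original commit)
// of the intended Pool lifecycle: start a stoppable goroutine, then stop every
// routine, wait for termination and release the pool.
func examplePoolUsage() {
p := NewPool(context.Background())
p.Go(func(stop chan bool) {
// a real routine would also select on its work channels
<-stop
})
p.Stop()
p.Cleanup()
}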

View file

@ -0,0 +1,30 @@
package safe
import (
"sync"
)
// Safe contains a thread-safe value
type Safe struct {
value interface{}
lock sync.RWMutex
}
// New creates a new Safe instance given a value
func New(value interface{}) *Safe {
return &Safe{value: value, lock: sync.RWMutex{}}
}
// Get returns the value
func (s *Safe) Get() interface{} {
s.lock.RLock()
defer s.lock.RUnlock()
return s.value
}
// Set sets a new value
func (s *Safe) Set(value interface{}) {
s.lock.Lock()
defer s.lock.Unlock()
s.value = value
}
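// exampleSafeUsage is an illustrative sketch (not part of the original
// commit): a value shared between goroutines through Safe. Note that a
// Get-then-Set sequence is not atomic as a pair.
func exampleSafeUsage() int {
counter := New(0)
counter.Set(counter.Get().(int) + 1)
return counter.Get().(int)
}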

View file

@ -0,0 +1,780 @@
/*
Copyright
*/
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/signal"
"reflect"
"regexp"
"sort"
"syscall"
"time"
"github.com/codegangsta/negroni"
"github.com/containous/mux"
"github.com/containous/traefik/cluster"
"github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares"
"github.com/containous/traefik/provider"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/mailgun/manners"
"github.com/streamrail/concurrent-map"
"github.com/vulcand/oxy/cbreaker"
"github.com/vulcand/oxy/connlimit"
"github.com/vulcand/oxy/forward"
"github.com/vulcand/oxy/roundrobin"
"github.com/vulcand/oxy/utils"
)
var oxyLogger = &OxyLogger{}
// Server is the reverse-proxy/load-balancer engine
type Server struct {
serverEntryPoints serverEntryPoints
configurationChan chan types.ConfigMessage
configurationValidatedChan chan types.ConfigMessage
signals chan os.Signal
stopChan chan bool
providers []provider.Provider
currentConfigurations safe.Safe
globalConfiguration GlobalConfiguration
loggerMiddleware *middlewares.Logger
routinesPool *safe.Pool
leadership *cluster.Leadership
}
type serverEntryPoints map[string]*serverEntryPoint
type serverEntryPoint struct {
httpServer *manners.GracefulServer
httpRouter *middlewares.HandlerSwitcher
}
type serverRoute struct {
route *mux.Route
stripPrefixes []string
}
// NewServer returns an initialized Server.
func NewServer(globalConfiguration GlobalConfiguration) *Server {
server := new(Server)
server.serverEntryPoints = make(map[string]*serverEntryPoint)
server.configurationChan = make(chan types.ConfigMessage, 100)
server.configurationValidatedChan = make(chan types.ConfigMessage, 100)
server.signals = make(chan os.Signal, 1)
server.stopChan = make(chan bool, 1)
server.providers = []provider.Provider{}
signal.Notify(server.signals, syscall.SIGINT, syscall.SIGTERM)
currentConfigurations := make(configs)
server.currentConfigurations.Set(currentConfigurations)
server.globalConfiguration = globalConfiguration
server.loggerMiddleware = middlewares.NewLogger(globalConfiguration.AccessLogsFile)
server.routinesPool = safe.NewPool(context.Background())
if globalConfiguration.Cluster != nil {
// leadership creation if cluster mode
server.leadership = cluster.NewLeadership(server.routinesPool.Ctx(), globalConfiguration.Cluster)
}
return server
}
// Start starts the server and blocks until the server is shut down.
func (server *Server) Start() {
server.startHTTPServers()
server.startLeadership()
server.routinesPool.Go(func(stop chan bool) {
server.listenProviders(stop)
})
server.routinesPool.Go(func(stop chan bool) {
server.listenConfigurations(stop)
})
server.configureProviders()
server.startProviders()
go server.listenSignals()
<-server.stopChan
}
// Stop stops the server
func (server *Server) Stop() {
for serverEntryPointName, serverEntryPoint := range server.serverEntryPoints {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(server.globalConfiguration.GraceTimeOut)*time.Second)
go func() {
log.Debugf("Waiting %d seconds before killing connections on entrypoint %s...", server.globalConfiguration.GraceTimeOut, serverEntryPointName)
serverEntryPoint.httpServer.BlockingClose()
cancel()
}()
<-ctx.Done()
}
server.stopChan <- true
}
// Close destroys the server
func (server *Server) Close() {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(server.globalConfiguration.GraceTimeOut)*time.Second)
go func(ctx context.Context) {
<-ctx.Done()
if ctx.Err() == context.Canceled {
return
} else if ctx.Err() == context.DeadlineExceeded {
log.Warnf("Timeout while stopping traefik, killing instance ✝")
os.Exit(1)
}
}(ctx)
server.stopLeadership()
server.routinesPool.Cleanup()
close(server.configurationChan)
close(server.configurationValidatedChan)
signal.Stop(server.signals)
close(server.signals)
close(server.stopChan)
server.loggerMiddleware.Close()
cancel()
}
func (server *Server) startLeadership() {
if server.leadership != nil {
server.leadership.Participate(server.routinesPool)
// server.leadership.AddGoCtx(func(ctx context.Context) {
// log.Debugf("Started test routine")
// <-ctx.Done()
// log.Debugf("Stopped test routine")
// })
}
}
func (server *Server) stopLeadership() {
if server.leadership != nil {
server.leadership.Stop()
}
}
func (server *Server) startHTTPServers() {
server.serverEntryPoints = server.buildEntryPoints(server.globalConfiguration)
for newServerEntryPointName, newServerEntryPoint := range server.serverEntryPoints {
serverMiddlewares := []negroni.Handler{server.loggerMiddleware, metrics}
if server.globalConfiguration.Web != nil && server.globalConfiguration.Web.Statistics != nil {
statsRecorder = &StatsRecorder{
numRecentErrors: server.globalConfiguration.Web.Statistics.RecentErrors,
}
serverMiddlewares = append(serverMiddlewares, statsRecorder)
}
if server.globalConfiguration.EntryPoints[newServerEntryPointName].Auth != nil {
authMiddleware, err := middlewares.NewAuthenticator(server.globalConfiguration.EntryPoints[newServerEntryPointName].Auth)
if err != nil {
log.Fatal("Error starting server: ", err)
}
serverMiddlewares = append(serverMiddlewares, authMiddleware)
}
if server.globalConfiguration.EntryPoints[newServerEntryPointName].Compress {
serverMiddlewares = append(serverMiddlewares, &middlewares.Compress{})
}
newsrv, err := server.prepareServer(newServerEntryPointName, newServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], nil, serverMiddlewares...)
if err != nil {
log.Fatal("Error preparing server: ", err)
}
serverEntryPoint := server.serverEntryPoints[newServerEntryPointName]
serverEntryPoint.httpServer = newsrv
go server.startServer(serverEntryPoint.httpServer, server.globalConfiguration)
}
}
func (server *Server) listenProviders(stop chan bool) {
lastReceivedConfiguration := safe.New(time.Unix(0, 0))
lastConfigs := cmap.New()
for {
select {
case <-stop:
return
case configMsg, ok := <-server.configurationChan:
if !ok {
return
}
server.defaultConfigurationValues(configMsg.Configuration)
currentConfigurations := server.currentConfigurations.Get().(configs)
jsonConf, _ := json.Marshal(configMsg.Configuration)
log.Debugf("Configuration received from provider %s: %s", configMsg.ProviderName, string(jsonConf))
if configMsg.Configuration == nil || configMsg.Configuration.Backends == nil && configMsg.Configuration.Frontends == nil {
log.Infof("Skipping empty Configuration for provider %s", configMsg.ProviderName)
} else if reflect.DeepEqual(currentConfigurations[configMsg.ProviderName], configMsg.Configuration) {
log.Infof("Skipping same configuration for provider %s", configMsg.ProviderName)
} else {
lastConfigs.Set(configMsg.ProviderName, &configMsg)
lastReceivedConfigurationValue := lastReceivedConfiguration.Get().(time.Time)
if time.Now().After(lastReceivedConfigurationValue.Add(time.Duration(server.globalConfiguration.ProvidersThrottleDuration))) {
log.Debugf("Last %s config received more than %s, OK", configMsg.ProviderName, server.globalConfiguration.ProvidersThrottleDuration)
// last config received more than n s ago
server.configurationValidatedChan <- configMsg
} else {
log.Debugf("Last %s config received less than %s, waiting...", configMsg.ProviderName, server.globalConfiguration.ProvidersThrottleDuration)
safe.Go(func() {
<-time.After(server.globalConfiguration.ProvidersThrottleDuration)
lastReceivedConfigurationValue := lastReceivedConfiguration.Get().(time.Time)
if time.Now().After(lastReceivedConfigurationValue.Add(time.Duration(server.globalConfiguration.ProvidersThrottleDuration))) {
log.Debugf("Waited for %s config, OK", configMsg.ProviderName)
if lastConfig, ok := lastConfigs.Get(configMsg.ProviderName); ok {
server.configurationValidatedChan <- *lastConfig.(*types.ConfigMessage)
}
}
})
}
lastReceivedConfiguration.Set(time.Now())
}
}
}
}
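// Illustrative timeline for the throttling above: with
// ProvidersThrottleDuration = 2s, a configuration received at t=0 is forwarded
// immediately; configurations arriving at t=0.5s and t=1s are only buffered in
// lastConfigs, and the most recent one is forwarded once no newer
// configuration has arrived for 2s.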
func (server *Server) defaultConfigurationValues(configuration *types.Configuration) {
if configuration == nil || configuration.Frontends == nil {
return
}
for _, frontend := range configuration.Frontends {
// default endpoints if not defined in frontends
if len(frontend.EntryPoints) == 0 {
frontend.EntryPoints = server.globalConfiguration.DefaultEntryPoints
}
}
for backendName, backend := range configuration.Backends {
_, err := types.NewLoadBalancerMethod(backend.LoadBalancer)
if err != nil {
log.Debugf("Load balancer method '%+v' for backend %s: %v. Using default wrr.", backend.LoadBalancer, backendName, err)
backend.LoadBalancer = &types.LoadBalancer{Method: "wrr"}
}
}
}
func (server *Server) listenConfigurations(stop chan bool) {
for {
select {
case <-stop:
return
case configMsg, ok := <-server.configurationValidatedChan:
if !ok {
return
}
currentConfigurations := server.currentConfigurations.Get().(configs)
// Copy configurations to new map so we don't change current if LoadConfig fails
newConfigurations := make(configs)
for k, v := range currentConfigurations {
newConfigurations[k] = v
}
newConfigurations[configMsg.ProviderName] = configMsg.Configuration
newServerEntryPoints, err := server.loadConfig(newConfigurations, server.globalConfiguration)
if err == nil {
for newServerEntryPointName, newServerEntryPoint := range newServerEntryPoints {
server.serverEntryPoints[newServerEntryPointName].httpRouter.UpdateHandler(newServerEntryPoint.httpRouter.GetHandler())
log.Infof("Server configuration reloaded on %s", server.serverEntryPoints[newServerEntryPointName].httpServer.Addr)
}
server.currentConfigurations.Set(newConfigurations)
server.postLoadConfig()
} else {
log.Error("Error loading new configuration, aborted ", err)
}
}
}
}
func (server *Server) postLoadConfig() {
if server.globalConfiguration.ACME == nil {
return
}
if server.leadership != nil && !server.leadership.IsLeader() {
return
}
if server.globalConfiguration.ACME.OnHostRule {
currentConfigurations := server.currentConfigurations.Get().(configs)
for _, configuration := range currentConfigurations {
for _, frontend := range configuration.Frontends {
for _, route := range frontend.Routes {
rules := Rules{}
domains, err := rules.ParseDomains(route.Rule)
if err != nil {
log.Errorf("Error parsing domains: %v", err)
} else {
server.globalConfiguration.ACME.LoadCertificateForDomains(domains)
}
}
}
}
}
}
func (server *Server) configureProviders() {
// configure providers
if server.globalConfiguration.Docker != nil {
server.providers = append(server.providers, server.globalConfiguration.Docker)
}
if server.globalConfiguration.Marathon != nil {
server.providers = append(server.providers, server.globalConfiguration.Marathon)
}
if server.globalConfiguration.File != nil {
server.providers = append(server.providers, server.globalConfiguration.File)
}
if server.globalConfiguration.Web != nil {
server.globalConfiguration.Web.server = server
server.providers = append(server.providers, server.globalConfiguration.Web)
}
if server.globalConfiguration.Consul != nil {
server.providers = append(server.providers, server.globalConfiguration.Consul)
}
if server.globalConfiguration.ConsulCatalog != nil {
server.providers = append(server.providers, server.globalConfiguration.ConsulCatalog)
}
if server.globalConfiguration.Etcd != nil {
server.providers = append(server.providers, server.globalConfiguration.Etcd)
}
if server.globalConfiguration.Zookeeper != nil {
server.providers = append(server.providers, server.globalConfiguration.Zookeeper)
}
if server.globalConfiguration.Boltdb != nil {
server.providers = append(server.providers, server.globalConfiguration.Boltdb)
}
if server.globalConfiguration.Kubernetes != nil {
server.providers = append(server.providers, server.globalConfiguration.Kubernetes)
}
if server.globalConfiguration.Mesos != nil {
server.providers = append(server.providers, server.globalConfiguration.Mesos)
}
if server.globalConfiguration.Eureka != nil {
server.providers = append(server.providers, server.globalConfiguration.Eureka)
}
}
func (server *Server) startProviders() {
// start providers
for _, provider := range server.providers {
jsonConf, _ := json.Marshal(provider)
log.Infof("Starting provider %v %s", reflect.TypeOf(provider), jsonConf)
currentProvider := provider
safe.Go(func() {
err := currentProvider.Provide(server.configurationChan, server.routinesPool, server.globalConfiguration.Constraints)
if err != nil {
log.Errorf("Error starting provider %s", err)
}
})
}
}
func (server *Server) listenSignals() {
sig := <-server.signals
log.Infof("I have to go... %+v", sig)
log.Info("Stopping server")
server.Stop()
}
// creates a TLS config that allows terminating HTTPS for multiple domains using SNI
func (server *Server) createTLSConfig(entryPointName string, tlsOption *TLS, router *middlewares.HandlerSwitcher) (*tls.Config, error) {
if tlsOption == nil {
return nil, nil
}
config, err := tlsOption.Certificates.CreateTLSConfig()
if err != nil {
return nil, err
}
if len(tlsOption.ClientCAFiles) > 0 {
pool := x509.NewCertPool()
for _, caFile := range tlsOption.ClientCAFiles {
data, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, err
}
ok := pool.AppendCertsFromPEM(data)
if !ok {
return nil, errors.New("invalid certificate(s) in " + caFile)
}
}
config.ClientCAs = pool
config.ClientAuth = tls.RequireAndVerifyClientCert
}
if server.globalConfiguration.ACME != nil {
if _, ok := server.serverEntryPoints[server.globalConfiguration.ACME.EntryPoint]; ok {
if entryPointName == server.globalConfiguration.ACME.EntryPoint {
checkOnDemandDomain := func(domain string) bool {
routeMatch := &mux.RouteMatch{}
router := router.GetHandler()
match := router.Match(&http.Request{URL: &url.URL{}, Host: domain}, routeMatch)
if match && routeMatch.Route != nil {
return true
}
return false
}
if server.leadership == nil {
err := server.globalConfiguration.ACME.CreateLocalConfig(config, checkOnDemandDomain)
if err != nil {
return nil, err
}
} else {
err := server.globalConfiguration.ACME.CreateClusterConfig(server.leadership, config, checkOnDemandDomain)
if err != nil {
return nil, err
}
}
}
} else {
return nil, errors.New("Unknown entrypoint " + server.globalConfiguration.ACME.EntryPoint + " for ACME configuration")
}
}
if len(config.Certificates) == 0 {
return nil, errors.New("No certificates found for TLS entrypoint " + entryPointName)
}
// BuildNameToCertificate parses the CommonName and SubjectAlternateName fields
// in each certificate and populates the config.NameToCertificate map.
config.BuildNameToCertificate()
//Set the minimum TLS version if set in the config TOML
if minConst, exists := minVersion[server.globalConfiguration.EntryPoints[entryPointName].TLS.MinVersion]; exists {
config.PreferServerCipherSuites = true
config.MinVersion = minConst
}
//Set the list of CipherSuites if set in the config TOML
if server.globalConfiguration.EntryPoints[entryPointName].TLS.CipherSuites != nil {
//if our list of CipherSuites is defined in the entrypoint config, we can re-initialize the suites list as empty
config.CipherSuites = make([]uint16, 0)
for _, cipher := range server.globalConfiguration.EntryPoints[entryPointName].TLS.CipherSuites {
if cipherConst, exists := cipherSuites[cipher]; exists {
config.CipherSuites = append(config.CipherSuites, cipherConst)
} else {
//CipherSuite listed in the toml does not exist in our list of supported suites
return nil, errors.New("Invalid CipherSuite: " + cipher)
}
}
}
return config, nil
}
func (server *Server) startServer(srv *manners.GracefulServer, globalConfiguration GlobalConfiguration) {
log.Infof("Starting server on %s", srv.Addr)
if srv.TLSConfig != nil {
if err := srv.ListenAndServeTLSWithConfig(srv.TLSConfig); err != nil {
log.Fatal("Error creating server: ", err)
}
} else {
if err := srv.ListenAndServe(); err != nil {
log.Fatal("Error creating server: ", err)
}
}
log.Info("Server stopped")
}
func (server *Server) prepareServer(entryPointName string, router *middlewares.HandlerSwitcher, entryPoint *EntryPoint, oldServer *manners.GracefulServer, middlewares ...negroni.Handler) (*manners.GracefulServer, error) {
log.Infof("Preparing server %s %+v", entryPointName, entryPoint)
// middlewares
var negroni = negroni.New()
for _, middleware := range middlewares {
negroni.Use(middleware)
}
negroni.UseHandler(router)
tlsConfig, err := server.createTLSConfig(entryPointName, entryPoint.TLS, router)
if err != nil {
log.Errorf("Error creating TLS config %s", err)
return nil, err
}
if oldServer == nil {
return manners.NewWithServer(
&http.Server{
Addr: entryPoint.Address,
Handler: negroni,
TLSConfig: tlsConfig,
}), nil
}
gracefulServer, err := oldServer.HijackListener(&http.Server{
Addr: entryPoint.Address,
Handler: negroni,
TLSConfig: tlsConfig,
}, tlsConfig)
if err != nil {
log.Errorf("Error hijacking server %s", err)
return nil, err
}
return gracefulServer, nil
}
func (server *Server) buildEntryPoints(globalConfiguration GlobalConfiguration) map[string]*serverEntryPoint {
serverEntryPoints := make(map[string]*serverEntryPoint)
for entryPointName := range globalConfiguration.EntryPoints {
router := server.buildDefaultHTTPRouter()
serverEntryPoints[entryPointName] = &serverEntryPoint{
httpRouter: middlewares.NewHandlerSwitcher(router),
}
}
return serverEntryPoints
}
// loadConfig builds the server entry points (gorilla/mux routers and their routes) from the specified
// global configuration and the dynamic provider configurations.
func (server *Server) loadConfig(configurations configs, globalConfiguration GlobalConfiguration) (map[string]*serverEntryPoint, error) {
serverEntryPoints := server.buildEntryPoints(globalConfiguration)
redirectHandlers := make(map[string]http.Handler)
backends := map[string]http.Handler{}
backend2FrontendMap := map[string]string{}
for _, configuration := range configurations {
frontendNames := sortedFrontendNamesForConfig(configuration)
frontend:
for _, frontendName := range frontendNames {
frontend := configuration.Frontends[frontendName]
log.Debugf("Creating frontend %s", frontendName)
fwd, err := forward.New(forward.Logger(oxyLogger), forward.PassHostHeader(frontend.PassHostHeader))
if err != nil {
log.Errorf("Error creating forwarder for frontend %s: %v", frontendName, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
saveBackend := middlewares.NewSaveBackend(fwd)
if len(frontend.EntryPoints) == 0 {
log.Errorf("No entrypoint defined for frontend %s, defaultEntryPoints:%s", frontendName, globalConfiguration.DefaultEntryPoints)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
for _, entryPointName := range frontend.EntryPoints {
log.Debugf("Wiring frontend %s to entryPoint %s", frontendName, entryPointName)
if _, ok := serverEntryPoints[entryPointName]; !ok {
log.Errorf("Undefined entrypoint '%s' for frontend %s", entryPointName, frontendName)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
newServerRoute := &serverRoute{route: serverEntryPoints[entryPointName].httpRouter.GetHandler().NewRoute().Name(frontendName)}
for routeName, route := range frontend.Routes {
err := getRoute(newServerRoute, &route)
if err != nil {
log.Errorf("Error creating route for frontend %s: %v", frontendName, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
log.Debugf("Creating route %s %s", routeName, route.Rule)
}
entryPoint := globalConfiguration.EntryPoints[entryPointName]
if entryPoint.Redirect != nil {
if redirectHandlers[entryPointName] != nil {
newServerRoute.route.Handler(redirectHandlers[entryPointName])
} else if handler, err := server.loadEntryPointConfig(entryPointName, entryPoint); err != nil {
log.Errorf("Error loading entrypoint configuration for frontend %s: %v", frontendName, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
} else {
newServerRoute.route.Handler(handler)
redirectHandlers[entryPointName] = handler
}
} else {
if backends[frontend.Backend] == nil {
log.Debugf("Creating backend %s", frontend.Backend)
var lb http.Handler
rr, _ := roundrobin.New(saveBackend)
if configuration.Backends[frontend.Backend] == nil {
log.Errorf("Undefined backend '%s' for frontend %s", frontend.Backend, frontendName)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
lbMethod, err := types.NewLoadBalancerMethod(configuration.Backends[frontend.Backend].LoadBalancer)
if err != nil {
log.Errorf("Error loading load balancer method '%+v' for frontend %s: %v", configuration.Backends[frontend.Backend].LoadBalancer, frontendName, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
stickysession := configuration.Backends[frontend.Backend].LoadBalancer.Sticky
cookiename := "_TRAEFIK_BACKEND"
var sticky *roundrobin.StickySession
if stickysession {
sticky = roundrobin.NewStickySession(cookiename)
}
switch lbMethod {
case types.Drr:
log.Debugf("Creating load-balancer drr")
rebalancer, _ := roundrobin.NewRebalancer(rr, roundrobin.RebalancerLogger(oxyLogger))
if stickysession {
log.Debugf("Sticky session with cookie %v", cookiename)
rebalancer, _ = roundrobin.NewRebalancer(rr, roundrobin.RebalancerLogger(oxyLogger), roundrobin.RebalancerStickySession(sticky))
}
lb = rebalancer
for serverName, server := range configuration.Backends[frontend.Backend].Servers {
url, err := url.Parse(server.URL)
if err != nil {
log.Errorf("Error parsing server URL %s: %v", server.URL, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
backend2FrontendMap[url.String()] = frontendName
log.Debugf("Creating server %s at %s with weight %d", serverName, url.String(), server.Weight)
if err := rebalancer.UpsertServer(url, roundrobin.Weight(server.Weight)); err != nil {
log.Errorf("Error adding server %s to load balancer: %v", server.URL, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
}
case types.Wrr:
log.Debugf("Creating load-balancer wrr")
if stickysession {
log.Debugf("Sticky session with cookie %v", cookiename)
rr, _ = roundrobin.New(saveBackend, roundrobin.EnableStickySession(sticky))
}
lb = rr
for serverName, server := range configuration.Backends[frontend.Backend].Servers {
url, err := url.Parse(server.URL)
if err != nil {
log.Errorf("Error parsing server URL %s: %v", server.URL, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
backend2FrontendMap[url.String()] = frontendName
log.Debugf("Creating server %s at %s with weight %d", serverName, url.String(), server.Weight)
if err := rr.UpsertServer(url, roundrobin.Weight(server.Weight)); err != nil {
log.Errorf("Error adding server %s to load balancer: %v", server.URL, err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
}
}
maxConns := configuration.Backends[frontend.Backend].MaxConn
if maxConns != nil && maxConns.Amount != 0 {
extractFunc, err := utils.NewExtractor(maxConns.ExtractorFunc)
if err != nil {
log.Errorf("Error creating connlimit: %v", err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
log.Debugf("Creating load-balancer connlimit")
lb, err = connlimit.New(lb, extractFunc, maxConns.Amount, connlimit.Logger(oxyLogger))
if err != nil {
log.Errorf("Error creating connlimit: %v", err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
}
// retry ?
if globalConfiguration.Retry != nil {
retries := len(configuration.Backends[frontend.Backend].Servers)
if globalConfiguration.Retry.Attempts > 0 {
retries = globalConfiguration.Retry.Attempts
}
lb = middlewares.NewRetry(retries, lb)
log.Debugf("Creating retries max attempts %d", retries)
}
var negroni = negroni.New()
if configuration.Backends[frontend.Backend].CircuitBreaker != nil {
log.Debugf("Creating circuit breaker %s", configuration.Backends[frontend.Backend].CircuitBreaker.Expression)
cbreaker, err := middlewares.NewCircuitBreaker(lb, configuration.Backends[frontend.Backend].CircuitBreaker.Expression, cbreaker.Logger(oxyLogger))
if err != nil {
log.Errorf("Error creating circuit breaker: %v", err)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
negroni.Use(cbreaker)
} else {
negroni.UseHandler(lb)
}
backends[frontend.Backend] = negroni
} else {
log.Debugf("Reusing backend %s", frontend.Backend)
}
if frontend.Priority > 0 {
newServerRoute.route.Priority(frontend.Priority)
}
server.wireFrontendBackend(newServerRoute, backends[frontend.Backend])
}
err := newServerRoute.route.GetError()
if err != nil {
log.Errorf("Error building route: %s", err)
}
}
}
}
middlewares.SetBackend2FrontendMap(&backend2FrontendMap)
//sort routes
for _, serverEntryPoint := range serverEntryPoints {
serverEntryPoint.httpRouter.GetHandler().SortRoutes()
}
return serverEntryPoints, nil
}
func (server *Server) wireFrontendBackend(serverRoute *serverRoute, handler http.Handler) {
// strip prefix
if len(serverRoute.stripPrefixes) > 0 {
serverRoute.route.Handler(&middlewares.StripPrefix{
Prefixes: serverRoute.stripPrefixes,
Handler: handler,
})
} else {
serverRoute.route.Handler(handler)
}
}
func (server *Server) loadEntryPointConfig(entryPointName string, entryPoint *EntryPoint) (http.Handler, error) {
regex := entryPoint.Redirect.Regex
replacement := entryPoint.Redirect.Replacement
if len(entryPoint.Redirect.EntryPoint) > 0 {
regex = "^(?:https?:\\/\\/)?([\\da-z\\.-]+)(?::\\d+)?(.*)$"
if server.globalConfiguration.EntryPoints[entryPoint.Redirect.EntryPoint] == nil {
return nil, errors.New("Unknown entrypoint " + entryPoint.Redirect.EntryPoint)
}
protocol := "http"
if server.globalConfiguration.EntryPoints[entryPoint.Redirect.EntryPoint].TLS != nil {
protocol = "https"
}
r, _ := regexp.Compile("(:\\d+)")
match := r.FindStringSubmatch(server.globalConfiguration.EntryPoints[entryPoint.Redirect.EntryPoint].Address)
if len(match) == 0 {
return nil, errors.New("Bad Address format: " + server.globalConfiguration.EntryPoints[entryPoint.Redirect.EntryPoint].Address)
}
replacement = protocol + "://$1" + match[0] + "$2"
}
rewrite, err := middlewares.NewRewrite(regex, replacement, true)
if err != nil {
return nil, err
}
log.Debugf("Creating entryPoint redirect %s -> %s : %s -> %s", entryPointName, entryPoint.Redirect.EntryPoint, regex, replacement)
negroni := negroni.New()
negroni.Use(rewrite)
return negroni, nil
}
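// Illustrative example for the entrypoint redirect above: redirecting to an
// entrypoint listening on ":443" with TLS yields the replacement
// "https://$1:443$2", so "http://example.com/foo" is rewritten to
// "https://example.com:443/foo".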
func (server *Server) buildDefaultHTTPRouter() *mux.Router {
router := mux.NewRouter()
router.NotFoundHandler = http.HandlerFunc(notFoundHandler)
router.StrictSlash(true)
router.SkipClean(true)
return router
}
func getRoute(serverRoute *serverRoute, route *types.Route) error {
rules := Rules{route: serverRoute}
newRoute, err := rules.Parse(route.Rule)
if err != nil {
return err
}
newRoute.Priority(serverRoute.route.GetPriority() + len(route.Rule))
serverRoute.route = newRoute
return nil
}
func sortedFrontendNamesForConfig(configuration *types.Configuration) []string {
keys := []string{}
for key := range configuration.Frontends {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}

View file

@ -0,0 +1,85 @@
package main
import (
"net/http"
"sync"
"time"
)
// StatsRecorder is an optional middleware that records more detailed statistics
// about requests and how they are processed. This currently consists of recent
// requests that have caused errors (4xx and 5xx status codes), making it easy
// to pinpoint problems.
type StatsRecorder struct {
mutex sync.RWMutex
numRecentErrors int
recentErrors []*statsError
}
// Stats includes all of the stats gathered by the recorder.
type Stats struct {
RecentErrors []*statsError `json:"recent_errors"`
}
// statsError represents an error that has occurred during request processing.
type statsError struct {
StatusCode int `json:"status_code"`
Status string `json:"status"`
Method string `json:"method"`
Host string `json:"host"`
Path string `json:"path"`
Time time.Time `json:"time"`
}
// responseRecorder captures information from the response and preserves it for
// later analysis.
type responseRecorder struct {
http.ResponseWriter
statusCode int
}
// WriteHeader captures the status code for later retrieval.
func (r *responseRecorder) WriteHeader(status int) {
r.ResponseWriter.WriteHeader(status)
r.statusCode = status
}
// ServeHTTP silently extracts information from the request and response as it
// is processed. If the response is a 4xx or 5xx, it is added to the list of the
// most recent errors (capped at numRecentErrors).
func (s *StatsRecorder) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
recorder := &responseRecorder{w, http.StatusOK}
next(recorder, r)
if recorder.statusCode >= 400 {
s.mutex.Lock()
defer s.mutex.Unlock()
s.recentErrors = append([]*statsError{
{
StatusCode: recorder.statusCode,
Status: http.StatusText(recorder.statusCode),
Method: r.Method,
Host: r.Host,
Path: r.URL.Path,
Time: time.Now(),
},
}, s.recentErrors...)
// Limit the size of the list to numRecentErrors
if len(s.recentErrors) > s.numRecentErrors {
s.recentErrors = s.recentErrors[:s.numRecentErrors]
}
}
}
// Data returns a copy of the statistics that have been gathered.
func (s *StatsRecorder) Data() *Stats {
s.mutex.RLock()
defer s.mutex.RUnlock()
// We can't return the slice directly or a race condition might develop
recentErrors := make([]*statsError, len(s.recentErrors))
copy(recentErrors, s.recentErrors)
return &Stats{
RecentErrors: recentErrors,
}
}
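// Illustrative usage sketch (not part of the original vendored file),
// assuming "net/http/httptest" were imported; the numRecentErrors value is
// an example:
//
//	recorder := &StatsRecorder{numRecentErrors: 10}
//	req := httptest.NewRequest("GET", "http://localhost/fail", nil)
//	recorder.ServeHTTP(httptest.NewRecorder(), req, func(w http.ResponseWriter, r *http.Request) {
//		http.Error(w, "boom", http.StatusInternalServerError)
//	})
//	stats := recorder.Data() // stats.RecentErrors now holds a single 500 entry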

View file

@ -0,0 +1,308 @@
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
fmtlog "log"
"net/http"
"os"
"reflect"
"runtime"
"strings"
"text/template"
"github.com/Sirupsen/logrus"
"github.com/containous/flaeg"
"github.com/containous/staert"
"github.com/containous/traefik/acme"
"github.com/containous/traefik/cluster"
"github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares"
"github.com/containous/traefik/provider"
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/docker/libkv/store"
"github.com/satori/go.uuid"
)
var versionTemplate = `Version: {{.Version}}
Codename: {{.Codename}}
Go version: {{.GoVersion}}
Built: {{.BuildTime}}
OS/Arch: {{.Os}}/{{.Arch}}`
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
// traefik config init
traefikConfiguration := NewTraefikConfiguration()
traefikPointersConfiguration := NewTraefikDefaultPointersConfiguration()
//traefik Command init
traefikCmd := &flaeg.Command{
Name: "traefik",
Description: `traefik is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
Complete documentation is available at https://traefik.io`,
Config: traefikConfiguration,
DefaultPointersConfig: traefikPointersConfiguration,
Run: func() error {
run(traefikConfiguration)
return nil
},
}
//version Command init
versionCmd := &flaeg.Command{
Name: "version",
Description: `Print version`,
Config: struct{}{},
DefaultPointersConfig: struct{}{},
Run: func() error {
tmpl, err := template.New("").Parse(versionTemplate)
if err != nil {
return err
}
v := struct {
Version string
Codename string
GoVersion string
BuildTime string
Os string
Arch string
}{
Version: version.Version,
Codename: version.Codename,
GoVersion: runtime.Version(),
BuildTime: version.BuildDate,
Os: runtime.GOOS,
Arch: runtime.GOARCH,
}
if err := tmpl.Execute(os.Stdout, v); err != nil {
return err
}
fmt.Printf("\n")
return nil
},
}
//storeconfig Command init
var kv *staert.KvSource
var err error
storeconfigCmd := &flaeg.Command{
Name: "storeconfig",
Description: `Store the static traefik configuration into a key-value store. Traefik will not start.`,
Config: traefikConfiguration,
DefaultPointersConfig: traefikPointersConfiguration,
Run: func() error {
if kv == nil {
return fmt.Errorf("Error using command storeconfig, no Key-value store defined")
}
jsonConf, err := json.Marshal(traefikConfiguration.GlobalConfiguration)
if err != nil {
return err
}
fmtlog.Printf("Storing configuration: %s\n", jsonConf)
err = kv.StoreConfig(traefikConfiguration.GlobalConfiguration)
if err != nil {
return err
}
if traefikConfiguration.GlobalConfiguration.ACME != nil && len(traefikConfiguration.GlobalConfiguration.ACME.StorageFile) > 0 {
// convert ACME json file to KV store
store := acme.NewLocalStore(traefikConfiguration.GlobalConfiguration.ACME.StorageFile)
object, err := store.Load()
if err != nil {
return err
}
meta := cluster.NewMetadata(object)
err = meta.Marshall()
if err != nil {
return err
}
source := staert.KvSource{
Store: kv,
Prefix: traefikConfiguration.GlobalConfiguration.ACME.Storage,
}
err = source.StoreConfig(meta)
if err != nil {
return err
}
}
return nil
},
Metadata: map[string]string{
"parseAllSources": "true",
},
}
//init flaeg source
f := flaeg.New(traefikCmd, os.Args[1:])
//add custom parsers
f.AddParser(reflect.TypeOf(EntryPoints{}), &EntryPoints{})
f.AddParser(reflect.TypeOf(DefaultEntryPoints{}), &DefaultEntryPoints{})
f.AddParser(reflect.TypeOf(types.Constraints{}), &types.Constraints{})
f.AddParser(reflect.TypeOf(provider.Namespaces{}), &provider.Namespaces{})
f.AddParser(reflect.TypeOf([]acme.Domain{}), &acme.Domains{})
//add commands
f.AddCommand(versionCmd)
f.AddCommand(storeconfigCmd)
usedCmd, err := f.GetCommand()
if err != nil {
fmtlog.Println(err)
os.Exit(-1)
}
if _, err := f.Parse(usedCmd); err != nil {
fmtlog.Printf("Error parsing command: %s\n", err)
os.Exit(-1)
}
//staert init
s := staert.NewStaert(traefikCmd)
//init toml source
toml := staert.NewTomlSource("traefik", []string{traefikConfiguration.ConfigFile, "/etc/traefik/", "$HOME/.traefik/", "."})
//add sources to staert
s.AddSource(toml)
s.AddSource(f)
if _, err := s.LoadConfig(); err != nil {
fmtlog.Println(fmt.Errorf("Error reading TOML config file %s: %s", toml.ConfigFileUsed(), err))
os.Exit(-1)
}
traefikConfiguration.ConfigFile = toml.ConfigFileUsed()
kv, err = CreateKvSource(traefikConfiguration)
if err != nil {
fmtlog.Printf("Error creating kv store: %s\n", err)
os.Exit(-1)
}
// If a KV store is enabled and no sub-command is called in args
if kv != nil && usedCmd == traefikCmd {
if traefikConfiguration.Cluster == nil {
traefikConfiguration.Cluster = &types.Cluster{Node: uuid.NewV4().String()}
}
if traefikConfiguration.Cluster.Store == nil {
traefikConfiguration.Cluster.Store = &types.Store{Prefix: kv.Prefix, Store: kv.Store}
}
s.AddSource(kv)
if _, err := s.LoadConfig(); err != nil {
fmtlog.Printf("Error loading configuration: %s\n", err)
os.Exit(-1)
}
}
if err := s.Run(); err != nil {
fmtlog.Printf("Error running traefik: %s\n", err)
os.Exit(-1)
}
os.Exit(0)
}
func run(traefikConfiguration *TraefikConfiguration) {
fmtlog.SetFlags(fmtlog.Lshortfile | fmtlog.LstdFlags)
// load global configuration
globalConfiguration := traefikConfiguration.GlobalConfiguration
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = globalConfiguration.MaxIdleConnsPerHost
if globalConfiguration.InsecureSkipVerify {
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
loggerMiddleware := middlewares.NewLogger(globalConfiguration.AccessLogsFile)
defer loggerMiddleware.Close()
if globalConfiguration.File != nil && len(globalConfiguration.File.Filename) == 0 {
// no filename, setting to global config file
if len(traefikConfiguration.ConfigFile) != 0 {
globalConfiguration.File.Filename = traefikConfiguration.ConfigFile
} else {
log.Errorln("Error using file configuration backend, no filename defined")
}
}
if len(globalConfiguration.EntryPoints) == 0 {
globalConfiguration.EntryPoints = map[string]*EntryPoint{"http": {Address: ":80"}}
globalConfiguration.DefaultEntryPoints = []string{"http"}
}
if globalConfiguration.Debug {
globalConfiguration.LogLevel = "DEBUG"
}
// logging
level, err := logrus.ParseLevel(strings.ToLower(globalConfiguration.LogLevel))
if err != nil {
log.Error("Error getting level", err)
}
log.SetLevel(level)
if len(globalConfiguration.TraefikLogsFile) > 0 {
fi, err := os.OpenFile(globalConfiguration.TraefikLogsFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Error("Error opening file", err)
} else {
// Only defer the close once the file has actually been opened.
defer func() {
if err := fi.Close(); err != nil {
log.Error("Error closing file", err)
}
}()
log.SetOutput(fi)
log.SetFormatter(&logrus.TextFormatter{DisableColors: true, FullTimestamp: true, DisableSorting: true})
}
} else {
log.SetFormatter(&logrus.TextFormatter{FullTimestamp: true, DisableSorting: true})
}
jsonConf, _ := json.Marshal(globalConfiguration)
log.Infof("Traefik version %s built on %s", version.Version, version.BuildDate)
if len(traefikConfiguration.ConfigFile) != 0 {
log.Infof("Using TOML configuration file %s", traefikConfiguration.ConfigFile)
}
log.Debugf("Global configuration loaded %s", string(jsonConf))
server := NewServer(globalConfiguration)
server.Start()
defer server.Close()
log.Info("Shutting down")
}
// CreateKvSource creates a staert.KvSource from the static configuration
// TLS support is enabled for the Consul and Etcd backends
func CreateKvSource(traefikConfiguration *TraefikConfiguration) (*staert.KvSource, error) {
var kv *staert.KvSource
var store store.Store
var err error
switch {
case traefikConfiguration.Consul != nil:
store, err = traefikConfiguration.Consul.CreateStore()
kv = &staert.KvSource{
Store: store,
Prefix: traefikConfiguration.Consul.Prefix,
}
case traefikConfiguration.Etcd != nil:
store, err = traefikConfiguration.Etcd.CreateStore()
kv = &staert.KvSource{
Store: store,
Prefix: traefikConfiguration.Etcd.Prefix,
}
case traefikConfiguration.Zookeeper != nil:
store, err = traefikConfiguration.Zookeeper.CreateStore()
kv = &staert.KvSource{
Store: store,
Prefix: traefikConfiguration.Zookeeper.Prefix,
}
case traefikConfiguration.Boltdb != nil:
store, err = traefikConfiguration.Boltdb.CreateStore()
kv = &staert.KvSource{
Store: store,
Prefix: traefikConfiguration.Boltdb.Prefix,
}
}
return kv, err
}
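// Illustrative sketch (not part of the original vendored file): with a
// [consul] section such as the following in traefik.toml (the endpoint and
// prefix values are examples), CreateKvSource returns a Consul-backed
// staert.KvSource, and `traefik storeconfig` then pushes the static
// configuration under that prefix:
//
//	[consul]
//	  endpoint = "127.0.0.1:8500"
//	  prefix = "traefik"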

View file

@ -0,0 +1,235 @@
package types
import (
"errors"
"fmt"
"github.com/docker/libkv/store"
"github.com/ryanuber/go-glob"
"strings"
)
// Backend holds backend configuration.
type Backend struct {
Servers map[string]Server `json:"servers,omitempty"`
CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"`
LoadBalancer *LoadBalancer `json:"loadBalancer,omitempty"`
MaxConn *MaxConn `json:"maxConn,omitempty"`
}
// MaxConn holds maximum connection configuration
type MaxConn struct {
Amount int64 `json:"amount,omitempty"`
ExtractorFunc string `json:"extractorFunc,omitempty"`
}
// LoadBalancer holds load balancing configuration.
type LoadBalancer struct {
Method string `json:"method,omitempty"`
Sticky bool `json:"sticky,omitempty"`
}
// CircuitBreaker holds circuit breaker configuration.
type CircuitBreaker struct {
Expression string `json:"expression,omitempty"`
}
// Server holds server configuration.
type Server struct {
URL string `json:"url,omitempty"`
Weight int `json:"weight"`
}
// Route holds route configuration.
type Route struct {
Rule string `json:"rule,omitempty"`
}
// Frontend holds frontend configuration.
type Frontend struct {
EntryPoints []string `json:"entryPoints,omitempty"`
Backend string `json:"backend,omitempty"`
Routes map[string]Route `json:"routes,omitempty"`
PassHostHeader bool `json:"passHostHeader,omitempty"`
Priority int `json:"priority"`
}
// LoadBalancerMethod holds the method of load balancing to use.
type LoadBalancerMethod uint8
const (
// Wrr (default) = Weighted Round Robin
Wrr LoadBalancerMethod = iota
// Drr = Dynamic Round Robin
Drr
)
var loadBalancerMethodNames = []string{
"Wrr",
"Drr",
}
// NewLoadBalancerMethod creates a new LoadBalancerMethod from a given LoadBalancer.
func NewLoadBalancerMethod(loadBalancer *LoadBalancer) (LoadBalancerMethod, error) {
if loadBalancer != nil {
for i, name := range loadBalancerMethodNames {
if strings.EqualFold(name, loadBalancer.Method) {
return LoadBalancerMethod(i), nil
}
}
}
return Wrr, ErrInvalidLoadBalancerMethod
}
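// exampleNewLoadBalancerMethod is an illustrative sketch (not part of the
// original vendored file) showing the fallback behaviour: unknown method
// names yield Wrr together with ErrInvalidLoadBalancerMethod.
func exampleNewLoadBalancerMethod() {
method, err := NewLoadBalancerMethod(&LoadBalancer{Method: "drr"})
fmt.Println(method == Drr, err) // true <nil>
method, err = NewLoadBalancerMethod(&LoadBalancer{Method: "leastconn"})
fmt.Println(method == Wrr, err) // true Invalid method, using default
}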
// ErrInvalidLoadBalancerMethod is thrown when the specified load balancing method is invalid.
var ErrInvalidLoadBalancerMethod = errors.New("Invalid method, using default")
// Configuration of a provider.
type Configuration struct {
Backends map[string]*Backend `json:"backends,omitempty"`
Frontends map[string]*Frontend `json:"frontends,omitempty"`
}
// ConfigMessage holds configuration information exchanged between parts of traefik.
type ConfigMessage struct {
ProviderName string
Configuration *Configuration
}
// Constraint holds a parsed constraint expression
type Constraint struct {
Key string
// MustMatch is true if operator is "==" or false if operator is "!="
MustMatch bool
// TODO: support regex
Regex string
}
// NewConstraint receives a string and returns a *Constraint after checking the syntax and parsing the constraint expression
func NewConstraint(exp string) (*Constraint, error) {
sep := ""
constraint := &Constraint{}
if strings.Contains(exp, "==") {
sep = "=="
constraint.MustMatch = true
} else if strings.Contains(exp, "!=") {
sep = "!="
constraint.MustMatch = false
} else {
return nil, errors.New("Constraint expression missing valid operator: '==' or '!='")
}
kv := strings.SplitN(exp, sep, 2)
if len(kv) == 2 {
// At the moment, it only supports tags
if kv[0] != "tag" {
return nil, errors.New("Constraint must be tag-based. Syntax: tag==us-*")
}
constraint.Key = kv[0]
constraint.Regex = kv[1]
return constraint, nil
}
return nil, errors.New("Incorrect constraint expression: " + exp)
}
func (c *Constraint) String() string {
if c.MustMatch {
return c.Key + "==" + c.Regex
}
return c.Key + "!=" + c.Regex
}
// UnmarshalText defines how a constraint expression is unmarshalled during TOML parsing
func (c *Constraint) UnmarshalText(text []byte) error {
constraint, err := NewConstraint(string(text))
if err != nil {
return err
}
*c = *constraint
return nil
}
// MatchConstraintWithAtLeastOneTag tests whether the constraint matches at least one of the given service tags
func (c *Constraint) MatchConstraintWithAtLeastOneTag(tags []string) bool {
for _, tag := range tags {
if glob.Glob(c.Regex, tag) {
return true
}
}
return false
}
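// exampleConstraint is an illustrative sketch (not part of the original
// vendored file) of parsing a constraint expression and matching it against
// service tags.
func exampleConstraint() {
constraint, err := NewConstraint("tag==us-*")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(constraint)                                                         // tag==us-*
fmt.Println(constraint.MatchConstraintWithAtLeastOneTag([]string{"us-east-1"})) // true
fmt.Println(constraint.MatchConstraintWithAtLeastOneTag([]string{"eu-west-1"})) // false
}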
// Set parses a comma-separated list of constraint expressions and appends them to the Constraints
func (cs *Constraints) Set(str string) error {
exps := strings.Split(str, ",")
if len(exps) == 0 {
return errors.New("Bad Constraint format: " + str)
}
for _, exp := range exps {
constraint, err := NewConstraint(exp)
if err != nil {
return err
}
*cs = append(*cs, *constraint)
}
return nil
}
// Constraints is a slice of Constraint that implements the flaeg parser interface
type Constraints []Constraint
// Get returns the Constraints value
func (cs *Constraints) Get() interface{} { return []Constraint(*cs) }
// String returns the string representation of the Constraints
func (cs *Constraints) String() string { return fmt.Sprintf("%+v", *cs) }
// SetValue sets the Constraints from a generic value
func (cs *Constraints) SetValue(val interface{}) {
*cs = Constraints(val.(Constraints))
}
// Type exports the Constraints type as a string
func (cs *Constraints) Type() string {
return "constraint"
}
// Store holds KV store cluster config
type Store struct {
store.Store
Prefix string // key prefix such as "prefix", without the leading "/"
}
// Cluster holds cluster config
type Cluster struct {
Node string `description:"Node name"`
Store *Store
}
// Auth holds authentication configuration (BASIC, DIGEST, users)
type Auth struct {
Basic *Basic
Digest *Digest
}
// Users holds a list of authentication users
type Users []string
// Basic holds HTTP Basic authentication configuration
type Basic struct {
Users
}
// Digest holds HTTP Digest authentication configuration
type Digest struct {
Users
}
// CanonicalDomain returns the domain lower-cased and with surrounding whitespace trimmed
func CanonicalDomain(domain string) string {
return strings.ToLower(strings.TrimSpace(domain))
}
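// exampleCanonicalDomain is an illustrative sketch (not part of the original
// vendored file):
func exampleCanonicalDomain() {
fmt.Println(CanonicalDomain("  ExAmple.Com ")) // example.com
}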
// Statistics provides options for monitoring request and response stats
type Statistics struct {
RecentErrors int `description:"Number of recent errors logged"`
}

View file

@ -0,0 +1,10 @@
package version
var (
// Version holds the current version of traefik.
Version = "dev"
// Codename holds the current version codename of traefik.
Codename = "cheddar" // beta cheese
// BuildDate holds the build date of traefik.
BuildDate = "I don't remember exactly"
)

311
integration/vendor/github.com/containous/traefik/web.go generated vendored Normal file
View file

@ -0,0 +1,311 @@
package main
import (
"encoding/json"
"expvar"
"fmt"
"io/ioutil"
"net/http"
"runtime"
"github.com/codegangsta/negroni"
"github.com/containous/mux"
"github.com/containous/traefik/autogen"
"github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/elazarl/go-bindata-assetfs"
thoas_stats "github.com/thoas/stats"
"github.com/unrolled/render"
)
var (
metrics = thoas_stats.New()
statsRecorder *StatsRecorder
)
// WebProvider is a provider.Provider implementation that provides the UI.
// FIXME to be handled another way.
type WebProvider struct {
Address string `description:"Web administration port"`
CertFile string `description:"SSL certificate"`
KeyFile string `description:"SSL certificate key"`
ReadOnly bool `description:"Enable read only API"`
Statistics *types.Statistics `description:"Enable more detailed statistics"`
server *Server
Auth *types.Auth
}
var (
templatesRenderer = render.New(render.Options{
Directory: "nowhere",
})
)
func init() {
expvar.Publish("Goroutines", expvar.Func(goroutines))
}
func goroutines() interface{} {
return runtime.NumGoroutine()
}
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *WebProvider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, _ []types.Constraint) error {
systemRouter := mux.NewRouter()
// health route
systemRouter.Methods("GET").Path("/health").HandlerFunc(provider.getHealthHandler)
// ping route
systemRouter.Methods("GET").Path("/ping").HandlerFunc(provider.getPingHandler)
// API routes
systemRouter.Methods("GET").Path("/api").HandlerFunc(provider.getConfigHandler)
systemRouter.Methods("GET").Path("/api/version").HandlerFunc(provider.getVersionHandler)
systemRouter.Methods("GET").Path("/api/providers").HandlerFunc(provider.getConfigHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}").HandlerFunc(provider.getProviderHandler)
systemRouter.Methods("PUT").Path("/api/providers/{provider}").HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
if provider.ReadOnly {
response.WriteHeader(http.StatusForbidden)
fmt.Fprintf(response, "REST API is in read-only mode")
return
}
vars := mux.Vars(request)
if vars["provider"] != "web" {
response.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(response, "Only 'web' provider can be updated through the REST API")
return
}
configuration := new(types.Configuration)
body, _ := ioutil.ReadAll(request.Body)
err := json.Unmarshal(body, configuration)
if err == nil {
configurationChan <- types.ConfigMessage{ProviderName: "web", Configuration: configuration}
provider.getConfigHandler(response, request)
} else {
log.Errorf("Error parsing configuration %+v", err)
http.Error(response, fmt.Sprintf("%+v", err), http.StatusBadRequest)
}
})
systemRouter.Methods("GET").Path("/api/providers/{provider}/backends").HandlerFunc(provider.getBackendsHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/backends/{backend}").HandlerFunc(provider.getBackendHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/backends/{backend}/servers").HandlerFunc(provider.getServersHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/backends/{backend}/servers/{server}").HandlerFunc(provider.getServerHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/frontends").HandlerFunc(provider.getFrontendsHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/frontends/{frontend}").HandlerFunc(provider.getFrontendHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/frontends/{frontend}/routes").HandlerFunc(provider.getRoutesHandler)
systemRouter.Methods("GET").Path("/api/providers/{provider}/frontends/{frontend}/routes/{route}").HandlerFunc(provider.getRouteHandler)
// Expose dashboard
systemRouter.Methods("GET").Path("/").HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
http.Redirect(response, request, "/dashboard/", http.StatusFound)
})
systemRouter.Methods("GET").PathPrefix("/dashboard/").Handler(http.StripPrefix("/dashboard/", http.FileServer(&assetfs.AssetFS{Asset: autogen.Asset, AssetInfo: autogen.AssetInfo, AssetDir: autogen.AssetDir, Prefix: "static"})))
// expvars
if provider.server.globalConfiguration.Debug {
systemRouter.Methods("GET").Path("/debug/vars").HandlerFunc(expvarHandler)
}
go func() {
var err error
var negroni = negroni.New()
if provider.Auth != nil {
authMiddleware, err := middlewares.NewAuthenticator(provider.Auth)
if err != nil {
log.Fatal("Error creating Auth: ", err)
}
negroni.Use(authMiddleware)
}
negroni.UseHandler(systemRouter)
if len(provider.CertFile) > 0 && len(provider.KeyFile) > 0 {
err = http.ListenAndServeTLS(provider.Address, provider.CertFile, provider.KeyFile, negroni)
} else {
err = http.ListenAndServe(provider.Address, negroni)
}
if err != nil {
log.Fatal("Error creating server: ", err)
}
}()
return nil
}
// healthResponse combines data returned by thoas/stats with statistics (if
// they are enabled).
type healthResponse struct {
*thoas_stats.Data
*Stats
}
func (provider *WebProvider) getHealthHandler(response http.ResponseWriter, request *http.Request) {
health := &healthResponse{Data: metrics.Data()}
if statsRecorder != nil {
health.Stats = statsRecorder.Data()
}
templatesRenderer.JSON(response, http.StatusOK, health)
}
func (provider *WebProvider) getPingHandler(response http.ResponseWriter, request *http.Request) {
fmt.Fprintf(response, "OK")
}
func (provider *WebProvider) getConfigHandler(response http.ResponseWriter, request *http.Request) {
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
templatesRenderer.JSON(response, http.StatusOK, currentConfigurations)
}
func (provider *WebProvider) getVersionHandler(response http.ResponseWriter, request *http.Request) {
v := struct {
Version string
Codename string
}{
Version: version.Version,
Codename: version.Codename,
}
templatesRenderer.JSON(response, http.StatusOK, v)
}
func (provider *WebProvider) getProviderHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
templatesRenderer.JSON(response, http.StatusOK, provider)
} else {
http.NotFound(response, request)
}
}
func (provider *WebProvider) getBackendsHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
templatesRenderer.JSON(response, http.StatusOK, provider.Backends)
} else {
http.NotFound(response, request)
}
}
func (provider *WebProvider) getBackendHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
backendID := vars["backend"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
if backend, ok := provider.Backends[backendID]; ok {
templatesRenderer.JSON(response, http.StatusOK, backend)
return
}
}
http.NotFound(response, request)
}
func (provider *WebProvider) getServersHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
backendID := vars["backend"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
if backend, ok := provider.Backends[backendID]; ok {
templatesRenderer.JSON(response, http.StatusOK, backend.Servers)
return
}
}
http.NotFound(response, request)
}
func (provider *WebProvider) getServerHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
backendID := vars["backend"]
serverID := vars["server"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
if backend, ok := provider.Backends[backendID]; ok {
if server, ok := backend.Servers[serverID]; ok {
templatesRenderer.JSON(response, http.StatusOK, server)
return
}
}
}
http.NotFound(response, request)
}
func (provider *WebProvider) getFrontendsHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
templatesRenderer.JSON(response, http.StatusOK, provider.Frontends)
} else {
http.NotFound(response, request)
}
}
func (provider *WebProvider) getFrontendHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
frontendID := vars["frontend"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
if frontend, ok := provider.Frontends[frontendID]; ok {
templatesRenderer.JSON(response, http.StatusOK, frontend)
return
}
}
http.NotFound(response, request)
}
func (provider *WebProvider) getRoutesHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
frontendID := vars["frontend"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
if frontend, ok := provider.Frontends[frontendID]; ok {
templatesRenderer.JSON(response, http.StatusOK, frontend.Routes)
return
}
}
http.NotFound(response, request)
}
func (provider *WebProvider) getRouteHandler(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
providerID := vars["provider"]
frontendID := vars["frontend"]
routeID := vars["route"]
currentConfigurations := provider.server.currentConfigurations.Get().(configs)
if provider, ok := currentConfigurations[providerID]; ok {
if frontend, ok := provider.Frontends[frontendID]; ok {
if route, ok := frontend.Routes[routeID]; ok {
templatesRenderer.JSON(response, http.StatusOK, route)
return
}
}
}
http.NotFound(response, request)
}
func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
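// Illustrative usage sketch (not part of the original vendored file); the
// listen address is an example:
//
//	curl http://localhost:8080/ping                  # "OK"
//	curl http://localhost:8080/health                # thoas/stats data plus recent errors
//	curl http://localhost:8080/api/providers/web     # configuration of the "web" provider
//	curl -X PUT -d @conf.json http://localhost:8080/api/providers/web   # hot-update (rejected when ReadOnly)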