Split Docker provider

parent 2cebd0a083
commit 466d7461b7

71 changed files with 2677 additions and 1190 deletions
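In outline: the single Docker provider is split into a container provider (pkg/provider/docker/pdocker.go) and a Swarm provider (pkg/provider/docker/pswarm.go), while the label-to-dynamic-configuration logic moves into a DynConfBuilder that receives the Docker API client as a dependency. A rough sketch of the resulting shape, abbreviated from the diff below (the full field lists of Shared and ClientConfig are not part of this capture):

	// Sketch only; field lists abbreviated from what the diff shows.
	type Provider struct { // containers (pdocker.go)
		Shared
		ClientConfig
	}

	type SwarmProvider struct { // services/tasks (pswarm.go)
		Shared
		ClientConfig
		RefreshSeconds ptypes.Duration
	}

	// Both hand the same builder their shared settings plus a live client:
	//   builder := NewDynConfBuilder(p.Shared, dockerClient)
	//   configuration := builder.build(ctx, dockerDataList)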
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	dockertypes "github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
 	"github.com/docker/go-connections/nat"
 	"github.com/rs/zerolog/log"
 	"github.com/traefik/traefik/v3/pkg/config/dynamic"
@@ -17,7 +18,16 @@ import (
 	"github.com/traefik/traefik/v3/pkg/provider/constraints"
 )
 
-func (p *Provider) buildConfiguration(ctx context.Context, containersInspected []dockerData) *dynamic.Configuration {
+type DynConfBuilder struct {
+	Shared
+	apiClient client.APIClient
+}
+
+func NewDynConfBuilder(configuration Shared, apiClient client.APIClient) *DynConfBuilder {
+	return &DynConfBuilder{Shared: configuration, apiClient: apiClient}
+}
+
+func (p *DynConfBuilder) build(ctx context.Context, containersInspected []dockerData) *dynamic.Configuration {
 	configurations := make(map[string]*dynamic.Configuration)
 
 	for _, container := range containersInspected {
@@ -92,7 +102,7 @@ func (p *Provider) buildConfiguration(ctx context.Context, containersInspected [
 	return provider.Merge(ctx, configurations)
 }
 
-func (p *Provider) buildTCPServiceConfiguration(ctx context.Context, container dockerData, configuration *dynamic.TCPConfiguration) error {
+func (p *DynConfBuilder) buildTCPServiceConfiguration(ctx context.Context, container dockerData, configuration *dynamic.TCPConfiguration) error {
 	serviceName := getServiceName(container)
 
 	if len(configuration.Services) == 0 {
@@ -117,7 +127,7 @@ func (p *Provider) buildTCPServiceConfiguration(ctx context.Context, container d
 	return nil
 }
 
-func (p *Provider) buildUDPServiceConfiguration(ctx context.Context, container dockerData, configuration *dynamic.UDPConfiguration) error {
+func (p *DynConfBuilder) buildUDPServiceConfiguration(ctx context.Context, container dockerData, configuration *dynamic.UDPConfiguration) error {
 	serviceName := getServiceName(container)
 
 	if len(configuration.Services) == 0 {
@@ -141,7 +151,7 @@ func (p *Provider) buildUDPServiceConfiguration(ctx context.Context, container d
 	return nil
 }
 
-func (p *Provider) buildServiceConfiguration(ctx context.Context, container dockerData, configuration *dynamic.HTTPConfiguration) error {
+func (p *DynConfBuilder) buildServiceConfiguration(ctx context.Context, container dockerData, configuration *dynamic.HTTPConfiguration) error {
 	serviceName := getServiceName(container)
 
 	if len(configuration.Services) == 0 {
@@ -167,7 +177,7 @@ func (p *Provider) buildServiceConfiguration(ctx context.Context, container dock
 	return nil
 }
 
-func (p *Provider) keepContainer(ctx context.Context, container dockerData) bool {
+func (p *DynConfBuilder) keepContainer(ctx context.Context, container dockerData) bool {
 	logger := log.Ctx(ctx)
 
 	if !container.ExtraConf.Enable {
@@ -193,7 +203,7 @@ func (p *Provider) keepContainer(ctx context.Context, container dockerData) bool
 	return true
 }
 
-func (p *Provider) addServerTCP(ctx context.Context, container dockerData, loadBalancer *dynamic.TCPServersLoadBalancer) error {
+func (p *DynConfBuilder) addServerTCP(ctx context.Context, container dockerData, loadBalancer *dynamic.TCPServersLoadBalancer) error {
 	if loadBalancer == nil {
 		return errors.New("load-balancer is not defined")
 	}
@@ -219,7 +229,7 @@ func (p *Provider) addServerTCP(ctx context.Context, container dockerData, loadB
 	return nil
 }
 
-func (p *Provider) addServerUDP(ctx context.Context, container dockerData, loadBalancer *dynamic.UDPServersLoadBalancer) error {
+func (p *DynConfBuilder) addServerUDP(ctx context.Context, container dockerData, loadBalancer *dynamic.UDPServersLoadBalancer) error {
 	if loadBalancer == nil {
 		return errors.New("load-balancer is not defined")
 	}
@@ -245,7 +255,7 @@ func (p *Provider) addServerUDP(ctx context.Context, container dockerData, loadB
 	return nil
 }
 
-func (p *Provider) addServer(ctx context.Context, container dockerData, loadBalancer *dynamic.ServersLoadBalancer) error {
+func (p *DynConfBuilder) addServer(ctx context.Context, container dockerData, loadBalancer *dynamic.ServersLoadBalancer) error {
 	if loadBalancer == nil {
 		return errors.New("load-balancer is not defined")
 	}
@@ -275,7 +285,7 @@ func (p *Provider) addServer(ctx context.Context, container dockerData, loadBala
 	return nil
 }
 
-func (p *Provider) getIPPort(ctx context.Context, container dockerData, serverPort string) (string, string, error) {
+func (p *DynConfBuilder) getIPPort(ctx context.Context, container dockerData, serverPort string) (string, string, error) {
 	logger := log.Ctx(ctx)
 
 	var ip, port string
@@ -307,7 +317,7 @@ func (p *Provider) getIPPort(ctx context.Context, container dockerData, serverPo
 	return ip, port, nil
 }
 
-func (p Provider) getIPAddress(ctx context.Context, container dockerData) string {
+func (p *DynConfBuilder) getIPAddress(ctx context.Context, container dockerData) string {
 	logger := log.Ctx(ctx)
 
 	netNotFound := false
@@ -338,23 +348,17 @@ func (p Provider) getIPAddress(ctx context.Context, container dockerData) string
 	}
 
 	if container.NetworkSettings.NetworkMode.IsContainer() {
-		dockerClient, err := p.createClient()
-		if err != nil {
-			logger.Warn().Err(err).Msg("Unable to get IP address")
-			return ""
-		}
-
 		connectedContainer := container.NetworkSettings.NetworkMode.ConnectedContainer()
-		containerInspected, err := dockerClient.ContainerInspect(context.Background(), connectedContainer)
+		containerInspected, err := p.apiClient.ContainerInspect(context.Background(), connectedContainer)
 		if err != nil {
 			logger.Warn().Err(err).Msgf("Unable to get IP address for container %s: failed to inspect container ID %s", container.Name, connectedContainer)
 			return ""
 		}
 
-		// Check connected container for traefik.docker.network, falling back to
-		// the network specified on the current container.
+		// Check connected container for traefik.docker.network,
+		// falling back to the network specified on the current container.
 		containerParsed := parseContainer(containerInspected)
-		extraConf, err := p.getConfiguration(containerParsed)
+		extraConf, err := p.extractLabels(containerParsed)
 		if err != nil {
 			logger.Warn().Err(err).Msgf("Unable to get IP address for container %s : failed to get extra configuration for container %s", container.Name, containerInspected.Name)
 			return ""
@@ -379,8 +383,9 @@ func (p Provider) getIPAddress(ctx context.Context, container dockerData) string
 	return ""
 }
 
-func (p *Provider) getPortBinding(container dockerData, serverPort string) (*nat.PortBinding, error) {
+func (p *DynConfBuilder) getPortBinding(container dockerData, serverPort string) (*nat.PortBinding, error) {
 	port := getPort(container, serverPort)
+
 	for netPort, portBindings := range container.NetworkSettings.Ports {
 		if strings.EqualFold(string(netPort), port+"/TCP") || strings.EqualFold(string(netPort), port+"/UDP") {
 			for _, p := range portBindings {
@@ -391,36 +396,3 @@ func (p *Provider) getPortBinding(container dockerData, serverPort string) (*nat
 
 	return nil, fmt.Errorf("unable to find the external IP:Port for the container %q", container.Name)
 }
-
-func getPort(container dockerData, serverPort string) string {
-	if len(serverPort) > 0 {
-		return serverPort
-	}
-
-	var ports []nat.Port
-	for port := range container.NetworkSettings.Ports {
-		ports = append(ports, port)
-	}
-
-	less := func(i, j nat.Port) bool {
-		return i.Int() < j.Int()
-	}
-	nat.Sort(ports, less)
-
-	if len(ports) > 0 {
-		min := ports[0]
-		return min.Port()
-	}
-
-	return ""
-}
-
-func getServiceName(container dockerData) string {
-	serviceName := container.ServiceName
-
-	if values, err := getStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
-		serviceName = values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]
-	}
-
-	return provider.Normalize(serviceName)
-}
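Injecting the API client (p.apiClient above, replacing the createClient call inside getIPAddress) is what lets the tests in the next file construct a builder with nil when no Docker round-trips are expected. When a call is expected, a stub that embeds client.APIClient suffices, as pswarm_mock_test.go at the end of this diff does; a minimal sketch along those lines (fakeInspectClient is a hypothetical name, not part of the commit):

	type fakeInspectClient struct {
		client.APIClient // embedded so unimplemented methods satisfy the interface
		container dockertypes.ContainerJSON
	}

	func (f *fakeInspectClient) ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error) {
		return f.container, nil
	}

	// builder := NewDynConfBuilder(Shared{Network: "testnet"}, &fakeInspectClient{container: c})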
@@ -15,7 +15,7 @@ import (
 	"github.com/traefik/traefik/v3/pkg/config/dynamic"
 )
 
-func TestDefaultRule(t *testing.T) {
+func TestDynConfBuilder_DefaultRule(t *testing.T) {
 	testCases := []struct {
 		desc       string
 		containers []dockerData
@@ -376,8 +376,10 @@ func TestDefaultRule(t *testing.T)
 			t.Parallel()
 
 			p := Provider{
-				ExposedByDefault: true,
-				DefaultRule:      test.defaultRule,
+				Shared: Shared{
+					ExposedByDefault: true,
+					DefaultRule:      test.defaultRule,
+				},
 			}
 
 			err := p.Init()
@@ -385,18 +387,20 @@ func TestDefaultRule(t *testing.T)
 
 			for i := 0; i < len(test.containers); i++ {
 				var err error
-				test.containers[i].ExtraConf, err = p.getConfiguration(test.containers[i])
+				test.containers[i].ExtraConf, err = p.extractLabels(test.containers[i])
 				require.NoError(t, err)
 			}
 
-			configuration := p.buildConfiguration(context.Background(), test.containers)
+			builder := NewDynConfBuilder(p.Shared, nil)
+
+			configuration := builder.build(context.Background(), test.containers)
 
 			assert.Equal(t, test.expected, configuration)
 		})
 	}
 }
 
-func Test_buildConfiguration(t *testing.T) {
+func TestDynConfBuilder_build(t *testing.T) {
 	testCases := []struct {
 		desc       string
 		containers []dockerData
@@ -3381,10 +3385,12 @@ func Test_buildConfiguration(t *testing.T)
 			t.Parallel()
 
 			p := Provider{
-				AllowEmptyServices: test.allowEmptyServices,
-				DefaultRule:        "Host(`{{ normalize .Name }}.traefik.wtf`)",
-				ExposedByDefault:   true,
-				UseBindPortIP:      test.useBindPortIP,
+				Shared: Shared{
+					AllowEmptyServices: test.allowEmptyServices,
+					ExposedByDefault:   true,
+					UseBindPortIP:      test.useBindPortIP,
+					DefaultRule:        "Host(`{{ normalize .Name }}.traefik.wtf`)",
+				},
 			}
 			p.Constraints = test.constraints
 
@@ -3393,18 +3399,20 @@ func Test_buildConfiguration(t *testing.T)
 
 			for i := 0; i < len(test.containers); i++ {
 				var err error
-				test.containers[i].ExtraConf, err = p.getConfiguration(test.containers[i])
+				test.containers[i].ExtraConf, err = p.extractLabels(test.containers[i])
 				require.NoError(t, err)
 			}
 
-			configuration := p.buildConfiguration(context.Background(), test.containers)
+			builder := NewDynConfBuilder(p.Shared, nil)
+
+			configuration := builder.build(context.Background(), test.containers)
 
 			assert.Equal(t, test.expected, configuration)
 		})
 	}
 }
 
-func TestDockerGetIPPort(t *testing.T) {
+func TestDynConfBuilder_getIPPort_docker(t *testing.T) {
 	type expected struct {
 		ip   string
 		port string
@@ -3565,12 +3573,12 @@ func TestDockerGetIPPort(t *testing.T)
 
 			dData := parseContainer(test.container)
 
-			provider := &Provider{
+			builder := NewDynConfBuilder(Shared{
 				Network:       "testnet",
 				UseBindPortIP: true,
-			}
+			}, nil)
 
-			actualIP, actualPort, actualError := provider.getIPPort(context.Background(), dData, test.serverPort)
+			actualIP, actualPort, actualError := builder.getIPPort(context.Background(), dData, test.serverPort)
 			if test.expected.error {
 				require.Error(t, actualError)
 			} else {
@@ -3582,73 +3590,7 @@ func TestDockerGetIPPort(t *testing.T)
 	}
 }
 
-func TestDockerGetPort(t *testing.T) {
-	testCases := []struct {
-		desc       string
-		container  docker.ContainerJSON
-		serverPort string
-		expected   string
-	}{
-		{
-			desc:      "no binding, no server port label",
-			container: containerJSON(name("foo")),
-			expected:  "",
-		},
-		{
-			desc: "binding, no server port label",
-			container: containerJSON(ports(nat.PortMap{
-				"80/tcp": {},
-			})),
-			expected: "80",
-		},
-		{
-			desc: "binding, multiple ports, no server port label",
-			container: containerJSON(ports(nat.PortMap{
-				"80/tcp":  {},
-				"443/tcp": {},
-			})),
-			expected: "80",
-		},
-		{
-			desc:       "no binding, server port label",
-			container:  containerJSON(),
-			serverPort: "8080",
-			expected:   "8080",
-		},
-		{
-			desc: "binding, server port label",
-			container: containerJSON(
-				ports(nat.PortMap{
-					"80/tcp": {},
-				})),
-			serverPort: "8080",
-			expected:   "8080",
-		},
-		{
-			desc: "binding, multiple ports, server port label",
-			container: containerJSON(ports(nat.PortMap{
-				"8080/tcp": {},
-				"80/tcp":   {},
-			})),
-			serverPort: "8080",
-			expected:   "8080",
-		},
-	}
-
-	for _, test := range testCases {
-		test := test
-		t.Run(test.desc, func(t *testing.T) {
-			t.Parallel()
-
-			dData := parseContainer(test.container)
-
-			actual := getPort(dData, test.serverPort)
-			assert.Equal(t, test.expected, actual)
-		})
-	}
-}
-
-func TestDockerGetIPAddress(t *testing.T) {
+func TestDynConfBuilder_getIPAddress_docker(t *testing.T) {
 	testCases := []struct {
 		desc      string
 		container docker.ContainerJSON
@@ -3742,24 +3684,26 @@ func TestDockerGetIPAddress(t *testing.T)
 		t.Run(test.desc, func(t *testing.T) {
 			t.Parallel()
 
-			provider := &Provider{
+			conf := Shared{
 				Network: "webnet",
 			}
 
 			dData := parseContainer(test.container)
 
-			dData.ExtraConf.Docker.Network = provider.Network
+			dData.ExtraConf.Docker.Network = conf.Network
 			if len(test.network) > 0 {
 				dData.ExtraConf.Docker.Network = test.network
 			}
 
-			actual := provider.getIPAddress(context.Background(), dData)
+			builder := NewDynConfBuilder(conf, nil)
+
+			actual := builder.getIPAddress(context.Background(), dData)
 			assert.Equal(t, test.expected, actual)
 		})
 	}
 }
 
-func TestSwarmGetIPAddress(t *testing.T) {
+func TestDynConfBuilder_getIPAddress_swarm(t *testing.T) {
 	testCases := []struct {
 		service  swarm.Service
 		expected string
@@ -3810,47 +3754,13 @@ func TestSwarmGetIPAddress(t *testing.T)
 		t.Run(strconv.Itoa(serviceID), func(t *testing.T) {
 			t.Parallel()
 
-			provider := &Provider{
-				SwarmMode: true,
-			}
-
-			dData, err := provider.parseService(context.Background(), test.service, test.networks)
-			require.NoError(t, err)
-
-			actual := provider.getIPAddress(context.Background(), dData)
-			assert.Equal(t, test.expected, actual)
-		})
-	}
-}
-
-func TestSwarmGetPort(t *testing.T) {
-	testCases := []struct {
-		service    swarm.Service
-		serverPort string
-		networks   map[string]*docker.NetworkResource
-		expected   string
-	}{
-		{
-			service: swarmService(
-				withEndpointSpec(modeDNSSR),
-			),
-			networks:   map[string]*docker.NetworkResource{},
-			serverPort: "8080",
-			expected:   "8080",
-		},
-	}
-
-	for serviceID, test := range testCases {
-		test := test
-		t.Run(strconv.Itoa(serviceID), func(t *testing.T) {
-			t.Parallel()
-
-			p := Provider{}
+			p := &SwarmProvider{}
 
 			dData, err := p.parseService(context.Background(), test.service, test.networks)
 			require.NoError(t, err)
 
-			actual := getPort(dData, test.serverPort)
+			builder := NewDynConfBuilder(p.Shared, nil)
+			actual := builder.getIPAddress(context.Background(), dData)
 			assert.Equal(t, test.expected, actual)
 		})
 	}
pkg/provider/docker/data.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package docker
+
+import (
+	dockertypes "github.com/docker/docker/api/types"
+	dockercontainertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/go-connections/nat"
+)
+
+// dockerData holds the need data to the provider.
+type dockerData struct {
+	ID              string
+	ServiceName     string
+	Name            string
+	Labels          map[string]string // List of labels set to container or service
+	NetworkSettings networkSettings
+	Health          string
+	Node            *dockertypes.ContainerNode
+	ExtraConf       configuration
+}
+
+// NetworkSettings holds the networks data to the provider.
+type networkSettings struct {
+	NetworkMode dockercontainertypes.NetworkMode
+	Ports       nat.PortMap
+	Networks    map[string]*networkData
+}
+
+// Network holds the network data to the provider.
+type networkData struct {
+	Name     string
+	Addr     string
+	Port     int
+	Protocol string
+	ID       string
+}
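For orientation, a hand-built dockerData value of the shape parseContainer produces later in this diff — the concrete values here are invented for illustration:

	d := dockerData{
		ID:          "abc123",
		Name:        "/whoami",
		ServiceName: "/whoami", // defaults to the container name
		Labels:      map[string]string{"traefik.enable": "true"},
		NetworkSettings: networkSettings{
			Networks: map[string]*networkData{
				"webnet": {Name: "webnet", Addr: "172.18.0.2"},
			},
		},
	}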
@@ -1,602 +0,0 @@
-package docker
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"strconv"
-	"strings"
-	"text/template"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-	"github.com/docker/cli/cli/connhelper"
-	dockertypes "github.com/docker/docker/api/types"
-	dockercontainertypes "github.com/docker/docker/api/types/container"
-	eventtypes "github.com/docker/docker/api/types/events"
-	"github.com/docker/docker/api/types/filters"
-	swarmtypes "github.com/docker/docker/api/types/swarm"
-	"github.com/docker/docker/api/types/versions"
-	"github.com/docker/docker/client"
-	"github.com/docker/go-connections/nat"
-	"github.com/docker/go-connections/sockets"
-	"github.com/rs/zerolog/log"
-	ptypes "github.com/traefik/paerser/types"
-	"github.com/traefik/traefik/v3/pkg/config/dynamic"
-	"github.com/traefik/traefik/v3/pkg/job"
-	"github.com/traefik/traefik/v3/pkg/logs"
-	"github.com/traefik/traefik/v3/pkg/provider"
-	"github.com/traefik/traefik/v3/pkg/safe"
-	"github.com/traefik/traefik/v3/pkg/types"
-	"github.com/traefik/traefik/v3/pkg/version"
-)
-
-const (
-	// DockerAPIVersion is a constant holding the version of the Provider API traefik will use.
-	DockerAPIVersion = "1.24"
-
-	// SwarmAPIVersion is a constant holding the version of the Provider API traefik will use.
-	SwarmAPIVersion = "1.24"
-)
-
-// DefaultTemplateRule The default template for the default rule.
-const DefaultTemplateRule = "Host(`{{ normalize .Name }}`)"
-
-var _ provider.Provider = (*Provider)(nil)
-
-// Provider holds configurations of the provider.
-type Provider struct {
-	Constraints             string           `description:"Constraints is an expression that Traefik matches against the container's labels to determine whether to create any route for that container." json:"constraints,omitempty" toml:"constraints,omitempty" yaml:"constraints,omitempty" export:"true"`
-	Watch                   bool             `description:"Watch Docker events." json:"watch,omitempty" toml:"watch,omitempty" yaml:"watch,omitempty" export:"true"`
-	Endpoint                string           `description:"Docker server endpoint. Can be a tcp or a unix socket endpoint." json:"endpoint,omitempty" toml:"endpoint,omitempty" yaml:"endpoint,omitempty"`
-	DefaultRule             string           `description:"Default rule." json:"defaultRule,omitempty" toml:"defaultRule,omitempty" yaml:"defaultRule,omitempty"`
-	TLS                     *types.ClientTLS `description:"Enable Docker TLS support." json:"tls,omitempty" toml:"tls,omitempty" yaml:"tls,omitempty" export:"true"`
-	ExposedByDefault        bool             `description:"Expose containers by default." json:"exposedByDefault,omitempty" toml:"exposedByDefault,omitempty" yaml:"exposedByDefault,omitempty" export:"true"`
-	UseBindPortIP           bool             `description:"Use the ip address from the bound port, rather than from the inner network." json:"useBindPortIP,omitempty" toml:"useBindPortIP,omitempty" yaml:"useBindPortIP,omitempty" export:"true"`
-	SwarmMode               bool             `description:"Use Docker on Swarm Mode." json:"swarmMode,omitempty" toml:"swarmMode,omitempty" yaml:"swarmMode,omitempty" export:"true"`
-	Network                 string           `description:"Default Docker network used." json:"network,omitempty" toml:"network,omitempty" yaml:"network,omitempty" export:"true"`
-	SwarmModeRefreshSeconds ptypes.Duration  `description:"Polling interval for swarm mode." json:"swarmModeRefreshSeconds,omitempty" toml:"swarmModeRefreshSeconds,omitempty" yaml:"swarmModeRefreshSeconds,omitempty" export:"true"`
-	HTTPClientTimeout       ptypes.Duration  `description:"Client timeout for HTTP connections." json:"httpClientTimeout,omitempty" toml:"httpClientTimeout,omitempty" yaml:"httpClientTimeout,omitempty" export:"true"`
-	AllowEmptyServices      bool             `description:"Disregards the Docker containers health checks with respect to the creation or removal of the corresponding services." json:"allowEmptyServices,omitempty" toml:"allowEmptyServices,omitempty" yaml:"allowEmptyServices,omitempty" export:"true"`
-	defaultRuleTpl          *template.Template
-}
-
-// SetDefaults sets the default values.
-func (p *Provider) SetDefaults() {
-	p.Watch = true
-	p.ExposedByDefault = true
-	p.Endpoint = "unix:///var/run/docker.sock"
-	p.SwarmMode = false
-	p.SwarmModeRefreshSeconds = ptypes.Duration(15 * time.Second)
-	p.DefaultRule = DefaultTemplateRule
-}
-
-// Init the provider.
-func (p *Provider) Init() error {
-	defaultRuleTpl, err := provider.MakeDefaultRuleTemplate(p.DefaultRule, nil)
-	if err != nil {
-		return fmt.Errorf("error while parsing default rule: %w", err)
-	}
-
-	p.defaultRuleTpl = defaultRuleTpl
-	return nil
-}
-
-// dockerData holds the need data to the provider.
-type dockerData struct {
-	ID              string
-	ServiceName     string
-	Name            string
-	Labels          map[string]string // List of labels set to container or service
-	NetworkSettings networkSettings
-	Health          string
-	Node            *dockertypes.ContainerNode
-	ExtraConf       configuration
-}
-
-// NetworkSettings holds the networks data to the provider.
-type networkSettings struct {
-	NetworkMode dockercontainertypes.NetworkMode
-	Ports       nat.PortMap
-	Networks    map[string]*networkData
-}
-
-// Network holds the network data to the provider.
-type networkData struct {
-	Name     string
-	Addr     string
-	Port     int
-	Protocol string
-	ID       string
-}
-
-func (p *Provider) createClient() (client.APIClient, error) {
-	opts, err := p.getClientOpts()
-	if err != nil {
-		return nil, err
-	}
-
-	httpHeaders := map[string]string{
-		"User-Agent": "Traefik " + version.Version,
-	}
-	opts = append(opts, client.WithHTTPHeaders(httpHeaders))
-
-	apiVersion := DockerAPIVersion
-	if p.SwarmMode {
-		apiVersion = SwarmAPIVersion
-	}
-	opts = append(opts, client.WithVersion(apiVersion))
-
-	return client.NewClientWithOpts(opts...)
-}
-
-func (p *Provider) getClientOpts() ([]client.Opt, error) {
-	helper, err := connhelper.GetConnectionHelper(p.Endpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	// SSH
-	if helper != nil {
-		// https://github.com/docker/cli/blob/ebca1413117a3fcb81c89d6be226dcec74e5289f/cli/context/docker/load.go#L112-L123
-
-		httpClient := &http.Client{
-			Transport: &http.Transport{
-				DialContext: helper.Dialer,
-			},
-		}
-
-		return []client.Opt{
-			client.WithHTTPClient(httpClient),
-			client.WithTimeout(time.Duration(p.HTTPClientTimeout)),
-			client.WithHost(helper.Host), // To avoid 400 Bad Request: malformed Host header daemon error
-			client.WithDialContext(helper.Dialer),
-		}, nil
-	}
-
-	opts := []client.Opt{
-		client.WithHost(p.Endpoint),
-		client.WithTimeout(time.Duration(p.HTTPClientTimeout)),
-	}
-
-	if p.TLS != nil {
-		ctx := log.With().Str(logs.ProviderName, "docker").Logger().WithContext(context.Background())
-
-		conf, err := p.TLS.CreateTLSConfig(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("unable to create client TLS configuration: %w", err)
-		}
-
-		hostURL, err := client.ParseHostURL(p.Endpoint)
-		if err != nil {
-			return nil, err
-		}
-
-		tr := &http.Transport{
-			TLSClientConfig: conf,
-		}
-
-		if err := sockets.ConfigureTransport(tr, hostURL.Scheme, hostURL.Host); err != nil {
-			return nil, err
-		}
-
-		opts = append(opts, client.WithHTTPClient(&http.Client{Transport: tr, Timeout: time.Duration(p.HTTPClientTimeout)}))
-	}
-
-	return opts, nil
-}
-
-// Provide allows the docker provider to provide configurations to traefik using the given configuration channel.
-func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
-	pool.GoCtx(func(routineCtx context.Context) {
-		logger := log.Ctx(routineCtx).With().Str(logs.ProviderName, "docker").Logger()
-		ctxLog := logger.WithContext(routineCtx)
-
-		operation := func() error {
-			var err error
-			ctx, cancel := context.WithCancel(ctxLog)
-			defer cancel()
-			ctx = log.Ctx(ctx).With().Str(logs.ProviderName, "docker").Logger().WithContext(ctx)
-
-			dockerClient, err := p.createClient()
-			if err != nil {
-				logger.Error().Err(err).Msg("Failed to create a client for docker, error")
-				return err
-			}
-			defer dockerClient.Close()
-
-			serverVersion, err := dockerClient.ServerVersion(ctx)
-			if err != nil {
-				logger.Error().Err(err).Msg("Failed to retrieve information of the docker client and server host")
-				return err
-			}
-
-			logger.Debug().Msgf("Provider connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion)
-
-			var dockerDataList []dockerData
-			if p.SwarmMode {
-				dockerDataList, err = p.listServices(ctx, dockerClient)
-				if err != nil {
-					logger.Error().Err(err).Msg("Failed to list services for docker swarm mode")
-					return err
-				}
-			} else {
-				dockerDataList, err = p.listContainers(ctx, dockerClient)
-				if err != nil {
-					logger.Error().Err(err).Msg("Failed to list containers for docker")
-					return err
-				}
-			}
-
-			configuration := p.buildConfiguration(ctxLog, dockerDataList)
-			configurationChan <- dynamic.Message{
-				ProviderName:  "docker",
-				Configuration: configuration,
-			}
-			if p.Watch {
-				if p.SwarmMode {
-					errChan := make(chan error)
-
-					// TODO: This need to be change. Linked to Swarm events docker/docker#23827
-					ticker := time.NewTicker(time.Duration(p.SwarmModeRefreshSeconds))
-
-					pool.GoCtx(func(ctx context.Context) {
-						logger := log.Ctx(ctx).With().Str(logs.ProviderName, "docker").Logger()
-						ctx = logger.WithContext(ctx)
-
-						defer close(errChan)
-						for {
-							select {
-							case <-ticker.C:
-								services, err := p.listServices(ctx, dockerClient)
-								if err != nil {
-									logger.Error().Err(err).Msg("Failed to list services for docker swarm mode")
-									errChan <- err
-									return
-								}
-
-								configuration := p.buildConfiguration(ctx, services)
-								if configuration != nil {
-									configurationChan <- dynamic.Message{
-										ProviderName:  "docker",
-										Configuration: configuration,
-									}
-								}
-
-							case <-ctx.Done():
-								ticker.Stop()
-								return
-							}
-						}
-					})
-					if err, ok := <-errChan; ok {
-						return err
-					}
-					// channel closed
-				} else {
-					f := filters.NewArgs()
-					f.Add("type", "container")
-					options := dockertypes.EventsOptions{
-						Filters: f,
-					}
-
-					startStopHandle := func(m eventtypes.Message) {
-						logger.Debug().Msgf("Provider event received %+v", m)
-						containers, err := p.listContainers(ctx, dockerClient)
-						if err != nil {
-							logger.Error().Err(err).Msg("Failed to list containers for docker")
-							// Call cancel to get out of the monitor
-							return
-						}
-
-						configuration := p.buildConfiguration(ctx, containers)
-						if configuration != nil {
-							message := dynamic.Message{
-								ProviderName:  "docker",
-								Configuration: configuration,
-							}
-							select {
-							case configurationChan <- message:
-							case <-ctx.Done():
-							}
-						}
-					}
-
-					eventsc, errc := dockerClient.Events(ctx, options)
-					for {
-						select {
-						case event := <-eventsc:
-							if event.Action == "start" ||
-								event.Action == "die" ||
-								strings.HasPrefix(event.Action, "health_status") {
-								startStopHandle(event)
-							}
-						case err := <-errc:
-							if errors.Is(err, io.EOF) {
-								logger.Debug().Msg("Provider event stream closed")
-							}
-							return err
-						case <-ctx.Done():
-							return nil
-						}
-					}
-				}
-			}
-			return nil
-		}
-
-		notify := func(err error, time time.Duration) {
-			logger.Error().Err(err).Msgf("Provider error, retrying in %s", time)
-		}
-		err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxLog), notify)
-		if err != nil {
-			logger.Error().Err(err).Msg("Cannot retrieve data")
-		}
-	})
-
-	return nil
-}
-
-func (p *Provider) listContainers(ctx context.Context, dockerClient client.ContainerAPIClient) ([]dockerData, error) {
-	containerList, err := dockerClient.ContainerList(ctx, dockertypes.ContainerListOptions{})
-	if err != nil {
-		return nil, err
-	}
-
-	var inspectedContainers []dockerData
-	// get inspect containers
-	for _, container := range containerList {
-		dData := inspectContainers(ctx, dockerClient, container.ID)
-		if len(dData.Name) == 0 {
-			continue
-		}
-
-		extraConf, err := p.getConfiguration(dData)
-		if err != nil {
-			log.Ctx(ctx).Error().Err(err).Msgf("Skip container %s", getServiceName(dData))
-			continue
-		}
-		dData.ExtraConf = extraConf
-
-		inspectedContainers = append(inspectedContainers, dData)
-	}
-	return inspectedContainers, nil
-}
-
-func inspectContainers(ctx context.Context, dockerClient client.ContainerAPIClient, containerID string) dockerData {
-	containerInspected, err := dockerClient.ContainerInspect(ctx, containerID)
-	if err != nil {
-		log.Ctx(ctx).Warn().Err(err).Msgf("Failed to inspect container %s", containerID)
-		return dockerData{}
-	}
-
-	// This condition is here to avoid to have empty IP https://github.com/traefik/traefik/issues/2459
-	// We register only container which are running
-	if containerInspected.ContainerJSONBase != nil && containerInspected.ContainerJSONBase.State != nil && containerInspected.ContainerJSONBase.State.Running {
-		return parseContainer(containerInspected)
-	}
-
-	return dockerData{}
-}
-
-func parseContainer(container dockertypes.ContainerJSON) dockerData {
-	dData := dockerData{
-		NetworkSettings: networkSettings{},
-	}
-
-	if container.ContainerJSONBase != nil {
-		dData.ID = container.ContainerJSONBase.ID
-		dData.Name = container.ContainerJSONBase.Name
-		dData.ServiceName = dData.Name // Default ServiceName to be the container's Name.
-		dData.Node = container.ContainerJSONBase.Node
-
-		if container.ContainerJSONBase.HostConfig != nil {
-			dData.NetworkSettings.NetworkMode = container.ContainerJSONBase.HostConfig.NetworkMode
-		}
-
-		if container.State != nil && container.State.Health != nil {
-			dData.Health = container.State.Health.Status
-		}
-	}
-
-	if container.Config != nil && container.Config.Labels != nil {
-		dData.Labels = container.Config.Labels
-	}
-
-	if container.NetworkSettings != nil {
-		if container.NetworkSettings.Ports != nil {
-			dData.NetworkSettings.Ports = container.NetworkSettings.Ports
-		}
-		if container.NetworkSettings.Networks != nil {
-			dData.NetworkSettings.Networks = make(map[string]*networkData)
-			for name, containerNetwork := range container.NetworkSettings.Networks {
-				addr := containerNetwork.IPAddress
-				if addr == "" {
-					addr = containerNetwork.GlobalIPv6Address
-				}
-
-				dData.NetworkSettings.Networks[name] = &networkData{
-					ID:   containerNetwork.NetworkID,
-					Name: name,
-					Addr: addr,
-				}
-			}
-		}
-	}
-	return dData
-}
-
-func (p *Provider) listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerData, error) {
-	logger := log.Ctx(ctx)
-
-	serviceList, err := dockerClient.ServiceList(ctx, dockertypes.ServiceListOptions{})
-	if err != nil {
-		return nil, err
-	}
-
-	serverVersion, err := dockerClient.ServerVersion(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	networkListArgs := filters.NewArgs()
-	// https://docs.docker.com/engine/api/v1.29/#tag/Network (Docker 17.06)
-	if versions.GreaterThanOrEqualTo(serverVersion.APIVersion, "1.29") {
-		networkListArgs.Add("scope", "swarm")
-	} else {
-		networkListArgs.Add("driver", "overlay")
-	}
-
-	networkList, err := dockerClient.NetworkList(ctx, dockertypes.NetworkListOptions{Filters: networkListArgs})
-	if err != nil {
-		logger.Debug().Err(err).Msg("Failed to network inspect on client for docker")
-		return nil, err
-	}
-
-	networkMap := make(map[string]*dockertypes.NetworkResource)
-	for _, network := range networkList {
-		networkToAdd := network
-		networkMap[network.ID] = &networkToAdd
-	}
-
-	var dockerDataList []dockerData
-	var dockerDataListTasks []dockerData
-
-	for _, service := range serviceList {
-		dData, err := p.parseService(ctx, service, networkMap)
-		if err != nil {
-			logger.Error().Err(err).Msgf("Skip container %s", getServiceName(dData))
-			continue
-		}
-
-		if dData.ExtraConf.Docker.LBSwarm {
-			if len(dData.NetworkSettings.Networks) > 0 {
-				dockerDataList = append(dockerDataList, dData)
-			}
-		} else {
-			isGlobalSvc := service.Spec.Mode.Global != nil
-			dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dData, networkMap, isGlobalSvc)
-			if err != nil {
-				logger.Warn().Err(err).Send()
-			} else {
-				dockerDataList = append(dockerDataList, dockerDataListTasks...)
-			}
-		}
-	}
-	return dockerDataList, err
-}
-
-func (p *Provider) parseService(ctx context.Context, service swarmtypes.Service, networkMap map[string]*dockertypes.NetworkResource) (dockerData, error) {
-	logger := log.Ctx(ctx)
-
-	dData := dockerData{
-		ID:              service.ID,
-		ServiceName:     service.Spec.Annotations.Name,
-		Name:            service.Spec.Annotations.Name,
-		Labels:          service.Spec.Annotations.Labels,
-		NetworkSettings: networkSettings{},
-	}
-
-	extraConf, err := p.getConfiguration(dData)
-	if err != nil {
-		return dockerData{}, err
-	}
-	dData.ExtraConf = extraConf
-
-	if service.Spec.EndpointSpec != nil {
-		if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
-			if dData.ExtraConf.Docker.LBSwarm {
-				logger.Warn().Msgf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Traefik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
-			}
-		} else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
-			dData.NetworkSettings.Networks = make(map[string]*networkData)
-			for _, virtualIP := range service.Endpoint.VirtualIPs {
-				networkService := networkMap[virtualIP.NetworkID]
-				if networkService != nil {
-					if len(virtualIP.Addr) > 0 {
-						ip, _, _ := net.ParseCIDR(virtualIP.Addr)
-						network := &networkData{
-							Name: networkService.Name,
-							ID:   virtualIP.NetworkID,
-							Addr: ip.String(),
-						}
-						dData.NetworkSettings.Networks[network.Name] = network
-					} else {
-						logger.Debug().Msgf("No virtual IPs found in network %s", virtualIP.NetworkID)
-					}
-				} else {
-					logger.Debug().Msgf("Network not found, id: %s", virtualIP.NetworkID)
-				}
-			}
-		}
-	}
-	return dData, nil
-}
-
-func listTasks(ctx context.Context, dockerClient client.APIClient, serviceID string,
-	serviceDockerData dockerData, networkMap map[string]*dockertypes.NetworkResource, isGlobalSvc bool,
-) ([]dockerData, error) {
-	serviceIDFilter := filters.NewArgs()
-	serviceIDFilter.Add("service", serviceID)
-	serviceIDFilter.Add("desired-state", "running")
-
-	taskList, err := dockerClient.TaskList(ctx, dockertypes.TaskListOptions{Filters: serviceIDFilter})
-	if err != nil {
-		return nil, err
-	}
-
-	var dockerDataList []dockerData
-	for _, task := range taskList {
-		if task.Status.State != swarmtypes.TaskStateRunning {
-			continue
-		}
-		dData := parseTasks(ctx, task, serviceDockerData, networkMap, isGlobalSvc)
-		if len(dData.NetworkSettings.Networks) > 0 {
-			dockerDataList = append(dockerDataList, dData)
-		}
-	}
-	return dockerDataList, err
-}
-
-func parseTasks(ctx context.Context, task swarmtypes.Task, serviceDockerData dockerData,
-	networkMap map[string]*dockertypes.NetworkResource, isGlobalSvc bool,
-) dockerData {
-	dData := dockerData{
-		ID:              task.ID,
-		ServiceName:     serviceDockerData.Name,
-		Name:            serviceDockerData.Name + "." + strconv.Itoa(task.Slot),
-		Labels:          serviceDockerData.Labels,
-		ExtraConf:       serviceDockerData.ExtraConf,
-		NetworkSettings: networkSettings{},
-	}
-
-	if isGlobalSvc {
-		dData.Name = serviceDockerData.Name + "." + task.ID
-	}
-
-	if task.NetworksAttachments != nil {
-		dData.NetworkSettings.Networks = make(map[string]*networkData)
-		for _, virtualIP := range task.NetworksAttachments {
-			if networkService, present := networkMap[virtualIP.Network.ID]; present {
-				if len(virtualIP.Addresses) > 0 {
-					// Not sure about this next loop - when would a task have multiple IP's for the same network?
-					for _, addr := range virtualIP.Addresses {
-						ip, _, _ := net.ParseCIDR(addr)
-						network := &networkData{
-							ID:   virtualIP.Network.ID,
-							Name: networkService.Name,
-							Addr: ip.String(),
-						}
-						dData.NetworkSettings.Networks[network.Name] = network
-					}
-				} else {
-					log.Ctx(ctx).Debug().Msgf("No IP addresses found for network %s", virtualIP.Network.ID)
-				}
-			}
-		}
-	}
-	return dData
-}
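The createClient/getClientOpts logic deleted above survives as a package-level helper that both new providers call as createClient(ctx, p.ClientConfig), after pinning their API version. That helper's definition is not included in this capture; a plausible sketch reusing the deleted option-building (ClientConfig's exported field names are an assumption here):

	func createClient(ctx context.Context, cfg ClientConfig) (*client.Client, error) {
		// As in the deleted getClientOpts: host, timeout, pinned API version;
		// the SSH connhelper and TLS branches are elided in this sketch.
		opts := []client.Opt{
			client.WithHost(cfg.Endpoint),
			client.WithTimeout(time.Duration(cfg.HTTPClientTimeout)),
			client.WithHTTPHeaders(map[string]string{"User-Agent": "Traefik " + version.Version}),
			client.WithVersion(cfg.apiVersion),
		}
		return client.NewClientWithOpts(opts...)
	}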
pkg/provider/docker/pdocker.go (new file, 193 lines)
@@ -0,0 +1,193 @@
+package docker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+	dockertypes "github.com/docker/docker/api/types"
+	eventtypes "github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/client"
+	"github.com/rs/zerolog/log"
+	"github.com/traefik/traefik/v3/pkg/config/dynamic"
+	"github.com/traefik/traefik/v3/pkg/job"
+	"github.com/traefik/traefik/v3/pkg/logs"
+	"github.com/traefik/traefik/v3/pkg/provider"
+	"github.com/traefik/traefik/v3/pkg/safe"
+)
+
+// DockerAPIVersion is a constant holding the version of the Provider API traefik will use.
+const DockerAPIVersion = "1.24"
+
+const dockerName = "docker"
+
+var _ provider.Provider = (*Provider)(nil)
+
+// Provider holds configurations of the provider.
+type Provider struct {
+	Shared       `yaml:",inline" export:"true"`
+	ClientConfig `yaml:",inline" export:"true"`
+}
+
+// SetDefaults sets the default values.
+func (p *Provider) SetDefaults() {
+	p.Watch = true
+	p.ExposedByDefault = true
+	p.Endpoint = "unix:///var/run/docker.sock"
+	p.DefaultRule = DefaultTemplateRule
+}
+
+// Init the provider.
+func (p *Provider) Init() error {
+	defaultRuleTpl, err := provider.MakeDefaultRuleTemplate(p.DefaultRule, nil)
+	if err != nil {
+		return fmt.Errorf("error while parsing default rule: %w", err)
+	}
+
+	p.defaultRuleTpl = defaultRuleTpl
+	return nil
+}
+
+func (p *Provider) createClient(ctx context.Context) (*client.Client, error) {
+	p.ClientConfig.apiVersion = DockerAPIVersion
+	return createClient(ctx, p.ClientConfig)
+}
+
+// Provide allows the docker provider to provide configurations to traefik using the given configuration channel.
+func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
+	pool.GoCtx(func(routineCtx context.Context) {
+		logger := log.Ctx(routineCtx).With().Str(logs.ProviderName, dockerName).Logger()
+		ctxLog := logger.WithContext(routineCtx)
+
+		operation := func() error {
+			var err error
+			ctx, cancel := context.WithCancel(ctxLog)
+			defer cancel()
+
+			ctx = log.Ctx(ctx).With().Str(logs.ProviderName, dockerName).Logger().WithContext(ctx)
+
+			dockerClient, err := p.createClient(ctxLog)
+			if err != nil {
+				logger.Error().Err(err).Msg("Failed to create Docker API client")
+				return err
+			}
+			defer func() { _ = dockerClient.Close() }()
+
+			builder := NewDynConfBuilder(p.Shared, dockerClient)
+
+			serverVersion, err := dockerClient.ServerVersion(ctx)
+			if err != nil {
+				logger.Error().Err(err).Msg("Failed to retrieve information of the docker client and server host")
+				return err
+			}
+
+			logger.Debug().Msgf("Provider connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion)
+
+			dockerDataList, err := p.listContainers(ctx, dockerClient)
+			if err != nil {
+				logger.Error().Err(err).Msg("Failed to list containers for docker")
+				return err
+			}
+
+			configuration := builder.build(ctxLog, dockerDataList)
+			configurationChan <- dynamic.Message{
+				ProviderName:  dockerName,
+				Configuration: configuration,
+			}
+
+			if p.Watch {
+				f := filters.NewArgs()
+				f.Add("type", "container")
+				options := dockertypes.EventsOptions{
+					Filters: f,
+				}
+
+				startStopHandle := func(m eventtypes.Message) {
+					logger.Debug().Msgf("Provider event received %+v", m)
+					containers, err := p.listContainers(ctx, dockerClient)
+					if err != nil {
+						logger.Error().Err(err).Msg("Failed to list containers for docker")
+						// Call cancel to get out of the monitor
+						return
+					}
+
+					configuration := builder.build(ctx, containers)
+					if configuration != nil {
+						message := dynamic.Message{
+							ProviderName:  dockerName,
+							Configuration: configuration,
+						}
+						select {
+						case configurationChan <- message:
+						case <-ctx.Done():
+						}
+					}
+				}
+
+				eventsc, errc := dockerClient.Events(ctx, options)
+				for {
+					select {
+					case event := <-eventsc:
+						if event.Action == "start" ||
+							event.Action == "die" ||
+							strings.HasPrefix(event.Action, "health_status") {
+							startStopHandle(event)
+						}
+					case err := <-errc:
+						if errors.Is(err, io.EOF) {
+							logger.Debug().Msg("Provider event stream closed")
+						}
+						return err
+					case <-ctx.Done():
+						return nil
+					}
+				}
+			}
+
+			return nil
+		}
+
+		notify := func(err error, time time.Duration) {
+			logger.Error().Err(err).Msgf("Provider error, retrying in %s", time)
+		}
+		err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxLog), notify)
+		if err != nil {
+			logger.Error().Err(err).Msg("Cannot retrieve data")
+		}
+	})
+
+	return nil
+}
+
+func (p *Provider) listContainers(ctx context.Context, dockerClient client.ContainerAPIClient) ([]dockerData, error) {
+	containerList, err := dockerClient.ContainerList(ctx, dockertypes.ContainerListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	var inspectedContainers []dockerData
+	// get inspect containers
+	for _, container := range containerList {
+		dData := inspectContainers(ctx, dockerClient, container.ID)
+		if len(dData.Name) == 0 {
+			continue
+		}
+
+		extraConf, err := p.extractLabels(dData)
+		if err != nil {
+			log.Ctx(ctx).Error().Err(err).Msgf("Skip container %s", getServiceName(dData))
+			continue
+		}
+
+		dData.ExtraConf = extraConf
+
+		inspectedContainers = append(inspectedContainers, dData)
+	}
+
+	return inspectedContainers, nil
+}
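Wiring the new container provider is unchanged in shape from the old one: SetDefaults, Init, then Provide with a routine pool. A minimal sketch using only the signatures shown above (the consuming loop is illustrative, not part of the commit):

	p := &Provider{}
	p.SetDefaults()
	if err := p.Init(); err != nil {
		panic(err)
	}

	msgs := make(chan dynamic.Message)
	pool := safe.NewPool(context.Background())
	_ = p.Provide(msgs, pool)

	for msg := range msgs {
		fmt.Printf("dynamic configuration from %q\n", msg.ProviderName)
	}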
332
pkg/provider/docker/pswarm.go
Normal file
332
pkg/provider/docker/pswarm.go
Normal file
|
@ -0,0 +1,332 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/rs/zerolog/log"
|
||||
ptypes "github.com/traefik/paerser/types"
|
||||
"github.com/traefik/traefik/v3/pkg/config/dynamic"
|
||||
"github.com/traefik/traefik/v3/pkg/job"
|
||||
"github.com/traefik/traefik/v3/pkg/logs"
|
||||
"github.com/traefik/traefik/v3/pkg/provider"
|
||||
"github.com/traefik/traefik/v3/pkg/safe"
|
||||
)
|
||||
|
||||
// SwarmAPIVersion is a constant holding the version of the Provider API traefik will use.
|
||||
const SwarmAPIVersion = "1.24"
|
||||
|
||||
const swarmName = "swarm"
|
||||
|
||||
var _ provider.Provider = (*SwarmProvider)(nil)
|
||||
|
||||
// SwarmProvider holds configurations of the provider.
|
||||
type SwarmProvider struct {
|
||||
Shared `yaml:",inline" export:"true"`
|
||||
ClientConfig `yaml:",inline" export:"true"`
|
||||
|
||||
RefreshSeconds ptypes.Duration `description:"Polling interval for swarm mode." json:"refreshSeconds,omitempty" toml:"refreshSeconds,omitempty" yaml:"refreshSeconds,omitempty" export:"true"`
|
||||
}
|
||||
|
||||
// SetDefaults sets the default values.
|
||||
func (p *SwarmProvider) SetDefaults() {
|
||||
p.Watch = true
|
||||
p.ExposedByDefault = true
|
||||
p.Endpoint = "unix:///var/run/docker.sock"
|
||||
p.RefreshSeconds = ptypes.Duration(15 * time.Second)
|
||||
p.DefaultRule = DefaultTemplateRule
|
||||
}
|
||||
|
||||
// Init the provider.
|
||||
func (p *SwarmProvider) Init() error {
|
||||
defaultRuleTpl, err := provider.MakeDefaultRuleTemplate(p.DefaultRule, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing default rule: %w", err)
|
||||
}
|
||||
|
||||
p.defaultRuleTpl = defaultRuleTpl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SwarmProvider) createClient(ctx context.Context) (*client.Client, error) {
|
||||
p.ClientConfig.apiVersion = SwarmAPIVersion
|
||||
return createClient(ctx, p.ClientConfig)
|
||||
}
|
||||
|
||||
// Provide allows the docker provider to provide configurations to traefik using the given configuration channel.
|
||||
func (p *SwarmProvider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
|
||||
pool.GoCtx(func(routineCtx context.Context) {
|
||||
logger := log.Ctx(routineCtx).With().Str(logs.ProviderName, swarmName).Logger()
|
||||
ctxLog := logger.WithContext(routineCtx)
|
||||
|
||||
operation := func() error {
|
||||
var err error
|
||||
ctx, cancel := context.WithCancel(ctxLog)
|
||||
defer cancel()
|
||||
|
||||
ctx = log.Ctx(ctx).With().Str(logs.ProviderName, swarmName).Logger().WithContext(ctx)
|
||||
|
||||
dockerClient, err := p.createClient(ctx)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to create Docker API client")
|
||||
return err
|
||||
}
|
||||
defer func() { _ = dockerClient.Close() }()
|
||||
|
||||
builder := NewDynConfBuilder(p.Shared, dockerClient)
|
||||
|
||||
serverVersion, err := dockerClient.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to retrieve information of the docker client and server host")
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Debug().Msgf("Provider connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion)
|
||||
|
||||
dockerDataList, err := p.listServices(ctx, dockerClient)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to list services for docker swarm mode")
|
||||
return err
|
||||
}
|
||||
|
||||
configuration := builder.build(ctxLog, dockerDataList)
|
||||
configurationChan <- dynamic.Message{
|
||||
ProviderName: swarmName,
|
||||
Configuration: configuration,
|
||||
}
|
||||
if p.Watch {
|
||||
errChan := make(chan error)
|
||||
|
||||
// TODO: This need to be change. Linked to Swarm events docker/docker#23827
|
||||
ticker := time.NewTicker(time.Duration(p.RefreshSeconds))
|
||||
|
||||
pool.GoCtx(func(ctx context.Context) {
|
||||
logger := log.Ctx(ctx).With().Str(logs.ProviderName, swarmName).Logger()
|
||||
ctx = logger.WithContext(ctx)
|
||||
|
||||
defer close(errChan)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
services, err := p.listServices(ctx, dockerClient)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to list services for docker swarm mode")
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
configuration := builder.build(ctx, services)
|
||||
if configuration != nil {
|
||||
configurationChan <- dynamic.Message{
|
||||
ProviderName: swarmName,
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
if err, ok := <-errChan; ok {
|
||||
return err
|
||||
}
|
||||
// channel closed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
logger.Error().Err(err).Msgf("Provider error, retrying in %s", time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxLog), notify)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Cannot retrieve data")
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SwarmProvider) listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerData, error) {
|
||||
logger := log.Ctx(ctx)
|
||||
|
||||
serviceList, err := dockerClient.ServiceList(ctx, dockertypes.ServiceListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serverVersion, err := dockerClient.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
networkListArgs := filters.NewArgs()
|
||||
// https://docs.docker.com/engine/api/v1.29/#tag/Network (Docker 17.06)
|
||||
if versions.GreaterThanOrEqualTo(serverVersion.APIVersion, "1.29") {
|
||||
networkListArgs.Add("scope", "swarm")
|
||||
} else {
|
||||
networkListArgs.Add("driver", "overlay")
|
||||
}
|
||||
|
||||
networkList, err := dockerClient.NetworkList(ctx, dockertypes.NetworkListOptions{Filters: networkListArgs})
|
||||
if err != nil {
|
||||
logger.Debug().Err(err).Msg("Failed to network inspect on client for docker")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
networkMap := make(map[string]*dockertypes.NetworkResource)
|
||||
for _, network := range networkList {
|
||||
networkToAdd := network
|
||||
networkMap[network.ID] = &networkToAdd
|
||||
}
|
||||
|
||||
var dockerDataList []dockerData
|
||||
var dockerDataListTasks []dockerData
|
||||
|
||||
for _, service := range serviceList {
|
||||
dData, err := p.parseService(ctx, service, networkMap)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msgf("Skip container %s", getServiceName(dData))
|
||||
continue
|
||||
}
|
||||
|
||||
if dData.ExtraConf.Docker.LBSwarm {
|
||||
if len(dData.NetworkSettings.Networks) > 0 {
|
||||
dockerDataList = append(dockerDataList, dData)
|
||||
}
|
||||
} else {
|
||||
isGlobalSvc := service.Spec.Mode.Global != nil
|
||||
dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dData, networkMap, isGlobalSvc)
|
||||
if err != nil {
|
||||
logger.Warn().Err(err).Send()
|
||||
} else {
|
||||
dockerDataList = append(dockerDataList, dockerDataListTasks...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dockerDataList, err
|
||||
}
|
||||
|
||||
func (p *SwarmProvider) parseService(ctx context.Context, service swarmtypes.Service, networkMap map[string]*dockertypes.NetworkResource) (dockerData, error) {
	logger := log.Ctx(ctx)

	dData := dockerData{
		ID:              service.ID,
		ServiceName:     service.Spec.Annotations.Name,
		Name:            service.Spec.Annotations.Name,
		Labels:          service.Spec.Annotations.Labels,
		NetworkSettings: networkSettings{},
	}

	extraConf, err := p.extractLabels(dData)
	if err != nil {
		return dockerData{}, err
	}
	dData.ExtraConf = extraConf

	if service.Spec.EndpointSpec != nil {
		if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
			if dData.ExtraConf.Docker.LBSwarm {
				logger.Warn().Msgf("Ignored endpoint-mode %s (not supported) for service %s; falling back to Traefik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
			}
		} else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
			dData.NetworkSettings.Networks = make(map[string]*networkData)
			for _, virtualIP := range service.Endpoint.VirtualIPs {
				networkService := networkMap[virtualIP.NetworkID]
				if networkService != nil {
					if len(virtualIP.Addr) > 0 {
						ip, _, _ := net.ParseCIDR(virtualIP.Addr)
						network := &networkData{
							Name: networkService.Name,
							ID:   virtualIP.NetworkID,
							Addr: ip.String(),
						}
						dData.NetworkSettings.Networks[network.Name] = network
					} else {
						logger.Debug().Msgf("No virtual IPs found in network %s", virtualIP.NetworkID)
					}
				} else {
					logger.Debug().Msgf("Network not found, id: %s", virtualIP.NetworkID)
				}
			}
		}
	}
	return dData, nil
}
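
One subtlety in the VIP branch above: swarm reports virtual IPs in CIDR form, and only the address part is kept. Note also that the `ip, _, _ :=` pattern drops the `ParseCIDR` error, so a malformed `Addr` would surface as the string "<nil>". A tiny sketch of the happy path:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Swarm returns VIPs like "10.0.0.3/24"; the prefix length is discarded.
	ip, _, err := net.ParseCIDR("10.0.0.3/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip.String()) // "10.0.0.3"
}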
func listTasks(ctx context.Context, dockerClient client.APIClient, serviceID string,
	serviceDockerData dockerData, networkMap map[string]*dockertypes.NetworkResource, isGlobalSvc bool,
) ([]dockerData, error) {
	serviceIDFilter := filters.NewArgs()
	serviceIDFilter.Add("service", serviceID)
	serviceIDFilter.Add("desired-state", "running")

	taskList, err := dockerClient.TaskList(ctx, dockertypes.TaskListOptions{Filters: serviceIDFilter})
	if err != nil {
		return nil, err
	}

	var dockerDataList []dockerData
	for _, task := range taskList {
		// The desired-state filter above only expresses intent; re-check the
		// actual state so tasks that are still starting are skipped.
		if task.Status.State != swarmtypes.TaskStateRunning {
			continue
		}
		dData := parseTasks(ctx, task, serviceDockerData, networkMap, isGlobalSvc)
		if len(dData.NetworkSettings.Networks) > 0 {
			dockerDataList = append(dockerDataList, dData)
		}
	}
	return dockerDataList, err
}
func parseTasks(ctx context.Context, task swarmtypes.Task, serviceDockerData dockerData,
	networkMap map[string]*dockertypes.NetworkResource, isGlobalSvc bool,
) dockerData {
	dData := dockerData{
		ID:              task.ID,
		ServiceName:     serviceDockerData.Name,
		Name:            serviceDockerData.Name + "." + strconv.Itoa(task.Slot),
		Labels:          serviceDockerData.Labels,
		ExtraConf:       serviceDockerData.ExtraConf,
		NetworkSettings: networkSettings{},
	}

	if isGlobalSvc {
		dData.Name = serviceDockerData.Name + "." + task.ID
	}

	if task.NetworksAttachments != nil {
		dData.NetworkSettings.Networks = make(map[string]*networkData)
		for _, virtualIP := range task.NetworksAttachments {
			if networkService, present := networkMap[virtualIP.Network.ID]; present {
				if len(virtualIP.Addresses) > 0 {
					// Not sure about this next loop - when would a task have multiple IPs for the same network?
					for _, addr := range virtualIP.Addresses {
						ip, _, _ := net.ParseCIDR(addr)
						network := &networkData{
							ID:   virtualIP.Network.ID,
							Name: networkService.Name,
							Addr: ip.String(),
						}
						dData.NetworkSettings.Networks[network.Name] = network
					}
				} else {
					log.Ctx(ctx).Debug().Msgf("No IP addresses found for network %s", virtualIP.Network.ID)
				}
			}
		}
	}
	return dData
}
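
The naming rule above is easy to miss: replicated tasks get a slot suffix, while global tasks (one per node, with no meaningful slot) get the task ID instead, which keeps server names unique. A minimal sketch of that rule in isolation — `taskName` is our illustrative helper, not Traefik's:

package main

import (
	"fmt"
	"strconv"
)

// taskName mirrors the naming logic of parseTasks above.
func taskName(serviceName, taskID string, slot int, isGlobal bool) string {
	if isGlobal {
		return serviceName + "." + taskID // global services: one task per node, no slot
	}
	return serviceName + "." + strconv.Itoa(slot) // replicated services: slot number
}

func main() {
	fmt.Println(taskName("web", "abc123", 2, false)) // "web.2"
	fmt.Println(taskName("web", "abc123", 0, true))  // "web.abc123"
}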

49 pkg/provider/docker/pswarm_mock_test.go Normal file

@ -0,0 +1,49 @@
package docker

import (
	"context"

	dockertypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	dockerclient "github.com/docker/docker/client"
)

type fakeTasksClient struct {
	dockerclient.APIClient
	tasks     []swarm.Task
	container dockertypes.ContainerJSON
	err       error
}

func (c *fakeTasksClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
	return c.tasks, c.err
}

func (c *fakeTasksClient) ContainerInspect(ctx context.Context, container string) (dockertypes.ContainerJSON, error) {
	return c.container, c.err
}

type fakeServicesClient struct {
	dockerclient.APIClient
	dockerVersion string
	networks      []dockertypes.NetworkResource
	services      []swarm.Service
	tasks         []swarm.Task
	err           error
}

func (c *fakeServicesClient) ServiceList(ctx context.Context, options dockertypes.ServiceListOptions) ([]swarm.Service, error) {
	return c.services, c.err
}

func (c *fakeServicesClient) ServerVersion(ctx context.Context) (dockertypes.Version, error) {
	return dockertypes.Version{APIVersion: c.dockerVersion}, c.err
}

func (c *fakeServicesClient) NetworkList(ctx context.Context, options dockertypes.NetworkListOptions) ([]dockertypes.NetworkResource, error) {
	return c.networks, c.err
}

func (c *fakeServicesClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
	return c.tasks, c.err
}
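
Because the fakes embed `dockerclient.APIClient`, they satisfy the full interface while overriding only the calls the provider actually makes; any other method would panic on the nil embedded client, keeping accidental API usage visible in tests. A hypothetical companion test (not part of the commit) showing the intended wiring:

package docker

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Hypothetical test: with no services configured, the fakes return empty
// slices, so listServices should come back empty and error-free.
func TestSwarmProvider_listServices_empty(t *testing.T) {
	dockerClient := &fakeServicesClient{dockerVersion: "1.30"}

	p := SwarmProvider{}

	serviceDockerData, err := p.listServices(context.Background(), dockerClient)
	require.NoError(t, err)
	assert.Empty(t, serviceDockerData)
}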

@ -9,26 +9,10 @@ import (
"github.com/davecgh/go-spew/spew"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type fakeTasksClient struct {
|
||||
dockerclient.APIClient
|
||||
tasks []swarm.Task
|
||||
container dockertypes.ContainerJSON
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *fakeTasksClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
|
||||
return c.tasks, c.err
|
||||
}
|
||||
|
||||
func (c *fakeTasksClient) ContainerInspect(ctx context.Context, container string) (dockertypes.ContainerJSON, error) {
|
||||
return c.container, c.err
|
||||
}
|
||||
|
||||
func TestListTasks(t *testing.T) {
|
||||
testCases := []struct {
|
||||
service swarm.Service
|
||||
|

@ -83,7 +67,7 @@ func TestListTasks(t *testing.T) {
		t.Run(strconv.Itoa(caseID), func(t *testing.T) {
			t.Parallel()

			p := Provider{}
			p := SwarmProvider{}
			dockerData, err := p.parseService(context.Background(), test.service, test.networks)
			require.NoError(t, err)

@ -103,32 +87,7 @@ func TestListTasks(t *testing.T) {
	}
}

type fakeServicesClient struct {
	dockerclient.APIClient
	dockerVersion string
	networks      []dockertypes.NetworkResource
	services      []swarm.Service
	tasks         []swarm.Task
	err           error
}

func (c *fakeServicesClient) ServiceList(ctx context.Context, options dockertypes.ServiceListOptions) ([]swarm.Service, error) {
	return c.services, c.err
}

func (c *fakeServicesClient) ServerVersion(ctx context.Context) (dockertypes.Version, error) {
	return dockertypes.Version{APIVersion: c.dockerVersion}, c.err
}

func (c *fakeServicesClient) NetworkList(ctx context.Context, options dockertypes.NetworkListOptions) ([]dockertypes.NetworkResource, error) {
	return c.networks, c.err
}

func (c *fakeServicesClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
	return c.tasks, c.err
}

func TestListServices(t *testing.T) {
func TestSwarmProvider_listServices(t *testing.T) {
	testCases := []struct {
		desc     string
		services []swarm.Service

@ -277,7 +236,7 @@ func TestListServices(t *testing.T) {

			dockerClient := &fakeServicesClient{services: test.services, tasks: test.tasks, dockerVersion: test.dockerVersion, networks: test.networks}

			p := Provider{}
			p := SwarmProvider{}

			serviceDockerData, err := p.listServices(context.Background(), dockerClient)
			assert.NoError(t, err)

@ -293,7 +252,7 @@ func TestListServices(t *testing.T) {
	}
}

func TestSwarmTaskParsing(t *testing.T) {
func TestSwarmProvider_parseService_task(t *testing.T) {
	testCases := []struct {
		service swarm.Service
		tasks   []swarm.Task

@ -396,7 +355,7 @@ func TestSwarmTaskParsing(t *testing.T) {
		t.Run(strconv.Itoa(caseID), func(t *testing.T) {
			t.Parallel()

			p := Provider{}
			p := SwarmProvider{}

			dData, err := p.parseService(context.Background(), test.service, test.networks)
			require.NoError(t, err)

211 pkg/provider/docker/shared.go Normal file

@ -0,0 +1,211 @@
package docker

import (
	"context"
	"fmt"
	"net/http"
	"text/template"
	"time"

	"github.com/docker/cli/cli/connhelper"
	dockertypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/go-connections/nat"
	"github.com/docker/go-connections/sockets"
	"github.com/rs/zerolog/log"
	ptypes "github.com/traefik/paerser/types"
	"github.com/traefik/traefik/v3/pkg/provider"
	"github.com/traefik/traefik/v3/pkg/types"
	"github.com/traefik/traefik/v3/pkg/version"
)

// DefaultTemplateRule The default template for the default rule.
const DefaultTemplateRule = "Host(`{{ normalize .Name }}`)"

type Shared struct {
	ExposedByDefault   bool   `description:"Expose containers by default." json:"exposedByDefault,omitempty" toml:"exposedByDefault,omitempty" yaml:"exposedByDefault,omitempty" export:"true"`
	Constraints        string `description:"Constraints is an expression that Traefik matches against the container's labels to determine whether to create any route for that container." json:"constraints,omitempty" toml:"constraints,omitempty" yaml:"constraints,omitempty" export:"true"`
	AllowEmptyServices bool   `description:"Disregards the Docker containers health checks with respect to the creation or removal of the corresponding services." json:"allowEmptyServices,omitempty" toml:"allowEmptyServices,omitempty" yaml:"allowEmptyServices,omitempty" export:"true"`
	Network            string `description:"Default Docker network used." json:"network,omitempty" toml:"network,omitempty" yaml:"network,omitempty" export:"true"`
	UseBindPortIP      bool   `description:"Use the ip address from the bound port, rather than from the inner network." json:"useBindPortIP,omitempty" toml:"useBindPortIP,omitempty" yaml:"useBindPortIP,omitempty" export:"true"`

	Watch       bool   `description:"Watch Docker events." json:"watch,omitempty" toml:"watch,omitempty" yaml:"watch,omitempty" export:"true"`
	DefaultRule string `description:"Default rule." json:"defaultRule,omitempty" toml:"defaultRule,omitempty" yaml:"defaultRule,omitempty"`

	defaultRuleTpl *template.Template
}
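
DefaultTemplateRule is an ordinary Go text/template; in Traefik, `normalize` is registered as a template function backed by provider.Normalize. A minimal rendering sketch, with a deliberately simplified stand-in for normalize:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// "normalize" here is a simplified stand-in, not provider.Normalize.
	tpl := template.Must(template.New("rule").Funcs(template.FuncMap{
		"normalize": func(s string) string {
			return strings.ToLower(strings.Trim(s, "/"))
		},
	}).Parse("Host(`{{ normalize .Name }}`)"))

	_ = tpl.Execute(os.Stdout, struct{ Name string }{Name: "/My-App"})
	// prints: Host(`my-app`)
}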

func inspectContainers(ctx context.Context, dockerClient client.ContainerAPIClient, containerID string) dockerData {
	containerInspected, err := dockerClient.ContainerInspect(ctx, containerID)
	if err != nil {
		log.Ctx(ctx).Warn().Err(err).Msgf("Failed to inspect container %s", containerID)
		return dockerData{}
	}

	// This condition avoids empty IPs (https://github.com/traefik/traefik/issues/2459):
	// only containers that are running are registered.
	if containerInspected.ContainerJSONBase != nil && containerInspected.ContainerJSONBase.State != nil && containerInspected.ContainerJSONBase.State.Running {
		return parseContainer(containerInspected)
	}

	return dockerData{}
}

func parseContainer(container dockertypes.ContainerJSON) dockerData {
	dData := dockerData{
		NetworkSettings: networkSettings{},
	}

	if container.ContainerJSONBase != nil {
		dData.ID = container.ContainerJSONBase.ID
		dData.Name = container.ContainerJSONBase.Name
		dData.ServiceName = dData.Name // Default ServiceName to be the container's Name.
		dData.Node = container.ContainerJSONBase.Node

		if container.ContainerJSONBase.HostConfig != nil {
			dData.NetworkSettings.NetworkMode = container.ContainerJSONBase.HostConfig.NetworkMode
		}

		if container.State != nil && container.State.Health != nil {
			dData.Health = container.State.Health.Status
		}
	}

	if container.Config != nil && container.Config.Labels != nil {
		dData.Labels = container.Config.Labels
	}

	if container.NetworkSettings != nil {
		if container.NetworkSettings.Ports != nil {
			dData.NetworkSettings.Ports = container.NetworkSettings.Ports
		}
		if container.NetworkSettings.Networks != nil {
			dData.NetworkSettings.Networks = make(map[string]*networkData)
			for name, containerNetwork := range container.NetworkSettings.Networks {
				addr := containerNetwork.IPAddress
				if addr == "" {
					addr = containerNetwork.GlobalIPv6Address
				}

				dData.NetworkSettings.Networks[name] = &networkData{
					ID:   containerNetwork.NetworkID,
					Name: name,
					Addr: addr,
				}
			}
		}
	}
	return dData
}
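
The address selection in parseContainer prefers the network's IPv4 address and falls back to the global IPv6 address for IPv6-only networks. Sketched on its own, with a hypothetical helper name:

package main

import "fmt"

// pickAddr mirrors the fallback above: IPv4 first, global IPv6 otherwise.
func pickAddr(ipv4, globalIPv6 string) string {
	if ipv4 == "" {
		return globalIPv6
	}
	return ipv4
}

func main() {
	fmt.Println(pickAddr("172.17.0.2", "2001:db8::2")) // 172.17.0.2
	fmt.Println(pickAddr("", "2001:db8::2"))           // 2001:db8::2
}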

type ClientConfig struct {
	apiVersion string

	Endpoint          string           `description:"Docker server endpoint. Can be a TCP or a Unix socket endpoint." json:"endpoint,omitempty" toml:"endpoint,omitempty" yaml:"endpoint,omitempty"`
	TLS               *types.ClientTLS `description:"Enable Docker TLS support." json:"tls,omitempty" toml:"tls,omitempty" yaml:"tls,omitempty" export:"true"`
	HTTPClientTimeout ptypes.Duration  `description:"Client timeout for HTTP connections." json:"httpClientTimeout,omitempty" toml:"httpClientTimeout,omitempty" yaml:"httpClientTimeout,omitempty" export:"true"`
}

func createClient(ctx context.Context, cfg ClientConfig) (*client.Client, error) {
	opts, err := getClientOpts(ctx, cfg)
	if err != nil {
		return nil, err
	}

	httpHeaders := map[string]string{
		"User-Agent": "Traefik " + version.Version,
	}

	opts = append(opts,
		client.WithHTTPHeaders(httpHeaders),
		client.WithVersion(cfg.apiVersion))

	return client.NewClientWithOpts(opts...)
}

func getClientOpts(ctx context.Context, cfg ClientConfig) ([]client.Opt, error) {
	helper, err := connhelper.GetConnectionHelper(cfg.Endpoint)
	if err != nil {
		return nil, err
	}

	// SSH
	if helper != nil {
		// https://github.com/docker/cli/blob/ebca1413117a3fcb81c89d6be226dcec74e5289f/cli/context/docker/load.go#L112-L123

		httpClient := &http.Client{
			Transport: &http.Transport{
				DialContext: helper.Dialer,
			},
		}

		return []client.Opt{
			client.WithHTTPClient(httpClient),
			client.WithTimeout(time.Duration(cfg.HTTPClientTimeout)),
			client.WithHost(helper.Host), // To avoid 400 Bad Request: malformed Host header daemon error
			client.WithDialContext(helper.Dialer),
		}, nil
	}

	opts := []client.Opt{
		client.WithHost(cfg.Endpoint),
		client.WithTimeout(time.Duration(cfg.HTTPClientTimeout)),
	}

	if cfg.TLS != nil {
		conf, err := cfg.TLS.CreateTLSConfig(ctx)
		if err != nil {
			return nil, fmt.Errorf("unable to create client TLS configuration: %w", err)
		}

		hostURL, err := client.ParseHostURL(cfg.Endpoint)
		if err != nil {
			return nil, err
		}

		tr := &http.Transport{
			TLSClientConfig: conf,
		}

		if err := sockets.ConfigureTransport(tr, hostURL.Scheme, hostURL.Host); err != nil {
			return nil, err
		}

		opts = append(opts, client.WithHTTPClient(&http.Client{Transport: tr, Timeout: time.Duration(cfg.HTTPClientTimeout)}))
	}

	return opts, nil
}
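
Putting ClientConfig, createClient, and getClientOpts together: a plain `tcp://` or `unix://` endpoint takes the direct branch (optionally with TLS), while an `ssh://user@host` endpoint is detected by connhelper and dialed through SSH. A hypothetical caller, not part of the commit — the endpoint value and function name are ours, and `apiVersion` (normally filled in by the provider) is left empty:

package docker

import (
	"context"
	"time"

	ptypes "github.com/traefik/paerser/types"
)

// exampleClient is a hypothetical sketch of how the pieces above combine.
func exampleClient(ctx context.Context) error {
	cfg := ClientConfig{
		Endpoint:          "unix:///var/run/docker.sock", // an ssh:// endpoint would go through connhelper instead
		HTTPClientTimeout: ptypes.Duration(10 * time.Second),
	}

	dockerClient, err := createClient(ctx, cfg)
	if err != nil {
		return err
	}
	defer dockerClient.Close()

	_, err = dockerClient.ServerVersion(ctx)
	return err
}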

func getPort(container dockerData, serverPort string) string {
	if len(serverPort) > 0 {
		return serverPort
	}

	var ports []nat.Port
	for port := range container.NetworkSettings.Ports {
		ports = append(ports, port)
	}

	less := func(i, j nat.Port) bool {
		return i.Int() < j.Int()
	}
	nat.Sort(ports, less)

	if len(ports) > 0 {
		min := ports[0]
		return min.Port()
	}

	return ""
}
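
When no server port label is set, getPort deliberately picks the lowest exposed port, so multi-port containers behave deterministically. The sorting trick in isolation:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// nat.Port values are "port/proto" strings; Int() extracts the number.
	ports := []nat.Port{"8080/tcp", "80/tcp", "443/tcp"}
	nat.Sort(ports, func(i, j nat.Port) bool { return i.Int() < j.Int() })
	fmt.Println(ports[0].Port()) // "80"
}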

func getServiceName(container dockerData) string {
	serviceName := container.ServiceName

	if values, err := getStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
		serviceName = values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]
	}

	return provider.Normalize(serviceName)
}
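
For docker-compose containers, getServiceName groups replicas of one compose service under a single name built from the compose labels, `<service>_<project>`, before normalizing. A hypothetical check of that rule — the label keys are docker-compose's well-known labels, and the expected output assumes provider.Normalize joins non-alphanumeric runs (including the underscore) with dashes:

package docker

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Hypothetical test, not part of the commit.
func Test_getServiceName_compose(t *testing.T) {
	dData := dockerData{
		ServiceName: "container-name-is-ignored-here",
		Labels: map[string]string{
			"com.docker.compose.project": "myproject",
			"com.docker.compose.service": "web",
		},
	}

	// "web_myproject" before normalization; the underscore becomes a dash.
	assert.Equal(t, "web-myproject", getServiceName(dData))
}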

@ -23,7 +23,7 @@ type specificConfiguration struct {
	LBSwarm bool
}

func (p *Provider) getConfiguration(container dockerData) (configuration, error) {
func (p *Shared) extractLabels(container dockerData) (configuration, error) {
	conf := configuration{
		Enable: p.ExposedByDefault,
		Docker: specificConfiguration{

112 pkg/provider/docker/shared_test.go Normal file

@ -0,0 +1,112 @@
package docker

import (
	"context"
	"strconv"
	"testing"

	docker "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-connections/nat"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func Test_getPort_docker(t *testing.T) {
	testCases := []struct {
		desc       string
		container  docker.ContainerJSON
		serverPort string
		expected   string
	}{
		{
			desc:      "no binding, no server port label",
			container: containerJSON(name("foo")),
			expected:  "",
		},
		{
			desc: "binding, no server port label",
			container: containerJSON(ports(nat.PortMap{
				"80/tcp": {},
			})),
			expected: "80",
		},
		{
			desc: "binding, multiple ports, no server port label",
			container: containerJSON(ports(nat.PortMap{
				"80/tcp":  {},
				"443/tcp": {},
			})),
			expected: "80",
		},
		{
			desc:       "no binding, server port label",
			container:  containerJSON(),
			serverPort: "8080",
			expected:   "8080",
		},
		{
			desc: "binding, server port label",
			container: containerJSON(
				ports(nat.PortMap{
					"80/tcp": {},
				})),
			serverPort: "8080",
			expected:   "8080",
		},
		{
			desc: "binding, multiple ports, server port label",
			container: containerJSON(ports(nat.PortMap{
				"8080/tcp": {},
				"80/tcp":   {},
			})),
			serverPort: "8080",
			expected:   "8080",
		},
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			dData := parseContainer(test.container)

			actual := getPort(dData, test.serverPort)
			assert.Equal(t, test.expected, actual)
		})
	}
}

func Test_getPort_swarm(t *testing.T) {
	testCases := []struct {
		service    swarm.Service
		serverPort string
		networks   map[string]*docker.NetworkResource
		expected   string
	}{
		{
			service: swarmService(
				withEndpointSpec(modeDNSSR),
			),
			networks:   map[string]*docker.NetworkResource{},
			serverPort: "8080",
			expected:   "8080",
		},
	}

	for serviceID, test := range testCases {
		test := test
		t.Run(strconv.Itoa(serviceID), func(t *testing.T) {
			t.Parallel()

			p := SwarmProvider{}

			dData, err := p.parseService(context.Background(), test.service, test.networks)
			require.NoError(t, err)

			actual := getPort(dData, test.serverPort)
			assert.Equal(t, test.expected, actual)
		})
	}
}