
Add a new protocol

Co-authored-by: Gérald Croës <gerald@containo.us>
Julien Salleyron 2019-03-14 09:30:04 +01:00 committed by Traefiker Bot
parent 0ca2149408
commit 4a68d29ce2
231 changed files with 6895 additions and 4395 deletions

247
old/cluster/datastore.go Normal file

@@ -0,0 +1,247 @@
package cluster

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"github.com/abronan/valkeyrie/store"
	"github.com/cenkalti/backoff"
	"github.com/containous/staert"
	"github.com/containous/traefik/job"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/safe"
	uuid "github.com/satori/go.uuid"
)
// Metadata stores the Object plus its metadata
type Metadata struct {
	object Object
	Object []byte
	Lock   string
}

// NewMetadata returns a new Metadata wrapping the given object
func NewMetadata(object Object) *Metadata {
	return &Metadata{object: object}
}

// Marshall marshals the wrapped object into the Object byte slice
func (m *Metadata) Marshall() error {
	var err error
	m.Object, err = json.Marshal(m.object)
	return err
}

func (m *Metadata) unmarshall() error {
	if len(m.Object) == 0 {
		return nil
	}
	return json.Unmarshal(m.Object, m.object)
}

// Listener is called when the Object has been changed in the KV store
type Listener func(Object) error
var _ Store = (*Datastore)(nil)

// Datastore holds a struct synced in a KV store
type Datastore struct {
	kv        staert.KvSource
	ctx       context.Context
	localLock *sync.RWMutex
	meta      *Metadata
	lockKey   string
	listener  Listener
}

// NewDataStore creates a Datastore
func NewDataStore(ctx context.Context, kvSource staert.KvSource, object Object, listener Listener) (*Datastore, error) {
	datastore := Datastore{
		kv:        kvSource,
		ctx:       ctx,
		meta:      &Metadata{object: object},
		lockKey:   kvSource.Prefix + "/lock",
		localLock: &sync.RWMutex{},
		listener:  listener,
	}

	err := datastore.watchChanges()
	if err != nil {
		return nil, err
	}
	return &datastore, nil
}
func (d *Datastore) watchChanges() error {
	stopCh := make(chan struct{})
	kvCh, err := d.kv.Watch(d.lockKey, stopCh, nil)
	if err != nil {
		return fmt.Errorf("error while watching key %s: %v", d.lockKey, err)
	}

	safe.Go(func() {
		ctx, cancel := context.WithCancel(d.ctx)
		operation := func() error {
			for {
				select {
				case <-ctx.Done():
					stopCh <- struct{}{}
					return nil
				case _, ok := <-kvCh:
					if !ok {
						cancel()
						return err
					}

					err = d.reload()
					if err != nil {
						return err
					}

					if d.listener != nil {
						err := d.listener(d.meta.object)
						if err != nil {
							log.Errorf("Error calling datastore listener: %s", err)
						}
					}
				}
			}
		}

		notify := func(err error, time time.Duration) {
			log.Errorf("Error in watch datastore: %+v, retrying in %s", err, time)
		}

		err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
		if err != nil {
			log.Errorf("Error in watch datastore: %v", err)
		}
	})

	return nil
}
func (d *Datastore) reload() error {
	log.Debug("Datastore reload")
	_, err := d.Load()
	return err
}
// Begin creates a transaction with the KV store.
func (d *Datastore) Begin() (Transaction, Object, error) {
	id := uuid.NewV4().String()
	log.Debugf("Transaction %s begins", id)

	remoteLock, err := d.kv.NewLock(d.lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(id)})
	if err != nil {
		return nil, nil, err
	}

	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(d.ctx)

	var errLock error
	go func() {
		_, errLock = remoteLock.Lock(stopCh)
		cancel()
	}()

	select {
	case <-ctx.Done():
		if errLock != nil {
			return nil, nil, errLock
		}
	case <-d.ctx.Done():
		stopCh <- struct{}{}
		return nil, nil, d.ctx.Err()
	}

	// we got the lock! Now make sure we are synced with KV store
	operation := func() error {
		meta := d.get()
		if meta.Lock != id {
			return fmt.Errorf("object lock value: expected %s, got %s", id, meta.Lock)
		}
		return nil
	}

	notify := func(err error, time time.Duration) {
		log.Errorf("Datastore sync error: %v, retrying in %s", err, time)
		err = d.reload()
		if err != nil {
			log.Errorf("Error reloading: %+v", err)
		}
	}

	ebo := backoff.NewExponentialBackOff()
	ebo.MaxElapsedTime = 60 * time.Second

	err = backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify)
	if err != nil {
		return nil, nil, fmt.Errorf("datastore cannot sync: %v", err)
	}

	// we synced with KV store, we can now return Setter
	return &datastoreTransaction{
		Datastore:  d,
		remoteLock: remoteLock,
		id:         id,
	}, d.meta.object, nil
}
func (d *Datastore) get() *Metadata {
	d.localLock.RLock()
	defer d.localLock.RUnlock()
	return d.meta
}

// Load loads the struct atomically from the KV store
func (d *Datastore) Load() (Object, error) {
	d.localLock.Lock()
	defer d.localLock.Unlock()

	// clear Object first, as mapstructure's decoder doesn't have ZeroFields set to true for merging purposes
	d.meta.Object = d.meta.Object[:0]

	err := d.kv.LoadConfig(d.meta)
	if err != nil {
		return nil, err
	}

	err = d.meta.unmarshall()
	if err != nil {
		return nil, err
	}
	return d.meta.object, nil
}

// Get atomically returns the local copy of the struct synced with the KV store
func (d *Datastore) Get() Object {
	d.localLock.RLock()
	defer d.localLock.RUnlock()
	return d.meta.object
}
var _ Transaction = (*datastoreTransaction)(nil)

type datastoreTransaction struct {
	*Datastore
	remoteLock store.Locker
	dirty      bool
	id         string
}

// Commit stores the object in the KV store and releases the remote lock
func (s *datastoreTransaction) Commit(object Object) error {
	s.localLock.Lock()
	defer s.localLock.Unlock()

	if s.dirty {
		return fmt.Errorf("transaction already used, please begin a new one")
	}

	s.Datastore.meta.object = object
	err := s.Datastore.meta.Marshall()
	if err != nil {
		return fmt.Errorf("marshall error: %s", err)
	}

	err = s.kv.StoreConfig(s.Datastore.meta)
	if err != nil {
		return fmt.Errorf("storeConfig error: %s", err)
	}

	err = s.remoteLock.Unlock()
	if err != nil {
		return fmt.Errorf("unlock error: %s", err)
	}

	s.dirty = true
	log.Debugf("Transaction committed %s", s.id)
	return nil
}
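For orientation, here is a minimal usage sketch of the Datastore above. It is not part of this commit: the Configuration type, the updateConfig helper, and the github.com/containous/traefik/old/cluster import path are assumptions for illustration. Begin blocks until the remote lock is acquired and the local copy is synced; Commit writes the new value and releases the lock.

package example

import (
	"context"
	"log"

	"github.com/containous/staert"
	"github.com/containous/traefik/old/cluster"
)

// Configuration is a hypothetical object kept in sync through the datastore.
type Configuration struct {
	Version int `json:"version"`
}

// updateConfig sketches one Begin/Commit cycle against an already configured KV source.
func updateConfig(ctx context.Context, kv staert.KvSource) error {
	datastore, err := cluster.NewDataStore(ctx, kv, &Configuration{}, func(obj cluster.Object) error {
		log.Printf("datastore changed: %+v", obj)
		return nil
	})
	if err != nil {
		return err
	}

	// Begin takes the distributed lock and returns the freshly synced object.
	transaction, obj, err := datastore.Begin()
	if err != nil {
		return err
	}

	current := obj.(*Configuration)
	current.Version++

	// Commit marshals the object, stores it in the KV store and releases the lock;
	// a transaction can only be committed once.
	return transaction.Commit(current)
}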

146
old/cluster/leadership.go Normal file

@@ -0,0 +1,146 @@
package cluster

import (
	"context"
	"net/http"
	"time"

	"github.com/cenkalti/backoff"
	"github.com/containous/mux"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/old/types"
	"github.com/containous/traefik/safe"
	"github.com/docker/leadership"
	"github.com/unrolled/render"
)
const clusterLeaderKeySuffix = "/leader"

var templatesRenderer = render.New(render.Options{
	Directory: "nowhere",
})

// Leadership allows leadership election using a KV store
type Leadership struct {
	*safe.Pool
	*types.Cluster
	candidate *leadership.Candidate
	leader    *safe.Safe
	listeners []LeaderListener
}

// NewLeadership creates a leadership
func NewLeadership(ctx context.Context, cluster *types.Cluster) *Leadership {
	return &Leadership{
		Pool:      safe.NewPool(ctx),
		Cluster:   cluster,
		candidate: leadership.NewCandidate(cluster.Store, cluster.Store.Prefix+clusterLeaderKeySuffix, cluster.Node, 20*time.Second),
		listeners: []LeaderListener{},
		leader:    safe.New(false),
	}
}

// LeaderListener is called when leadership has changed
type LeaderListener func(elected bool) error
// Participate tries to be a leader
func (l *Leadership) Participate(pool *safe.Pool) {
	pool.GoCtx(func(ctx context.Context) {
		log.Debugf("Node %s running for election", l.Cluster.Node)
		defer log.Debugf("Node %s no more running for election", l.Cluster.Node)

		backOff := backoff.NewExponentialBackOff()
		operation := func() error {
			return l.run(ctx, l.candidate)
		}

		notify := func(err error, time time.Duration) {
			log.Errorf("Leadership election error %+v, retrying in %s", err, time)
		}

		err := backoff.RetryNotify(safe.OperationWithRecover(operation), backOff, notify)
		if err != nil {
			log.Errorf("Cannot elect leadership %+v", err)
		}
	})
}
// AddListener adds a leadership listener
func (l *Leadership) AddListener(listener LeaderListener) {
	l.listeners = append(l.listeners, listener)
}

// Resign resigns from being a leader
func (l *Leadership) Resign() {
	l.candidate.Resign()
	log.Infof("Node %s resigned", l.Cluster.Node)
}
func (l *Leadership) run(ctx context.Context, candidate *leadership.Candidate) error {
	electedCh, errCh := candidate.RunForElection()
	for {
		select {
		case elected := <-electedCh:
			l.onElection(elected)
		case err := <-errCh:
			return err
		case <-ctx.Done():
			l.candidate.Resign()
			return nil
		}
	}
}
func (l *Leadership) onElection(elected bool) {
	if elected {
		log.Infof("Node %s elected leader ♚", l.Cluster.Node)
		l.leader.Set(true)
		l.Start()
	} else {
		log.Infof("Node %s elected worker ♝", l.Cluster.Node)
		l.leader.Set(false)
		l.Stop()
	}

	for _, listener := range l.listeners {
		err := listener(elected)
		if err != nil {
			log.Errorf("Error calling Leadership listener: %s", err)
		}
	}
}
type leaderResponse struct {
	Leader     bool   `json:"leader"`
	LeaderNode string `json:"leader_node"`
}

func (l *Leadership) getLeaderHandler(response http.ResponseWriter, request *http.Request) {
	leaderNode := ""
	leaderKv, err := l.Cluster.Store.Get(l.Cluster.Store.Prefix+clusterLeaderKeySuffix, nil)
	if err != nil {
		log.Error(err)
	} else {
		leaderNode = string(leaderKv.Value)
	}

	leader := &leaderResponse{Leader: l.IsLeader(), LeaderNode: leaderNode}

	status := http.StatusOK
	if !leader.Leader {
		// Set status to be `429`, as this will typically cause load balancers to stop sending requests to the instance without removing them from rotation.
		status = http.StatusTooManyRequests
	}

	err = templatesRenderer.JSON(response, status, leader)
	if err != nil {
		log.Error(err)
	}
}
// IsLeader returns true if the current node is the leader
func (l *Leadership) IsLeader() bool {
	return l.leader.Get().(bool)
}

// AddRoutes adds the cluster leader route to a router
func (l *Leadership) AddRoutes(router *mux.Router) {
	// Expose cluster leader
	router.Methods(http.MethodGet).Path("/api/cluster/leader").HandlerFunc(l.getLeaderHandler)
}
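A hedged sketch of how a node might wire the Leadership type above. It is not part of this commit: runLeadership and the :8080 address are hypothetical, and the *types.Cluster is assumed to be already configured with its KV store and node name.

package example

import (
	"context"
	"net/http"

	"github.com/containous/mux"
	"github.com/containous/traefik/log"
	"github.com/containous/traefik/old/cluster"
	"github.com/containous/traefik/old/types"
	"github.com/containous/traefik/safe"
)

// runLeadership runs for election, reacts to leadership changes, and exposes
// GET /api/cluster/leader so a load balancer can probe which node leads.
func runLeadership(ctx context.Context, clusterConfig *types.Cluster) {
	leadership := cluster.NewLeadership(ctx, clusterConfig)

	leadership.AddListener(func(elected bool) error {
		log.Infof("Leadership changed, elected: %v", elected)
		return nil
	})

	pool := safe.NewPool(ctx)
	leadership.Participate(pool)

	router := mux.NewRouter()
	leadership.AddRoutes(router)

	// Followers answer 429 on this route, so health checks keep traffic on the leader.
	log.Error(http.ListenAndServe(":8080", router))
}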

16
old/cluster/store.go Normal file

@@ -0,0 +1,16 @@
package cluster

// Object is the struct to store
type Object interface{}

// Store is a generic interface that represents a storage
type Store interface {
	Load() (Object, error)
	Get() Object
	Begin() (Transaction, Object, error)
}

// Transaction allows an object to be set in the KV store
type Transaction interface {
	Commit(object Object) error
}
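The Store and Transaction interfaces keep callers independent of the concrete Datastore. As a hedged illustration (the withTransaction helper is hypothetical, not part of this commit), a consumer only needs the interfaces above:

package cluster

import "fmt"

// withTransaction is a hypothetical helper that works against any Store implementation.
func withTransaction(s Store, update func(Object) (Object, error)) error {
	transaction, current, err := s.Begin()
	if err != nil {
		return fmt.Errorf("begin transaction: %v", err)
	}

	updated, err := update(current)
	if err != nil {
		// Note: Transaction has no rollback; with the Datastore above, the remote
		// lock is only released by Commit or by its TTL expiring.
		return err
	}

	return transaction.Commit(updated)
}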