Vendor main dependencies.

This commit is contained in: parent 49a09ab7dd, commit dd5e3fba01
2738 changed files with 1045689 additions and 0 deletions
469 vendor/github.com/docker/libkv/store/boltdb/boltdb.go (generated, vendored, new file)
@@ -0,0 +1,469 @@
package boltdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"github.com/boltdb/bolt"
	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
)

var (
	// ErrMultipleEndpointsUnsupported is thrown when multiple endpoints are specified
	// for BoltDB. The endpoint has to be a local file path
	ErrMultipleEndpointsUnsupported = errors.New("boltdb supports one endpoint and should be a file path")
	// ErrBoltBucketOptionMissing is thrown when the boltBucket config option is missing
	ErrBoltBucketOptionMissing = errors.New("boltBucket config option missing")
)

const (
	filePerm os.FileMode = 0644
)

// BoltDB implements the Store interface
type BoltDB struct {
	client     *bolt.DB
	boltBucket []byte
	dbIndex    uint64
	path       string
	timeout    time.Duration
	// By default libkv opens and closes the bolt DB connection for every
	// get/put operation. This allows multiple apps to use a Bolt DB at the
	// same time.
	// The PersistConnection flag provides an option to override this behavior,
	// i.e. open the connection in New and use it until Close is called.
	PersistConnection bool
	sync.Mutex
}

const (
	libkvmetadatalen = 8
	transientTimeout = time.Duration(10) * time.Second
)

// Register registers boltdb to libkv
func Register() {
	libkv.AddStore(store.BOLTDB, New)
}

// New opens a new BoltDB connection to the specified path and bucket
func New(endpoints []string, options *store.Config) (store.Store, error) {
	var (
		db          *bolt.DB
		err         error
		boltOptions *bolt.Options
	)

	if len(endpoints) > 1 {
		return nil, ErrMultipleEndpointsUnsupported
	}

	if (options == nil) || (len(options.Bucket) == 0) {
		return nil, ErrBoltBucketOptionMissing
	}

	dir, _ := filepath.Split(endpoints[0])
	if err = os.MkdirAll(dir, 0750); err != nil {
		return nil, err
	}

	if options.PersistConnection {
		boltOptions = &bolt.Options{Timeout: options.ConnectionTimeout}
		db, err = bolt.Open(endpoints[0], filePerm, boltOptions)
		if err != nil {
			return nil, err
		}
	}

	b := &BoltDB{
		client:            db,
		path:              endpoints[0],
		boltBucket:        []byte(options.Bucket),
		timeout:           transientTimeout,
		PersistConnection: options.PersistConnection,
	}

	return b, nil
}

func (b *BoltDB) reset() {
	b.path = ""
	b.boltBucket = []byte{}
}

func (b *BoltDB) getDBhandle() (*bolt.DB, error) {
	var (
		db  *bolt.DB
		err error
	)
	if !b.PersistConnection {
		boltOptions := &bolt.Options{Timeout: b.timeout}
		if db, err = bolt.Open(b.path, filePerm, boltOptions); err != nil {
			return nil, err
		}
		b.client = db
	}

	return b.client, nil
}

func (b *BoltDB) releaseDBhandle() {
	if !b.PersistConnection {
		b.client.Close()
	}
}

// Get the value at "key". BoltDB doesn't provide a built-in last-modified index
// with every kv pair; it is implemented with an atomic counter maintained by
// libkv and prepended to the value passed by the client.
func (b *BoltDB) Get(key string) (*store.KVPair, error) {
	var (
		val []byte
		db  *bolt.DB
		err error
	)
	b.Lock()
	defer b.Unlock()

	if db, err = b.getDBhandle(); err != nil {
		return nil, err
	}
	defer b.releaseDBhandle()

	err = db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			return store.ErrKeyNotFound
		}

		v := bucket.Get([]byte(key))
		val = make([]byte, len(v))
		copy(val, v)

		return nil
	})

	if len(val) == 0 {
		return nil, store.ErrKeyNotFound
	}
	if err != nil {
		return nil, err
	}

	dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen])
	val = val[libkvmetadatalen:]

	return &store.KVPair{Key: key, Value: val, LastIndex: dbIndex}, nil
}

// Put the key/value pair. Index number metadata is prepended to the value.
func (b *BoltDB) Put(key string, value []byte, opts *store.WriteOptions) error {
	var (
		dbIndex uint64
		db      *bolt.DB
		err     error
	)
	b.Lock()
	defer b.Unlock()

	dbval := make([]byte, libkvmetadatalen)

	if db, err = b.getDBhandle(); err != nil {
		return err
	}
	defer b.releaseDBhandle()

	err = db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists(b.boltBucket)
		if err != nil {
			return err
		}

		dbIndex = atomic.AddUint64(&b.dbIndex, 1)
		binary.LittleEndian.PutUint64(dbval, dbIndex)
		dbval = append(dbval, value...)

		err = bucket.Put([]byte(key), dbval)
		if err != nil {
			return err
		}
		return nil
	})
	return err
}

// Delete the value for the given key.
func (b *BoltDB) Delete(key string) error {
	var (
		db  *bolt.DB
		err error
	)
	b.Lock()
	defer b.Unlock()

	if db, err = b.getDBhandle(); err != nil {
		return err
	}
	defer b.releaseDBhandle()

	err = db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			return store.ErrKeyNotFound
		}
		err := bucket.Delete([]byte(key))
		return err
	})
	return err
}

// Exists checks if the key exists inside the store
func (b *BoltDB) Exists(key string) (bool, error) {
	var (
		val []byte
		db  *bolt.DB
		err error
	)
	b.Lock()
	defer b.Unlock()

	if db, err = b.getDBhandle(); err != nil {
		return false, err
	}
	defer b.releaseDBhandle()

	err = db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			return store.ErrKeyNotFound
		}

		val = bucket.Get([]byte(key))

		return nil
	})

	if len(val) == 0 {
		return false, err
	}
	return true, err
}

// List returns the range of keys starting with the passed in prefix
func (b *BoltDB) List(keyPrefix string) ([]*store.KVPair, error) {
	var (
		db  *bolt.DB
		err error
	)
	b.Lock()
	defer b.Unlock()

	kv := []*store.KVPair{}

	if db, err = b.getDBhandle(); err != nil {
		return nil, err
	}
	defer b.releaseDBhandle()

	err = db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			return store.ErrKeyNotFound
		}

		cursor := bucket.Cursor()
		prefix := []byte(keyPrefix)

		for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() {
			dbIndex := binary.LittleEndian.Uint64(v[:libkvmetadatalen])
			v = v[libkvmetadatalen:]
			val := make([]byte, len(v))
			copy(val, v)

			kv = append(kv, &store.KVPair{
				Key:       string(key),
				Value:     val,
				LastIndex: dbIndex,
			})
		}
		return nil
	})
	if len(kv) == 0 {
		return nil, store.ErrKeyNotFound
	}
	return kv, err
}

// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (b *BoltDB) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
	var (
		val []byte
		db  *bolt.DB
		err error
	)
	b.Lock()
	defer b.Unlock()

	if previous == nil {
		return false, store.ErrPreviousNotSpecified
	}
	if db, err = b.getDBhandle(); err != nil {
		return false, err
	}
	defer b.releaseDBhandle()

	err = db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			return store.ErrKeyNotFound
		}

		val = bucket.Get([]byte(key))
		if val == nil {
			return store.ErrKeyNotFound
		}
		dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen])
		if dbIndex != previous.LastIndex {
			return store.ErrKeyModified
		}
		err := bucket.Delete([]byte(key))
		return err
	})
	if err != nil {
		return false, err
	}
	return true, err
}

// AtomicPut puts a value at "key" if the key has not been
// modified since the last Put, throws an error if this is the case
func (b *BoltDB) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
	var (
		val     []byte
		dbIndex uint64
		db      *bolt.DB
		err     error
	)
	b.Lock()
	defer b.Unlock()

	dbval := make([]byte, libkvmetadatalen)

	if db, err = b.getDBhandle(); err != nil {
		return false, nil, err
	}
	defer b.releaseDBhandle()

	err = db.Update(func(tx *bolt.Tx) error {
		var err error
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			if previous != nil {
				return store.ErrKeyNotFound
			}
			bucket, err = tx.CreateBucket(b.boltBucket)
			if err != nil {
				return err
			}
		}
		// AtomicPut is equivalent to Put if previous is nil and the key
		// doesn't exist in the DB.
		val = bucket.Get([]byte(key))
		if previous == nil && len(val) != 0 {
			return store.ErrKeyExists
		}
		if previous != nil {
			if len(val) == 0 {
				return store.ErrKeyNotFound
			}
			dbIndex = binary.LittleEndian.Uint64(val[:libkvmetadatalen])
			if dbIndex != previous.LastIndex {
				return store.ErrKeyModified
			}
		}
		dbIndex = atomic.AddUint64(&b.dbIndex, 1)
		binary.LittleEndian.PutUint64(dbval, dbIndex)
		dbval = append(dbval, value...)
		return bucket.Put([]byte(key), dbval)
	})
	if err != nil {
		return false, nil, err
	}

	updated := &store.KVPair{
		Key:       key,
		Value:     value,
		LastIndex: dbIndex,
	}

	return true, updated, nil
}

// Close the db connection to the BoltDB
func (b *BoltDB) Close() {
	b.Lock()
	defer b.Unlock()

	if !b.PersistConnection {
		b.reset()
	} else {
		b.client.Close()
	}
}

// DeleteTree deletes a range of keys with a given prefix
func (b *BoltDB) DeleteTree(keyPrefix string) error {
	var (
		db  *bolt.DB
		err error
	)
	b.Lock()
	defer b.Unlock()

	if db, err = b.getDBhandle(); err != nil {
		return err
	}
	defer b.releaseDBhandle()

	err = db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(b.boltBucket)
		if bucket == nil {
			return store.ErrKeyNotFound
		}

		cursor := bucket.Cursor()
		prefix := []byte(keyPrefix)

		for key, _ := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, _ = cursor.Next() {
			_ = bucket.Delete([]byte(key))
		}
		return nil
	})

	return err
}

// NewLock has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
	return nil, store.ErrCallNotSupported
}

// Watch has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
	return nil, store.ErrCallNotSupported
}

// WatchTree has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
	return nil, store.ErrCallNotSupported
}

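Usage sketch (editor's illustration, not part of the vendored file): with the Register hook above, a client reaches this store through libkv's generic constructor. The database path and bucket name below are illustrative assumptions.

	package main

	import (
		"fmt"
		"log"

		"github.com/docker/libkv"
		"github.com/docker/libkv/store"
		"github.com/docker/libkv/store/boltdb"
	)

	func main() {
		boltdb.Register() // makes store.BOLTDB resolvable via libkv.NewStore

		// A single local file path is the only valid endpoint; Bucket is mandatory.
		kv, err := libkv.NewStore(store.BOLTDB, []string{"/tmp/libkv.db"},
			&store.Config{Bucket: "example"})
		if err != nil {
			log.Fatal(err)
		}
		defer kv.Close()

		if err := kv.Put("hello", []byte("world"), nil); err != nil {
			log.Fatal(err)
		}
		pair, err := kv.Get("hello")
		if err != nil {
			log.Fatal(err)
		}
		// LastIndex is the 8-byte little-endian counter that Put prepends
		// and Get strips; AtomicPut/AtomicDelete compare against it.
		fmt.Println(string(pair.Value), pair.LastIndex)
	}
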
558 vendor/github.com/docker/libkv/store/consul/consul.go (generated, vendored, new file)
@@ -0,0 +1,558 @@
package consul

import (
	"crypto/tls"
	"errors"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	api "github.com/hashicorp/consul/api"
)

const (
	// DefaultWatchWaitTime is how long we block for at a
	// time to check if the watched key has changed. This
	// affects the minimum time it takes to cancel a watch.
	DefaultWatchWaitTime = 15 * time.Second

	// RenewSessionRetryMax is the number of times we should try
	// to renew the session before giving up and throwing an error
	RenewSessionRetryMax = 5

	// MaxSessionDestroyAttempts is the maximum number of times we will try
	// to explicitly destroy the session attached to a lock after
	// the connectivity to the store has been lost
	MaxSessionDestroyAttempts = 5

	// defaultLockTTL is the default ttl for the consul lock
	defaultLockTTL = 20 * time.Second
)

var (
	// ErrMultipleEndpointsUnsupported is thrown when there are
	// multiple endpoints specified for Consul
	ErrMultipleEndpointsUnsupported = errors.New("consul does not support multiple endpoints")

	// ErrSessionRenew is thrown when the session can't be
	// renewed because the Consul version does not support sessions
	ErrSessionRenew = errors.New("cannot set or renew session for ttl, unable to operate on sessions")
)

// Consul is the receiver type for the
// Store interface
type Consul struct {
	sync.Mutex
	config *api.Config
	client *api.Client
}

type consulLock struct {
	lock    *api.Lock
	renewCh chan struct{}
}

// Register registers consul to libkv
func Register() {
	libkv.AddStore(store.CONSUL, New)
}

// New creates a new Consul client given a list
// of endpoints and optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
	if len(endpoints) > 1 {
		return nil, ErrMultipleEndpointsUnsupported
	}

	s := &Consul{}

	// Create Consul client
	config := api.DefaultConfig()
	s.config = config
	config.HttpClient = http.DefaultClient
	config.Address = endpoints[0]
	config.Scheme = "http"

	// Set options
	if options != nil {
		if options.TLS != nil {
			s.setTLS(options.TLS)
		}
		if options.ConnectionTimeout != 0 {
			s.setTimeout(options.ConnectionTimeout)
		}
	}

	// Create a new client
	client, err := api.NewClient(config)
	if err != nil {
		return nil, err
	}
	s.client = client

	return s, nil
}

// setTLS sets Consul TLS options
func (s *Consul) setTLS(tls *tls.Config) {
	s.config.HttpClient.Transport = &http.Transport{
		TLSClientConfig: tls,
	}
	s.config.Scheme = "https"
}

// setTimeout sets the timeout for connecting to Consul
func (s *Consul) setTimeout(time time.Duration) {
	s.config.WaitTime = time
}

// normalize the key for usage in Consul
func (s *Consul) normalize(key string) string {
	key = store.Normalize(key)
	return strings.TrimPrefix(key, "/")
}

func (s *Consul) renewSession(pair *api.KVPair, ttl time.Duration) error {
	// Check if there is any previous session with an active TTL
	session, err := s.getActiveSession(pair.Key)
	if err != nil {
		return err
	}

	if session == "" {
		entry := &api.SessionEntry{
			Behavior:  api.SessionBehaviorDelete, // Delete the key when the session expires
			TTL:       (ttl / 2).String(),        // Consul multiplies the TTL by 2x
			LockDelay: 1 * time.Millisecond,      // Virtually disable lock delay
		}

		// Create the key session
		session, _, err = s.client.Session().Create(entry, nil)
		if err != nil {
			return err
		}

		lockOpts := &api.LockOptions{
			Key:     pair.Key,
			Session: session,
		}

		// Lock and ignore if lock is held
		// It's just a placeholder for the
		// ephemeral behavior
		lock, _ := s.client.LockOpts(lockOpts)
		if lock != nil {
			lock.Lock(nil)
		}
	}

	_, _, err = s.client.Session().Renew(session, nil)
	return err
}

// getActiveSession checks if the key already has
// a session attached
func (s *Consul) getActiveSession(key string) (string, error) {
	pair, _, err := s.client.KV().Get(key, nil)
	if err != nil {
		return "", err
	}
	if pair != nil && pair.Session != "" {
		return pair.Session, nil
	}
	return "", nil
}

// Get the value at "key", returns the last modified index
// to use in conjunction with CAS calls
func (s *Consul) Get(key string) (*store.KVPair, error) {
	options := &api.QueryOptions{
		AllowStale:        false,
		RequireConsistent: true,
	}

	pair, meta, err := s.client.KV().Get(s.normalize(key), options)
	if err != nil {
		return nil, err
	}

	// If pair is nil then the key does not exist
	if pair == nil {
		return nil, store.ErrKeyNotFound
	}

	return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil
}

// Put a value at "key"
func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error {
	key = s.normalize(key)

	p := &api.KVPair{
		Key:   key,
		Value: value,
		Flags: api.LockFlagValue,
	}

	if opts != nil && opts.TTL > 0 {
		// Create or renew a session holding a TTL. Operations on sessions
		// are not deterministic: creating or renewing a session can fail
		for retry := 1; retry <= RenewSessionRetryMax; retry++ {
			err := s.renewSession(p, opts.TTL)
			if err == nil {
				break
			}
			if retry == RenewSessionRetryMax {
				return ErrSessionRenew
			}
		}
	}

	_, err := s.client.KV().Put(p, nil)
	return err
}

// Delete a value at "key"
func (s *Consul) Delete(key string) error {
	if _, err := s.Get(key); err != nil {
		return err
	}
	_, err := s.client.KV().Delete(s.normalize(key), nil)
	return err
}

// Exists checks that the key exists inside the store
func (s *Consul) Exists(key string) (bool, error) {
	_, err := s.Get(key)
	if err != nil {
		if err == store.ErrKeyNotFound {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// List child nodes of a given directory
func (s *Consul) List(directory string) ([]*store.KVPair, error) {
	pairs, _, err := s.client.KV().List(s.normalize(directory), nil)
	if err != nil {
		return nil, err
	}
	if len(pairs) == 0 {
		return nil, store.ErrKeyNotFound
	}

	kv := []*store.KVPair{}

	for _, pair := range pairs {
		if pair.Key == directory {
			continue
		}
		kv = append(kv, &store.KVPair{
			Key:       pair.Key,
			Value:     pair.Value,
			LastIndex: pair.ModifyIndex,
		})
	}

	return kv, nil
}

// DeleteTree deletes a range of keys under a given directory
func (s *Consul) DeleteTree(directory string) error {
	if _, err := s.List(directory); err != nil {
		return err
	}
	_, err := s.client.KV().DeleteTree(s.normalize(directory), nil)
	return err
}

// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
	kv := s.client.KV()
	watchCh := make(chan *store.KVPair)

	go func() {
		defer close(watchCh)

		// Use a wait time in order to check if we should quit
		// from time to time.
		opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}

		for {
			// Check if we should quit
			select {
			case <-stopCh:
				return
			default:
			}

			// Get the key
			pair, meta, err := kv.Get(key, opts)
			if err != nil {
				return
			}

			// If LastIndex didn't change then it means `Get` returned
			// because of the WaitTime and the key didn't change.
			if opts.WaitIndex == meta.LastIndex {
				continue
			}
			opts.WaitIndex = meta.LastIndex

			// Return the value to the channel
			// FIXME: What happens when a key is deleted?
			if pair != nil {
				watchCh <- &store.KVPair{
					Key:       pair.Key,
					Value:     pair.Value,
					LastIndex: pair.ModifyIndex,
				}
			}
		}
	}()

	return watchCh, nil
}

// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
	kv := s.client.KV()
	watchCh := make(chan []*store.KVPair)

	go func() {
		defer close(watchCh)

		// Use a wait time in order to check if we should quit
		// from time to time.
		opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
		for {
			// Check if we should quit
			select {
			case <-stopCh:
				return
			default:
			}

			// Get all the children
			pairs, meta, err := kv.List(directory, opts)
			if err != nil {
				return
			}

			// If LastIndex didn't change then it means `List` returned
			// because of the WaitTime and the child keys didn't change.
			if opts.WaitIndex == meta.LastIndex {
				continue
			}
			opts.WaitIndex = meta.LastIndex

			// Return children KV pairs to the channel
			kvpairs := []*store.KVPair{}
			for _, pair := range pairs {
				if pair.Key == directory {
					continue
				}
				kvpairs = append(kvpairs, &store.KVPair{
					Key:       pair.Key,
					Value:     pair.Value,
					LastIndex: pair.ModifyIndex,
				})
			}
			watchCh <- kvpairs
		}
	}()

	return watchCh, nil
}

// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
	lockOpts := &api.LockOptions{
		Key: s.normalize(key),
	}

	lock := &consulLock{}

	ttl := defaultLockTTL

	if options != nil {
		// Set optional TTL on Lock
		if options.TTL != 0 {
			ttl = options.TTL
		}
		// Set optional value on Lock
		if options.Value != nil {
			lockOpts.Value = options.Value
		}
		// Take the renew chan here so that a nil options
		// pointer is never dereferenced below
		lock.renewCh = options.RenewLock
	}

	entry := &api.SessionEntry{
		Behavior:  api.SessionBehaviorRelease, // Release the lock when the session expires
		TTL:       (ttl / 2).String(),         // Consul multiplies the TTL by 2x
		LockDelay: 1 * time.Millisecond,       // Virtually disable lock delay
	}

	// Create the key session
	session, _, err := s.client.Session().Create(entry, nil)
	if err != nil {
		return nil, err
	}

	// Place the session on lock
	lockOpts.Session = session

	l, err := s.client.LockOpts(lockOpts)
	if err != nil {
		return nil, err
	}

	// Renew the session ttl lock periodically
	s.renewLockSession(entry.TTL, session, lock.renewCh)

	lock.lock = l
	return lock, nil
}

// renewLockSession is used to renew a session Lock, it takes
// a stopRenew chan which is used to explicitly stop the session
// renew process. The renew routine never stops until a signal is
// sent to this channel. If deleting the session fails because the
// connection to the store is lost, it keeps trying to delete the
// session periodically until it can contact the store. This ensures
// that the lock is not maintained indefinitely, which ensures liveness
// over safety for the lock when the store becomes unavailable.
func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan struct{}) {
	sessionDestroyAttempts := 0
	ttl, err := time.ParseDuration(initialTTL)
	if err != nil {
		return
	}
	go func() {
		for {
			select {
			case <-time.After(ttl / 2):
				entry, _, err := s.client.Session().Renew(id, nil)
				if err != nil {
					// If an error occurs, continue until the
					// session gets destroyed explicitly or
					// the session ttl times out
					continue
				}
				if entry == nil {
					return
				}

				// Handle the server updating the TTL
				ttl, _ = time.ParseDuration(entry.TTL)

			case <-stopRenew:
				// Attempt a session destroy
				_, err := s.client.Session().Destroy(id, nil)
				if err == nil {
					return
				}

				if sessionDestroyAttempts >= MaxSessionDestroyAttempts {
					return
				}

				// We can't destroy the session because the store
				// is unavailable, wait for the session renew period
				sessionDestroyAttempts++
				time.Sleep(ttl / 2)
			}
		}
	}()
}

// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *consulLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
	return l.lock.Lock(stopChan)
}

// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *consulLock) Unlock() error {
	if l.renewCh != nil {
		close(l.renewCh)
	}
	return l.lock.Unlock()
}

// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
	p := &api.KVPair{Key: s.normalize(key), Value: value, Flags: api.LockFlagValue}

	if previous == nil {
		// Consul interprets ModifyIndex = 0 as a new key.
		p.ModifyIndex = 0
	} else {
		p.ModifyIndex = previous.LastIndex
	}

	ok, _, err := s.client.KV().CAS(p, nil)
	if err != nil {
		return false, nil, err
	}
	if !ok {
		if previous == nil {
			return false, nil, store.ErrKeyExists
		}
		return false, nil, store.ErrKeyModified
	}

	pair, err := s.Get(key)
	if err != nil {
		return false, nil, err
	}

	return true, pair, nil
}

// AtomicDelete deletes a value at "key" if the key has not
// been modified in the meantime, throws an error if this is the case
func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
	if previous == nil {
		return false, store.ErrPreviousNotSpecified
	}

	p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue}

	// Extra Get operation to check on the key
	_, err := s.Get(key)
	if err == store.ErrKeyNotFound {
		return false, err
	}

	if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
		return false, err
	} else if !work {
		return false, store.ErrKeyModified
	}

	return true, nil
}

// Close closes the client connection
func (s *Consul) Close() {
}

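Usage sketch (editor's illustration, not part of the vendored file): assuming a Consul agent on the conventional local address, a TTL'd Put exercises the session-renewal path implemented above. The address, key, and value are illustrative.

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/docker/libkv"
		"github.com/docker/libkv/store"
		"github.com/docker/libkv/store/consul"
	)

	func main() {
		consul.Register()

		kv, err := libkv.NewStore(store.CONSUL, []string{"127.0.0.1:8500"},
			&store.Config{ConnectionTimeout: 10 * time.Second})
		if err != nil {
			log.Fatal(err)
		}

		// A Put with a TTL goes through renewSession above: a Consul
		// session with SessionBehaviorDelete is attached to the key.
		if err := kv.Put("service/leader", []byte("node-1"),
			&store.WriteOptions{TTL: 30 * time.Second}); err != nil {
			log.Fatal(err)
		}

		pair, err := kv.Get("service/leader")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(pair.Value), pair.LastIndex)
	}
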
606 vendor/github.com/docker/libkv/store/etcd/etcd.go (generated, vendored, new file)
@@ -0,0 +1,606 @@
package etcd

import (
	"crypto/tls"
	"errors"
	"log"
	"net"
	"net/http"
	"strings"
	"time"

	"golang.org/x/net/context"

	etcd "github.com/coreos/etcd/client"
	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
)

var (
	// ErrAbortTryLock is thrown when a user stops trying to seek the lock
	// by sending a signal to the stop chan, this is used to verify if the
	// operation succeeded
	ErrAbortTryLock = errors.New("lock operation aborted")
)

// Etcd is the receiver type for the
// Store interface
type Etcd struct {
	client etcd.KeysAPI
}

type etcdLock struct {
	client    etcd.KeysAPI
	stopLock  chan struct{}
	stopRenew chan struct{}
	key       string
	value     string
	last      *etcd.Response
	ttl       time.Duration
}

const (
	periodicSync      = 5 * time.Minute
	defaultLockTTL    = 20 * time.Second
	defaultUpdateTime = 5 * time.Second
)

// Register registers etcd to libkv
func Register() {
	libkv.AddStore(store.ETCD, New)
}

// New creates a new Etcd client given a list
// of endpoints and an optional tls config
func New(addrs []string, options *store.Config) (store.Store, error) {
	s := &Etcd{}

	var (
		entries []string
		err     error
	)

	entries = store.CreateEndpoints(addrs, "http")
	cfg := &etcd.Config{
		Endpoints:               entries,
		Transport:               etcd.DefaultTransport,
		HeaderTimeoutPerRequest: 3 * time.Second,
	}

	// Set options
	if options != nil {
		if options.TLS != nil {
			setTLS(cfg, options.TLS, addrs)
		}
		if options.ConnectionTimeout != 0 {
			setTimeout(cfg, options.ConnectionTimeout)
		}
		if options.Username != "" {
			setCredentials(cfg, options.Username, options.Password)
		}
	}

	c, err := etcd.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}

	s.client = etcd.NewKeysAPI(c)

	// Periodic cluster sync
	go func() {
		for {
			if err := c.AutoSync(context.Background(), periodicSync); err != nil {
				return
			}
		}
	}()

	return s, nil
}

// setTLS sets the tls configuration given a tls.Config scheme
func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) {
	entries := store.CreateEndpoints(addrs, "https")
	cfg.Endpoints = entries

	// Set transport
	t := http.Transport{
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     tls,
	}

	cfg.Transport = &t
}

// setTimeout sets the timeout used for connecting to the store
func setTimeout(cfg *etcd.Config, time time.Duration) {
	cfg.HeaderTimeoutPerRequest = time
}

// setCredentials sets the username/password credentials for connecting to Etcd
func setCredentials(cfg *etcd.Config, username, password string) {
	cfg.Username = username
	cfg.Password = password
}

// normalize the key for usage in Etcd
func (s *Etcd) normalize(key string) string {
	key = store.Normalize(key)
	return strings.TrimPrefix(key, "/")
}

// keyNotFound checks on the error returned by the KeysAPI
// to verify if the key exists in the store or not
func keyNotFound(err error) bool {
	if err != nil {
		if etcdError, ok := err.(etcd.Error); ok {
			if etcdError.Code == etcd.ErrorCodeKeyNotFound ||
				etcdError.Code == etcd.ErrorCodeNotFile ||
				etcdError.Code == etcd.ErrorCodeNotDir {
				return true
			}
		}
	}
	return false
}

// Get the value at "key", returns the last modified
// index to use in conjunction with Atomic calls
func (s *Etcd) Get(key string) (pair *store.KVPair, err error) {
	getOpts := &etcd.GetOptions{
		Quorum: true,
	}

	result, err := s.client.Get(context.Background(), s.normalize(key), getOpts)
	if err != nil {
		if keyNotFound(err) {
			return nil, store.ErrKeyNotFound
		}
		return nil, err
	}

	pair = &store.KVPair{
		Key:       key,
		Value:     []byte(result.Node.Value),
		LastIndex: result.Node.ModifiedIndex,
	}

	return pair, nil
}

// Put a value at "key"
func (s *Etcd) Put(key string, value []byte, opts *store.WriteOptions) error {
	setOpts := &etcd.SetOptions{}

	// Set options
	if opts != nil {
		setOpts.Dir = opts.IsDir
		setOpts.TTL = opts.TTL
	}

	_, err := s.client.Set(context.Background(), s.normalize(key), string(value), setOpts)
	return err
}

// Delete a value at "key"
func (s *Etcd) Delete(key string) error {
	opts := &etcd.DeleteOptions{
		Recursive: false,
	}

	_, err := s.client.Delete(context.Background(), s.normalize(key), opts)
	if keyNotFound(err) {
		return store.ErrKeyNotFound
	}
	return err
}

// Exists checks if the key exists inside the store
func (s *Etcd) Exists(key string) (bool, error) {
	_, err := s.Get(key)
	if err != nil {
		if err == store.ErrKeyNotFound {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
	opts := &etcd.WatcherOptions{Recursive: false}
	watcher := s.client.Watcher(s.normalize(key), opts)

	// watchCh is sending back events to the caller
	watchCh := make(chan *store.KVPair)

	go func() {
		defer close(watchCh)

		// Get the current value
		pair, err := s.Get(key)
		if err != nil {
			return
		}

		// Push the current value through the channel.
		watchCh <- pair

		for {
			// Check if the watch was stopped by the caller
			select {
			case <-stopCh:
				return
			default:
			}

			result, err := watcher.Next(context.Background())
			if err != nil {
				return
			}

			watchCh <- &store.KVPair{
				Key:       key,
				Value:     []byte(result.Node.Value),
				LastIndex: result.Node.ModifiedIndex,
			}
		}
	}()

	return watchCh, nil
}

// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
	watchOpts := &etcd.WatcherOptions{Recursive: true}
	watcher := s.client.Watcher(s.normalize(directory), watchOpts)

	// watchCh is sending back events to the caller
	watchCh := make(chan []*store.KVPair)

	go func() {
		defer close(watchCh)

		// Get child values
		list, err := s.List(directory)
		if err != nil {
			return
		}

		// Push the current value through the channel.
		watchCh <- list

		for {
			// Check if the watch was stopped by the caller
			select {
			case <-stopCh:
				return
			default:
			}

			_, err := watcher.Next(context.Background())
			if err != nil {
				return
			}

			list, err = s.List(directory)
			if err != nil {
				return
			}

			watchCh <- list
		}
	}()

	return watchCh, nil
}

// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Etcd) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
	var (
		meta *etcd.Response
		err  error
	)

	setOpts := &etcd.SetOptions{}

	if previous != nil {
		setOpts.PrevExist = etcd.PrevExist
		setOpts.PrevIndex = previous.LastIndex
		if previous.Value != nil {
			setOpts.PrevValue = string(previous.Value)
		}
	} else {
		setOpts.PrevExist = etcd.PrevNoExist
	}

	if opts != nil {
		if opts.TTL > 0 {
			setOpts.TTL = opts.TTL
		}
	}

	meta, err = s.client.Set(context.Background(), s.normalize(key), string(value), setOpts)
	if err != nil {
		if etcdError, ok := err.(etcd.Error); ok {
			// Compare failed
			if etcdError.Code == etcd.ErrorCodeTestFailed {
				return false, nil, store.ErrKeyModified
			}
			// Node exists error (when PrevNoExist)
			if etcdError.Code == etcd.ErrorCodeNodeExist {
				return false, nil, store.ErrKeyExists
			}
		}
		return false, nil, err
	}

	updated := &store.KVPair{
		Key:       key,
		Value:     value,
		LastIndex: meta.Node.ModifiedIndex,
	}

	return true, updated, nil
}

// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
	if previous == nil {
		return false, store.ErrPreviousNotSpecified
	}

	delOpts := &etcd.DeleteOptions{}

	delOpts.PrevIndex = previous.LastIndex
	if previous.Value != nil {
		delOpts.PrevValue = string(previous.Value)
	}

	_, err := s.client.Delete(context.Background(), s.normalize(key), delOpts)
	if err != nil {
		if etcdError, ok := err.(etcd.Error); ok {
			// Key Not Found
			if etcdError.Code == etcd.ErrorCodeKeyNotFound {
				return false, store.ErrKeyNotFound
			}
			// Compare failed
			if etcdError.Code == etcd.ErrorCodeTestFailed {
				return false, store.ErrKeyModified
			}
		}
		return false, err
	}

	return true, nil
}

// List child nodes of a given directory
func (s *Etcd) List(directory string) ([]*store.KVPair, error) {
	getOpts := &etcd.GetOptions{
		Quorum:    true,
		Recursive: true,
		Sort:      true,
	}

	resp, err := s.client.Get(context.Background(), s.normalize(directory), getOpts)
	if err != nil {
		if keyNotFound(err) {
			return nil, store.ErrKeyNotFound
		}
		return nil, err
	}

	kv := []*store.KVPair{}
	for _, n := range resp.Node.Nodes {
		kv = append(kv, &store.KVPair{
			Key:       n.Key,
			Value:     []byte(n.Value),
			LastIndex: n.ModifiedIndex,
		})
	}
	return kv, nil
}

// DeleteTree deletes a range of keys under a given directory
func (s *Etcd) DeleteTree(directory string) error {
	delOpts := &etcd.DeleteOptions{
		Recursive: true,
	}

	_, err := s.client.Delete(context.Background(), s.normalize(directory), delOpts)
	if keyNotFound(err) {
		return store.ErrKeyNotFound
	}
	return err
}

// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
	var value string
	ttl := defaultLockTTL
	renewCh := make(chan struct{})

	// Apply options on Lock
	if options != nil {
		if options.Value != nil {
			value = string(options.Value)
		}
		if options.TTL != 0 {
			ttl = options.TTL
		}
		if options.RenewLock != nil {
			renewCh = options.RenewLock
		}
	}

	// Create lock object
	lock = &etcdLock{
		client:    s.client,
		stopRenew: renewCh,
		key:       s.normalize(key),
		value:     value,
		ttl:       ttl,
	}

	return lock, nil
}

// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {

	// Lock holder channel
	lockHeld := make(chan struct{})
	stopLocking := l.stopRenew

	setOpts := &etcd.SetOptions{
		TTL: l.ttl,
	}

	for {
		setOpts.PrevExist = etcd.PrevNoExist
		resp, err := l.client.Set(context.Background(), l.key, l.value, setOpts)
		if err != nil {
			if etcdError, ok := err.(etcd.Error); ok {
				if etcdError.Code != etcd.ErrorCodeNodeExist {
					return nil, err
				}
				setOpts.PrevIndex = ^uint64(0)
			}
		} else {
			setOpts.PrevIndex = resp.Node.ModifiedIndex
		}

		setOpts.PrevExist = etcd.PrevExist
		l.last, err = l.client.Set(context.Background(), l.key, l.value, setOpts)

		if err == nil {
			// Leader section
			l.stopLock = stopLocking
			go l.holdLock(l.key, lockHeld, stopLocking)
			break
		} else {
			// If this is a legitimate error, return
			if etcdError, ok := err.(etcd.Error); ok {
				if etcdError.Code != etcd.ErrorCodeTestFailed {
					return nil, err
				}
			}

			// Seeker section
			errorCh := make(chan error)
			chWStop := make(chan bool)
			free := make(chan bool)

			go l.waitLock(l.key, errorCh, chWStop, free)

			// Wait for the key to be available or for
			// a signal to stop trying to lock the key
			select {
			case <-free:
				break
			case err := <-errorCh:
				return nil, err
			case <-stopChan:
				return nil, ErrAbortTryLock
			}

			// Delete or Expire event occurred
			// Retry
		}
	}

	return lockHeld, nil
}

// holdLock holds the lock as long as we can.
// It updates the key ttl periodically until we receive
// an explicit stop signal from the Unlock method
func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking <-chan struct{}) {
	defer close(lockHeld)

	update := time.NewTicker(l.ttl / 3)
	defer update.Stop()

	var err error
	setOpts := &etcd.SetOptions{TTL: l.ttl}

	for {
		select {
		case <-update.C:
			setOpts.PrevIndex = l.last.Node.ModifiedIndex
			l.last, err = l.client.Set(context.Background(), key, l.value, setOpts)
			if err != nil {
				return
			}

		case <-stopLocking:
			return
		}
	}
}

// waitLock simply waits for the key to be available for creation
func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan bool, free chan<- bool) {
	opts := &etcd.WatcherOptions{Recursive: false}
	watcher := l.client.Watcher(key, opts)

	for {
		event, err := watcher.Next(context.Background())
		if err != nil {
			errorCh <- err
			return
		}
		if event.Action == "delete" || event.Action == "expire" {
			free <- true
			return
		}
	}
}

// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
	if l.stopLock != nil {
		l.stopLock <- struct{}{}
	}
	if l.last != nil {
		delOpts := &etcd.DeleteOptions{
			PrevIndex: l.last.Node.ModifiedIndex,
		}
		_, err := l.client.Delete(context.Background(), l.key, delOpts)
		if err != nil {
			return err
		}
	}
	return nil
}

// Close closes the client connection
func (s *Etcd) Close() {
}

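Usage sketch (editor's illustration, not part of the vendored file): the endpoint below is illustrative; store.CreateEndpoints prepends the "http://" scheme. Passing previous == nil to AtomicPut maps to PrevNoExist above, so the call fails with ErrKeyExists if the key is already present.

	package main

	import (
		"fmt"
		"log"

		"github.com/docker/libkv"
		"github.com/docker/libkv/store"
		"github.com/docker/libkv/store/etcd"
	)

	func main() {
		etcd.Register()

		kv, err := libkv.NewStore(store.ETCD, []string{"127.0.0.1:2379"}, nil)
		if err != nil {
			log.Fatal(err)
		}

		// CAS-style create: succeeds only if the key does not exist yet.
		ok, pair, err := kv.AtomicPut("config/feature", []byte("on"), nil, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(ok, pair.LastIndex)
	}
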
47 vendor/github.com/docker/libkv/store/helpers.go (generated, vendored, new file)
@@ -0,0 +1,47 @@
package store

import (
	"strings"
)

// CreateEndpoints creates a list of endpoints given the right scheme
func CreateEndpoints(addrs []string, scheme string) (entries []string) {
	for _, addr := range addrs {
		entries = append(entries, scheme+"://"+addr)
	}
	return entries
}

// Normalize the key for each store to the form:
//
//     /path/to/key
//
func Normalize(key string) string {
	return "/" + join(SplitKey(key))
}

// GetDirectory gets the full directory part of
// the key to the form:
//
//     /path/to/
//
func GetDirectory(key string) string {
	parts := SplitKey(key)
	parts = parts[:len(parts)-1]
	return "/" + join(parts)
}

// SplitKey splits the key to extract path information
func SplitKey(key string) (path []string) {
	if strings.Contains(key, "/") {
		path = strings.Split(key, "/")
	} else {
		path = []string{key}
	}
	return path
}

// join the path parts with '/'
func join(parts []string) string {
	return strings.Join(parts, "/")
}

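Worked example (editor's illustration, not part of the vendored file): the expected outputs in the comments follow directly from the helper definitions above, using keys without a leading slash.

	package main

	import (
		"fmt"

		"github.com/docker/libkv/store"
	)

	func main() {
		// CreateEndpoints simply prefixes each address with the scheme.
		fmt.Println(store.CreateEndpoints([]string{"1.2.3.4:2379"}, "http")) // [http://1.2.3.4:2379]

		// SplitKey splits on every "/"; Normalize re-joins with a leading slash.
		fmt.Println(store.SplitKey("path/to/key"))     // [path to key]
		fmt.Println(store.Normalize("path/to/key"))    // /path/to/key

		// GetDirectory drops the last path element (no trailing slash in practice).
		fmt.Println(store.GetDirectory("path/to/key")) // /path/to
	}
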
132 vendor/github.com/docker/libkv/store/store.go (generated, vendored, new file)
@@ -0,0 +1,132 @@
package store

import (
	"crypto/tls"
	"errors"
	"time"
)

// Backend represents a KV Store Backend
type Backend string

const (
	// CONSUL backend
	CONSUL Backend = "consul"
	// ETCD backend
	ETCD Backend = "etcd"
	// ZK backend
	ZK Backend = "zk"
	// BOLTDB backend
	BOLTDB Backend = "boltdb"
)

var (
	// ErrBackendNotSupported is thrown when the backend k/v store is not supported by libkv
	ErrBackendNotSupported = errors.New("Backend storage not supported yet, please choose one of")
	// ErrCallNotSupported is thrown when a method is not implemented/supported by the current backend
	ErrCallNotSupported = errors.New("The current call is not supported with this backend")
	// ErrNotReachable is thrown when the API cannot be reached for issuing common store operations
	ErrNotReachable = errors.New("Api not reachable")
	// ErrCannotLock is thrown when there is an error acquiring a lock on a key
	ErrCannotLock = errors.New("Error acquiring the lock")
	// ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store
	ErrKeyModified = errors.New("Unable to complete atomic operation, key modified")
	// ErrKeyNotFound is thrown when the key is not found in the store during a Get operation
	ErrKeyNotFound = errors.New("Key not found in store")
	// ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation
	ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation")
	// ErrKeyExists is thrown when the previous value exists in the case of an AtomicPut
	ErrKeyExists = errors.New("Previous K/V pair exists, cannot complete Atomic operation")
)

// Config contains the options for a storage client
type Config struct {
	ClientTLS         *ClientTLSConfig
	TLS               *tls.Config
	ConnectionTimeout time.Duration
	Bucket            string
	PersistConnection bool
	Username          string
	Password          string
}

// ClientTLSConfig contains data for a Client TLS configuration in the form
// the etcd client wants it. Eventually we'll adapt it for ZK and Consul.
type ClientTLSConfig struct {
	CertFile   string
	KeyFile    string
	CACertFile string
}

// Store represents the backend K/V storage.
// Each store must support every call listed here;
// otherwise it cannot serve as a K/V backend for libkv.
type Store interface {
	// Put a value at the specified key
	Put(key string, value []byte, options *WriteOptions) error

	// Get a value given its key
	Get(key string) (*KVPair, error)

	// Delete the value at the specified key
	Delete(key string) error

	// Exists verifies if a key exists in the store
	Exists(key string) (bool, error)

	// Watch for changes on a key
	Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error)

	// WatchTree watches for changes on child nodes under
	// a given directory
	WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error)

	// NewLock creates a lock for a given key.
	// The returned Locker is not held and must be acquired
	// with `.Lock`. The Value is optional.
	NewLock(key string, options *LockOptions) (Locker, error)

	// List the content of a given prefix
	List(directory string) ([]*KVPair, error)

	// DeleteTree deletes a range of keys under a given directory
	DeleteTree(directory string) error

	// AtomicPut performs an atomic CAS operation on a single value.
	// Pass previous = nil to create a new key.
	AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error)

	// AtomicDelete performs an atomic delete of a single value
	AtomicDelete(key string, previous *KVPair) (bool, error)

	// Close the store connection
	Close()
}

// KVPair represents a {Key, Value, LastIndex} tuple
type KVPair struct {
	Key       string
	Value     []byte
	LastIndex uint64
}

// WriteOptions contains optional request parameters
type WriteOptions struct {
	IsDir bool
	TTL   time.Duration
}

// LockOptions contains optional request parameters
type LockOptions struct {
	Value     []byte        // Optional, value to associate with the lock
	TTL       time.Duration // Optional, expiration ttl associated with the lock
	RenewLock chan struct{} // Optional, chan used to control and stop the session ttl renewal for the lock
}

// Locker provides a locking mechanism on top of the store.
// Similar to `sync.Lock` except it may return errors.
type Locker interface {
	Lock(stopChan chan struct{}) (<-chan struct{}, error)
	Unlock() error
}

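Illustration (editor's sketch, not part of the vendored file): casUpdate below is a hypothetical helper showing the compare-and-swap retry loop that the AtomicPut/AtomicDelete contract is designed for; it works against any backend implementing the Store interface above.

	package kvutil

	import "github.com/docker/libkv/store"

	// casUpdate retries a read-modify-write until no concurrent writer interferes.
	func casUpdate(kv store.Store, key string, change func([]byte) []byte) error {
		for {
			previous, err := kv.Get(key)
			if err != nil && err != store.ErrKeyNotFound {
				return err
			}
			var old []byte
			if previous != nil {
				old = previous.Value
			}
			// previous == nil asks the backend to create the key atomically.
			ok, _, err := kv.AtomicPut(key, change(old), previous, nil)
			if ok {
				return nil
			}
			if err != store.ErrKeyModified && err != store.ErrKeyExists {
				return err
			}
			// Lost the race: re-read and retry.
		}
	}
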
429 vendor/github.com/docker/libkv/store/zookeeper/zookeeper.go (generated, vendored, new file)
@@ -0,0 +1,429 @@
package zookeeper

import (
	"strings"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	zk "github.com/samuel/go-zookeeper/zk"
)

const (
	// SOH control character
	SOH = "\x01"

	defaultTimeout = 10 * time.Second
)

// Zookeeper is the receiver type for
// the Store interface
type Zookeeper struct {
	timeout time.Duration
	client  *zk.Conn
}

type zookeeperLock struct {
	client *zk.Conn
	lock   *zk.Lock
	key    string
	value  []byte
}

// Register registers zookeeper to libkv
func Register() {
	libkv.AddStore(store.ZK, New)
}

// New creates a new Zookeeper client given a
// list of endpoints and an optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
	s := &Zookeeper{}
	s.timeout = defaultTimeout

	// Set options
	if options != nil {
		if options.ConnectionTimeout != 0 {
			s.setTimeout(options.ConnectionTimeout)
		}
	}

	// Connect to Zookeeper
	conn, _, err := zk.Connect(endpoints, s.timeout)
	if err != nil {
		return nil, err
	}
	s.client = conn

	return s, nil
}

// setTimeout sets the timeout for connecting to Zookeeper
func (s *Zookeeper) setTimeout(time time.Duration) {
	s.timeout = time
}

// Get the value at "key", returns the last modified index
|
||||
// to use in conjunction to Atomic calls
|
||||
func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) {
|
||||
resp, meta, err := s.client.Get(s.normalize(key))
|
||||
|
||||
if err != nil {
|
||||
if err == zk.ErrNoNode {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME handle very rare cases where Get returns the
|
||||
// SOH control character instead of the actual value
|
||||
if string(resp) == SOH {
|
||||
return s.Get(store.Normalize(key))
|
||||
}
|
||||
|
||||
pair = &store.KVPair{
|
||||
Key: key,
|
||||
Value: resp,
|
||||
LastIndex: uint64(meta.Version),
|
||||
}
|
||||
|
||||
return pair, nil
|
||||
}
|
||||

// createFullPath creates the entire path for a directory
// that does not exist
func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error {
	for i := 1; i <= len(path); i++ {
		newpath := "/" + strings.Join(path[:i], "/")
		if i == len(path) && ephemeral {
			_, err := s.client.Create(newpath, []byte{}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
			return err
		}
		_, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll))
		if err != nil {
			// Skip if node already exists
			if err != zk.ErrNodeExists {
				return err
			}
		}
	}
	return nil
}

// Put a value at "key"
func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
	fkey := s.normalize(key)

	exists, err := s.Exists(key)
	if err != nil {
		return err
	}

	if !exists {
		// A write with a TTL creates the key as an ephemeral node
		ephemeral := opts != nil && opts.TTL > 0
		if err := s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), ephemeral); err != nil {
			return err
		}
	}

	_, err = s.client.Set(fkey, value, -1)
	return err
}
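
// exampleTTLPut is a hedged sketch, not part of the vendored source. A
// WriteOptions.TTL greater than zero makes Put create the key as an
// ephemeral znode, so it disappears when the creating session ends.
// The key name is hypothetical.
func exampleTTLPut(kv store.Store) error {
	return kv.Put(
		"services/web/heartbeat",
		[]byte("alive"),
		&store.WriteOptions{TTL: 10 * time.Second},
	)
}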

// Delete a value at "key"
func (s *Zookeeper) Delete(key string) error {
	err := s.client.Delete(s.normalize(key), -1)
	if err == zk.ErrNoNode {
		return store.ErrKeyNotFound
	}
	return err
}

// Exists checks if the key exists inside the store
func (s *Zookeeper) Exists(key string) (bool, error) {
	exists, _, err := s.client.Exists(s.normalize(key))
	if err != nil {
		return false, err
	}
	return exists, nil
}

// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
	// Get the key first
	pair, err := s.Get(key)
	if err != nil {
		return nil, err
	}

	// Catch zk notifications and fire changes into the channel.
	watchCh := make(chan *store.KVPair)
	go func() {
		defer close(watchCh)

		// Get returns the current value to the channel prior
		// to listening to any event that may occur on that key
		watchCh <- pair
		for {
			_, _, eventCh, err := s.client.GetW(s.normalize(key))
			if err != nil {
				return
			}
			select {
			case e := <-eventCh:
				if e.Type == zk.EventNodeDataChanged {
					if entry, err := s.Get(key); err == nil {
						watchCh <- entry
					}
				}
			case <-stopCh:
				// There is no way to stop GetW so just quit
				return
			}
		}
	}()

	return watchCh, nil
}
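
// exampleWatch is a hedged sketch, not part of the vendored source: it
// drains the watch channel until the caller closes stopCh, collecting
// every observed value. The key name is hypothetical.
func exampleWatch(kv store.Store, stopCh chan struct{}) ([][]byte, error) {
	events, err := kv.Watch("services/web/config", stopCh)
	if err != nil {
		return nil, err
	}

	var seen [][]byte
	// The channel is closed by the watcher goroutine when the watch
	// ends (stop requested or the GetW re-registration failed).
	for pair := range events {
		seen = append(seen, pair.Value)
	}
	return seen, nil
}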

// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current children's
// values will be sent to the channel. Providing a non-nil
// stopCh can be used to stop watching.
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
	// List the children first
	entries, err := s.List(directory)
	if err != nil {
		return nil, err
	}

	// Catch zk notifications and fire changes into the channel.
	watchCh := make(chan []*store.KVPair)
	go func() {
		defer close(watchCh)

		// List returns the children values to the channel
		// prior to listening to any events that may occur
		// on those keys
		watchCh <- entries

		for {
			_, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
			if err != nil {
				return
			}
			select {
			case e := <-eventCh:
				if e.Type == zk.EventNodeChildrenChanged {
					if kv, err := s.List(directory); err == nil {
						watchCh <- kv
					}
				}
			case <-stopCh:
				// There is no way to stop ChildrenW so just quit
				return
			}
		}
	}()

	return watchCh, nil
}

// List child nodes of a given directory
func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
	keys, stat, err := s.client.Children(s.normalize(directory))
	if err != nil {
		if err == zk.ErrNoNode {
			return nil, store.ErrKeyNotFound
		}
		return nil, err
	}

	kv := []*store.KVPair{}

	// FIXME Costly Get request for each child key..
	for _, key := range keys {
		pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key))
		if err != nil {
			// If the node is not found, the List is out of date: retry.
			// Note that Get maps zk.ErrNoNode to store.ErrKeyNotFound.
			if err == store.ErrKeyNotFound {
				return s.List(directory)
			}
			return nil, err
		}

		kv = append(kv, &store.KVPair{
			Key:       key,
			Value:     pair.Value,
			LastIndex: uint64(stat.Version),
		})
	}

	return kv, nil
}

// DeleteTree deletes a range of keys under a given directory
func (s *Zookeeper) DeleteTree(directory string) error {
	pairs, err := s.List(directory)
	if err != nil {
		return err
	}

	var reqs []interface{}

	for _, pair := range pairs {
		reqs = append(reqs, &zk.DeleteRequest{
			Path:    s.normalize(directory + "/" + pair.Key),
			Version: -1,
		})
	}

	_, err = s.client.Multi(reqs...)
	return err
}

// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) {
	var lastIndex uint64

	if previous != nil {
		meta, err := s.client.Set(s.normalize(key), value, int32(previous.LastIndex))
		if err != nil {
			// Compare failed
			if err == zk.ErrBadVersion {
				return false, nil, store.ErrKeyModified
			}
			return false, nil, err
		}
		lastIndex = uint64(meta.Version)
	} else {
		// Interpret previous == nil as a create operation.
		_, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll))
		if err != nil {
			// Directory does not exist
			if err == zk.ErrNoNode {

				// Create the directory
				parts := store.SplitKey(strings.TrimSuffix(key, "/"))
				parts = parts[:len(parts)-1]
				if err = s.createFullPath(parts, false); err != nil {
					// Failed to create the directory.
					return false, nil, err
				}

				// Create the node
				if _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)); err != nil {
					// Node exists error (when previous is nil)
					if err == zk.ErrNodeExists {
						return false, nil, store.ErrKeyExists
					}
					return false, nil, err
				}

			} else {
				// Node exists error (when previous is nil)
				if err == zk.ErrNodeExists {
					return false, nil, store.ErrKeyExists
				}

				// Unhandled error
				return false, nil, err
			}
		}
		lastIndex = 0 // Newly created nodes have version 0.
	}

	pair := &store.KVPair{
		Key:       key,
		Value:     value,
		LastIndex: lastIndex,
	}

	return true, pair, nil
}
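
// exampleCompareAndSwap is a hedged sketch, not part of the vendored
// source: a read-modify-write retry loop over Get and AtomicPut, which
// is the usual way to consume the LastIndex version. The update
// callback is hypothetical.
func exampleCompareAndSwap(kv store.Store, key string, update func([]byte) []byte) error {
	for {
		pair, err := kv.Get(key)
		if err != nil {
			return err
		}

		ok, _, err := kv.AtomicPut(key, update(pair.Value), pair, nil)
		if ok {
			return nil
		}
		// Retry only when the version compare was lost to a
		// concurrent writer; bail out on anything else.
		if err != store.ErrKeyModified {
			return err
		}
	}
}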

// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
	if previous == nil {
		return false, store.ErrPreviousNotSpecified
	}

	err := s.client.Delete(s.normalize(key), int32(previous.LastIndex))
	if err != nil {
		// Key not found
		if err == zk.ErrNoNode {
			return false, store.ErrKeyNotFound
		}
		// Compare failed
		if err == zk.ErrBadVersion {
			return false, store.ErrKeyModified
		}
		// General store error
		return false, err
	}
	return true, nil
}

// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
	value := []byte("")

	// Apply options
	if options != nil {
		if options.Value != nil {
			value = options.Value
		}
	}

	lock = &zookeeperLock{
		client: s.client,
		key:    s.normalize(key),
		value:  value,
		lock:   zk.NewLock(s.client, s.normalize(key), zk.WorldACL(zk.PermAll)),
	}

	return lock, err
}

// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
	err := l.lock.Lock()

	if err == nil {
		// We hold the lock, we can set our value
		// FIXME: The value is left behind
		// (problematic for leader election)
		_, err = l.client.Set(l.key, l.value, -1)
	}

	return make(chan struct{}), err
}

// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *zookeeperLock) Unlock() error {
	return l.lock.Unlock()
}
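
// exampleWithLock is a hedged sketch, not part of the vendored source:
// it wraps a critical section in NewLock/Lock/Unlock. The key, value,
// and callback are hypothetical.
func exampleWithLock(kv store.Store, criticalSection func()) error {
	l, err := kv.NewLock("locks/deploy", &store.LockOptions{
		Value: []byte("owner-1"),
	})
	if err != nil {
		return err
	}

	// Lock blocks until the lock is acquired; the returned channel
	// (ignored here) would signal a lost lock.
	if _, err := l.Lock(nil); err != nil {
		return err
	}
	defer l.Unlock()

	criticalSection()
	return nil
}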

// Close closes the client connection
func (s *Zookeeper) Close() {
	s.client.Close()
}

// Normalize the key for usage in Zookeeper
func (s *Zookeeper) normalize(key string) string {
	key = store.Normalize(key)
	return strings.TrimSuffix(key, "/")
}