fix: update lego.

Ludovic Fernandez 2019-03-27 11:18:04 +01:00 committed by Traefiker Bot
parent b893374dc1
commit c17de070fb
432 changed files with 182 additions and 259514 deletions

@@ -1,476 +0,0 @@
package boltdb
import (
"bytes"
"encoding/binary"
"errors"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
"github.com/coreos/bbolt"
)
var (
// ErrMultipleEndpointsUnsupported is thrown when multiple endpoints are
// specified for BoltDB. The endpoint has to be a local file path.
ErrMultipleEndpointsUnsupported = errors.New("boltdb supports one endpoint and should be a file path")
// ErrBoltBucketOptionMissing is thrown when the boltBucket config option is missing.
ErrBoltBucketOptionMissing = errors.New("boltBucket config option missing")
)
const (
filePerm os.FileMode = 0644
)
// BoltDB type implements the Store interface.
type BoltDB struct {
client *bolt.DB
boltBucket []byte
dbIndex uint64
path string
timeout time.Duration
// By default valkeyrie opens and closes the bolt DB connection for every
// get/put operation. This allows multiple apps to use a Bolt DB at the
// same time.
// The PersistConnection flag provides an option to override this behavior,
// i.e. open the connection in New and use it until Close is called.
PersistConnection bool
sync.Mutex
}
const (
metadatalen = 8
transientTimeout = time.Duration(10) * time.Second
)
// Register registers boltdb to valkeyrie
func Register() {
valkeyrie.AddStore(store.BOLTDB, New)
}
// New opens a new BoltDB connection to the specified path and bucket
func New(endpoints []string, options *store.Config) (store.Store, error) {
var (
db *bolt.DB
err error
boltOptions *bolt.Options
timeout = transientTimeout
)
if len(endpoints) > 1 {
return nil, ErrMultipleEndpointsUnsupported
}
if (options == nil) || (len(options.Bucket) == 0) {
return nil, ErrBoltBucketOptionMissing
}
dir, _ := filepath.Split(endpoints[0])
if err = os.MkdirAll(dir, 0750); err != nil {
return nil, err
}
if options.PersistConnection {
boltOptions = &bolt.Options{Timeout: options.ConnectionTimeout}
db, err = bolt.Open(endpoints[0], filePerm, boltOptions)
if err != nil {
return nil, err
}
}
if options.ConnectionTimeout != 0 {
timeout = options.ConnectionTimeout
}
b := &BoltDB{
client: db,
path: endpoints[0],
boltBucket: []byte(options.Bucket),
timeout: timeout,
PersistConnection: options.PersistConnection,
}
return b, nil
}
func (b *BoltDB) reset() {
b.path = ""
b.boltBucket = []byte{}
}
func (b *BoltDB) getDBhandle() (*bolt.DB, error) {
var (
db *bolt.DB
err error
)
if !b.PersistConnection {
boltOptions := &bolt.Options{Timeout: b.timeout}
if db, err = bolt.Open(b.path, filePerm, boltOptions); err != nil {
return nil, err
}
b.client = db
}
return b.client, nil
}
func (b *BoltDB) releaseDBhandle() {
if !b.PersistConnection {
b.client.Close()
}
}
// Get the value at "key". BoltDB doesn't provide a built-in last-modified index
// with every kv pair. It is implemented by an atomic counter maintained by
// valkeyrie and prepended to the value passed by the client.
func (b *BoltDB) Get(key string, opts *store.ReadOptions) (*store.KVPair, error) {
var (
val []byte
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return nil, err
}
defer b.releaseDBhandle()
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
v := bucket.Get([]byte(key))
val = make([]byte, len(v))
copy(val, v)
return nil
})
if err != nil {
return nil, err
}
if len(val) == 0 {
return nil, store.ErrKeyNotFound
}
dbIndex := binary.LittleEndian.Uint64(val[:metadatalen])
val = val[metadatalen:]
return &store.KVPair{Key: key, Value: val, LastIndex: dbIndex}, nil
}
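// A minimal round trip of the value layout used by Put/Get above,
// [8-byte little-endian index | user payload]; the concrete numbers are
// illustrative only:
//
//	dbval := make([]byte, metadatalen)          // 8 zero bytes
//	binary.LittleEndian.PutUint64(dbval, 42)    // encode index 42
//	dbval = append(dbval, []byte("payload")...) // append the user value
//
//	idx := binary.LittleEndian.Uint64(dbval[:metadatalen]) // idx == 42
//	val := dbval[metadatalen:]                             // "payload"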
// Put the key, value pair. Index number metadata is prepended to the value.
func (b *BoltDB) Put(key string, value []byte, opts *store.WriteOptions) error {
var (
dbIndex uint64
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
dbval := make([]byte, metadatalen)
if db, err = b.getDBhandle(); err != nil {
return err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(b.boltBucket)
if err != nil {
return err
}
dbIndex = atomic.AddUint64(&b.dbIndex, 1)
binary.LittleEndian.PutUint64(dbval, dbIndex)
dbval = append(dbval, value...)
err = bucket.Put([]byte(key), dbval)
if err != nil {
return err
}
return nil
})
return err
}
// Delete the value for the given key.
func (b *BoltDB) Delete(key string) error {
var (
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
err := bucket.Delete([]byte(key))
return err
})
return err
}
// Exists checks if the key exists inside the store
func (b *BoltDB) Exists(key string, opts *store.ReadOptions) (bool, error) {
var (
val []byte
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return false, err
}
defer b.releaseDBhandle()
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
val = bucket.Get([]byte(key))
return nil
})
if len(val) == 0 {
return false, err
}
return true, err
}
// List returns the range of keys starting with the passed in prefix
func (b *BoltDB) List(keyPrefix string, opts *store.ReadOptions) ([]*store.KVPair, error) {
var (
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
kv := []*store.KVPair{}
if db, err = b.getDBhandle(); err != nil {
return nil, err
}
defer b.releaseDBhandle()
hasResult := false
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
cursor := bucket.Cursor()
prefix := []byte(keyPrefix)
for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() {
hasResult = true
dbIndex := binary.LittleEndian.Uint64(v[:metadatalen])
v = v[metadatalen:]
val := make([]byte, len(v))
copy(val, v)
if string(key) != keyPrefix {
kv = append(kv, &store.KVPair{
Key: string(key),
Value: val,
LastIndex: dbIndex,
})
}
}
return nil
})
if !hasResult {
return nil, store.ErrKeyNotFound
}
return kv, err
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (b *BoltDB) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
var (
val []byte
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
if db, err = b.getDBhandle(); err != nil {
return false, err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
val = bucket.Get([]byte(key))
if val == nil {
return store.ErrKeyNotFound
}
dbIndex := binary.LittleEndian.Uint64(val[:metadatalen])
if dbIndex != previous.LastIndex {
return store.ErrKeyModified
}
err := bucket.Delete([]byte(key))
return err
})
if err != nil {
return false, err
}
return true, err
}
// AtomicPut puts a value at "key" if the key has not been
// modified since the last Put, throws an error if this is the case
func (b *BoltDB) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
var (
val []byte
dbIndex uint64
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
dbval := make([]byte, metadatalen)
if db, err = b.getDBhandle(); err != nil {
return false, nil, err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
var err error
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
if previous != nil {
return store.ErrKeyNotFound
}
bucket, err = tx.CreateBucket(b.boltBucket)
if err != nil {
return err
}
}
// AtomicPut is equivalent to Put if previous is nil and the key
// doesn't exist in the DB.
val = bucket.Get([]byte(key))
if previous == nil && len(val) != 0 {
return store.ErrKeyExists
}
if previous != nil {
if len(val) == 0 {
return store.ErrKeyNotFound
}
dbIndex = binary.LittleEndian.Uint64(val[:metadatalen])
if dbIndex != previous.LastIndex {
return store.ErrKeyModified
}
}
dbIndex = atomic.AddUint64(&b.dbIndex, 1)
binary.LittleEndian.PutUint64(dbval, dbIndex)
dbval = append(dbval, value...)
return bucket.Put([]byte(key), dbval)
})
if err != nil {
return false, nil, err
}
updated := &store.KVPair{
Key: key,
Value: value,
LastIndex: dbIndex,
}
return true, updated, nil
}
// Close the db connection to the BoltDB
func (b *BoltDB) Close() {
b.Lock()
defer b.Unlock()
if !b.PersistConnection {
b.reset()
} else {
b.client.Close()
}
}
// DeleteTree deletes a range of keys with a given prefix
func (b *BoltDB) DeleteTree(keyPrefix string) error {
var (
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
cursor := bucket.Cursor()
prefix := []byte(keyPrefix)
for key, _ := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, _ = cursor.Next() {
_ = bucket.Delete([]byte(key))
}
return nil
})
return err
}
// NewLock has to be implemented at the library level since it's not supported by BoltDB.
func (b *BoltDB) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
return nil, store.ErrCallNotSupported
}
// Watch has to be implemented at the library level since it's not supported by BoltDB.
func (b *BoltDB) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
return nil, store.ErrCallNotSupported
}
// WatchTree has to be implemented at the library level since it's not supported by BoltDB.
func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
return nil, store.ErrCallNotSupported
}
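
For context, a minimal sketch of how this (now removed) BoltDB backend is driven through valkeyrie. The file path, bucket name, and the vendored import path are placeholders/assumptions based on the upstream valkeyrie layout:

package main

import (
	"fmt"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	"github.com/abronan/valkeyrie/store/boltdb"
)

func main() {
	// Register the backend so valkeyrie.NewStore can find it.
	boltdb.Register()

	// Single endpoint: a local file path (see ErrMultipleEndpointsUnsupported).
	kv, err := valkeyrie.NewStore(
		store.BOLTDB,
		[]string{"/tmp/example.db"},
		&store.Config{Bucket: "example"},
	)
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	if err := kv.Put("hello", []byte("world"), nil); err != nil {
		panic(err)
	}

	// Get strips the prepended 8-byte index and reports it as LastIndex,
	// which later feeds AtomicPut/AtomicDelete.
	pair, err := kv.Get("hello", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pair.Value), pair.LastIndex)
}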

@@ -1,575 +0,0 @@
package consul
import (
"crypto/tls"
"errors"
"net/http"
"strings"
"sync"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
api "github.com/hashicorp/consul/api"
)
const (
// DefaultWatchWaitTime is how long we block for at a
// time to check if the watched key has changed. This
// affects the minimum time it takes to cancel a watch.
DefaultWatchWaitTime = 15 * time.Second
// RenewSessionRetryMax is the number of times we should try
// to renew the session before giving up and throwing an error
RenewSessionRetryMax = 5
// MaxSessionDestroyAttempts is the maximum number of times we will try
// to explicitly destroy the session attached to a lock after
// the connectivity to the store has been lost
MaxSessionDestroyAttempts = 5
// defaultLockTTL is the default ttl for the consul lock
defaultLockTTL = 20 * time.Second
)
var (
// ErrMultipleEndpointsUnsupported is thrown when there are
// multiple endpoints specified for Consul
ErrMultipleEndpointsUnsupported = errors.New("consul does not support multiple endpoints")
// ErrSessionRenew is thrown when the session can't be
// renewed because the Consul version does not support sessions
ErrSessionRenew = errors.New("cannot set or renew session for ttl, unable to operate on sessions")
)
// Consul is the receiver type for the
// Store interface
type Consul struct {
sync.Mutex
config *api.Config
client *api.Client
}
type consulLock struct {
lock *api.Lock
renewCh chan struct{}
}
// Register registers consul to valkeyrie
func Register() {
valkeyrie.AddStore(store.CONSUL, New)
}
// New creates a new Consul client given a list
// of endpoints and optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
if len(endpoints) > 1 {
return nil, ErrMultipleEndpointsUnsupported
}
s := &Consul{}
// Create Consul client
config := api.DefaultConfig()
s.config = config
config.HttpClient = http.DefaultClient
config.Address = endpoints[0]
// Set options
if options != nil {
if options.TLS != nil {
s.setTLS(options.TLS)
}
if options.ConnectionTimeout != 0 {
s.setTimeout(options.ConnectionTimeout)
}
}
// Creates a new client
client, err := api.NewClient(config)
if err != nil {
return nil, err
}
s.client = client
return s, nil
}
// SetTLS sets Consul TLS options
func (s *Consul) setTLS(tls *tls.Config) {
s.config.HttpClient.Transport = &http.Transport{
TLSClientConfig: tls,
}
s.config.Scheme = "https"
}
// SetTimeout sets the timeout for connecting to Consul
func (s *Consul) setTimeout(time time.Duration) {
s.config.WaitTime = time
}
// Normalize the key for usage in Consul
func (s *Consul) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimPrefix(key, "/")
}
func (s *Consul) renewSession(pair *api.KVPair, ttl time.Duration) error {
// Check if there is any previous session with an active TTL
session, err := s.getActiveSession(pair.Key)
if err != nil {
return err
}
if session == "" {
entry := &api.SessionEntry{
Behavior: api.SessionBehaviorDelete, // Delete the key when the session expires
TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x
LockDelay: 1 * time.Millisecond, // Virtually disable lock delay
}
// Create the key session
session, _, err = s.client.Session().Create(entry, nil)
if err != nil {
return err
}
lockOpts := &api.LockOptions{
Key: pair.Key,
Session: session,
}
// Lock and ignore if lock is held
// It's just a placeholder for the
// ephemeral behavior
lock, _ := s.client.LockOpts(lockOpts)
if lock != nil {
lock.Lock(nil)
}
}
_, _, err = s.client.Session().Renew(session, nil)
return err
}
// getActiveSession checks if the key already has
// a session attached
func (s *Consul) getActiveSession(key string) (string, error) {
pair, _, err := s.client.KV().Get(key, nil)
if err != nil {
return "", err
}
if pair != nil && pair.Session != "" {
return pair.Session, nil
}
return "", nil
}
// Get the value at "key", returns the last modified index
// to use in conjunction to CAS calls
func (s *Consul) Get(key string, opts *store.ReadOptions) (*store.KVPair, error) {
options := &api.QueryOptions{
AllowStale: false,
RequireConsistent: true,
}
// Get options
if opts != nil {
options.RequireConsistent = opts.Consistent
}
pair, meta, err := s.client.KV().Get(s.normalize(key), options)
if err != nil {
return nil, err
}
// If pair is nil then the key does not exist
if pair == nil {
return nil, store.ErrKeyNotFound
}
return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil
}
// Put a value at "key"
func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error {
key = s.normalize(key)
p := &api.KVPair{
Key: key,
Value: value,
Flags: api.LockFlagValue,
}
if opts != nil && opts.TTL > 0 {
// Create or renew a session holding a TTL. Operations on sessions
// are not deterministic: creating or renewing a session can fail
for retry := 1; retry <= RenewSessionRetryMax; retry++ {
err := s.renewSession(p, opts.TTL)
if err == nil {
break
}
if retry == RenewSessionRetryMax {
return ErrSessionRenew
}
}
}
_, err := s.client.KV().Put(p, nil)
return err
}
// Delete a value at "key"
func (s *Consul) Delete(key string) error {
if _, err := s.Get(key, nil); err != nil {
return err
}
_, err := s.client.KV().Delete(s.normalize(key), nil)
return err
}
// Exists checks that the key exists inside the store
func (s *Consul) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// List child nodes of a given directory
func (s *Consul) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
options := &api.QueryOptions{
AllowStale: false,
RequireConsistent: true,
}
if opts != nil {
if !opts.Consistent {
options.AllowStale = true
options.RequireConsistent = false
}
}
pairs, _, err := s.client.KV().List(s.normalize(directory), options)
if err != nil {
return nil, err
}
if len(pairs) == 0 {
return nil, store.ErrKeyNotFound
}
kv := []*store.KVPair{}
for _, pair := range pairs {
if pair.Key == directory {
continue
}
kv = append(kv, &store.KVPair{
Key: pair.Key,
Value: pair.Value,
LastIndex: pair.ModifyIndex,
})
}
return kv, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Consul) DeleteTree(directory string) error {
if _, err := s.List(directory, nil); err != nil {
return err
}
_, err := s.client.KV().DeleteTree(s.normalize(directory), nil)
return err
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
kv := s.client.KV()
watchCh := make(chan *store.KVPair)
go func() {
defer close(watchCh)
// Use a wait time in order to check if we should quit
// from time to time.
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
for {
// Check if we should quit
select {
case <-stopCh:
return
default:
}
// Get the key
pair, meta, err := kv.Get(key, opts)
if err != nil {
return
}
// If LastIndex didn't change then it means `Get` returned
// because of the WaitTime and the key didn't change.
if opts.WaitIndex == meta.LastIndex {
continue
}
opts.WaitIndex = meta.LastIndex
// Return the value to the channel
if pair != nil {
watchCh <- &store.KVPair{
Key: pair.Key,
Value: pair.Value,
LastIndex: pair.ModifyIndex,
}
}
}
}()
return watchCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
kv := s.client.KV()
watchCh := make(chan []*store.KVPair)
go func() {
defer close(watchCh)
// Use a wait time in order to check if we should quit
// from time to time.
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
for {
// Check if we should quit
select {
case <-stopCh:
return
default:
}
// Get all the children
pairs, meta, err := kv.List(directory, opts)
if err != nil {
return
}
// If LastIndex didn't change then it means `List` returned
// because of the WaitTime and the child keys didn't change.
if opts.WaitIndex == meta.LastIndex {
continue
}
opts.WaitIndex = meta.LastIndex
// Return children KV pairs to the channel
kvpairs := []*store.KVPair{}
for _, pair := range pairs {
if pair.Key == directory {
continue
}
kvpairs = append(kvpairs, &store.KVPair{
Key: pair.Key,
Value: pair.Value,
LastIndex: pair.ModifyIndex,
})
}
watchCh <- kvpairs
}
}()
return watchCh, nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
lockOpts := &api.LockOptions{
Key: s.normalize(key),
}
lock := &consulLock{}
ttl := defaultLockTTL
var renewCh chan struct{}
if options != nil {
// Set optional TTL on Lock
if options.TTL != 0 {
ttl = options.TTL
}
// Set optional value on Lock
if options.Value != nil {
lockOpts.Value = options.Value
}
renewCh = options.RenewLock
}
entry := &api.SessionEntry{
Behavior: api.SessionBehaviorRelease, // Release the lock when the session expires
TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x
LockDelay: 1 * time.Millisecond, // Virtually disable lock delay
}
// Create the key session
session, _, err := s.client.Session().Create(entry, nil)
if err != nil {
return nil, err
}
// Place the session and renew chan on lock
lockOpts.Session = session
lock.renewCh = renewCh
l, err := s.client.LockOpts(lockOpts)
if err != nil {
return nil, err
}
// Renew the session ttl lock periodically
s.renewLockSession(entry.TTL, session, renewCh)
lock.lock = l
return lock, nil
}
// renewLockSession is used to renew a session lock; it takes
// a stopRenew chan which is used to explicitly stop the session
// renew process. The renew routine never stops until a signal is
// sent to this channel. If deleting the session fails because the
// connection to the store is lost, it keeps trying to delete the
// session periodically until it can contact the store; this ensures
// that the lock is not maintained indefinitely, favoring liveness
// over safety for the lock when the store becomes unavailable.
func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan struct{}) {
sessionDestroyAttempts := 0
ttl, err := time.ParseDuration(initialTTL)
if err != nil {
return
}
go func() {
for {
select {
case <-time.After(ttl / 2):
entry, _, err := s.client.Session().Renew(id, nil)
if err != nil {
// If an error occurs, continue until the
// session gets destroyed explicitly or
// the session ttl times out
continue
}
if entry == nil {
return
}
// Handle the server updating the TTL
ttl, _ = time.ParseDuration(entry.TTL)
case <-stopRenew:
// Attempt a session destroy
_, err := s.client.Session().Destroy(id, nil)
if err == nil {
return
}
// We cannot destroy the session because the store
// is unavailable, wait for the session renew period.
// Give up after 'MaxSessionDestroyAttempts'.
sessionDestroyAttempts++
if sessionDestroyAttempts >= MaxSessionDestroyAttempts {
return
}
time.Sleep(ttl / 2)
}
}
}()
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *consulLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
return l.lock.Lock(stopChan)
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *consulLock) Unlock() error {
if l.renewCh != nil {
close(l.renewCh)
}
return l.lock.Unlock()
}
// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
p := &api.KVPair{Key: s.normalize(key), Value: value, Flags: api.LockFlagValue}
if previous == nil {
// Consul interprets ModifyIndex = 0 as new key.
p.ModifyIndex = 0
} else {
p.ModifyIndex = previous.LastIndex
}
ok, _, err := s.client.KV().CAS(p, nil)
if err != nil {
return false, nil, err
}
if !ok {
if previous == nil {
return false, nil, store.ErrKeyExists
}
return false, nil, store.ErrKeyModified
}
pair, err := s.Get(key, nil)
if err != nil {
return false, nil, err
}
return true, pair, nil
}
// AtomicDelete deletes a value at "key" if the key has not
// been modified in the meantime, throws an error if this is the case
func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue}
// Extra Get operation to check on the key
_, err := s.Get(key, nil)
if err == store.ErrKeyNotFound {
return false, err
}
if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
return false, err
} else if !work {
return false, store.ErrKeyModified
}
return true, nil
}
// Close closes the client connection
func (s *Consul) Close() {
}
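
A hedged sketch of the optimistic concurrency this backend exposes through Consul's native check-and-set on ModifyIndex; the agent address, key, and vendored import path are placeholders:

package main

import (
	"fmt"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	"github.com/abronan/valkeyrie/store/consul"
)

func main() {
	consul.Register()

	kv, err := valkeyrie.NewStore(store.CONSUL, []string{"127.0.0.1:8500"}, nil)
	if err != nil {
		panic(err)
	}

	if err := kv.Put("config/feature", []byte("off"), nil); err != nil {
		panic(err)
	}

	pair, err := kv.Get("config/feature", nil)
	if err != nil {
		panic(err)
	}

	// CAS against the LastIndex we read; a concurrent writer makes this
	// fail with store.ErrKeyModified (see AtomicPut above).
	ok, _, err := kv.AtomicPut("config/feature", []byte("on"), pair, nil)
	if err == store.ErrKeyModified {
		fmt.Println("lost the race, re-read and retry")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("swapped:", ok)
}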

@@ -1,534 +0,0 @@
package etcdv3
import (
"context"
"crypto/tls"
"strings"
"sync"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
etcd "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
)
const (
defaultLockTTL = 20 * time.Second
etcdDefaultTimeout = 5 * time.Second
lockSuffix = "___lock"
)
// EtcdV3 is the receiver type for the
// Store interface
type EtcdV3 struct {
client *etcd.Client
}
type etcdLock struct {
lock sync.Mutex
store *EtcdV3
mutex *concurrency.Mutex
session *concurrency.Session
mutexKey string // mutexKey is the key to write, appended with the "___lock" suffix
writeKey string // writeKey is the actual key to update, protected by the mutexKey
value string
ttl time.Duration
}
// Register registers etcd to valkeyrie
func Register() {
valkeyrie.AddStore(store.ETCDV3, New)
}
// New creates a new Etcd client given a list
// of endpoints and an optional tls config
func New(addrs []string, options *store.Config) (store.Store, error) {
s := &EtcdV3{}
var (
entries []string
err error
)
entries = store.CreateEndpoints(addrs, "http")
cfg := &etcd.Config{
Endpoints: entries,
}
// Set options
if options != nil {
if options.TLS != nil {
setTLS(cfg, options.TLS, addrs)
}
if options.ConnectionTimeout != 0 {
setTimeout(cfg, options.ConnectionTimeout)
}
if options.Username != "" {
setCredentials(cfg, options.Username, options.Password)
}
if options.SyncPeriod != 0 {
cfg.AutoSyncInterval = options.SyncPeriod
}
}
s.client, err = etcd.New(*cfg)
if err != nil {
return nil, err
}
return s, nil
}
// setTLS sets the tls configuration given a tls.Config scheme
func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) {
entries := store.CreateEndpoints(addrs, "https")
cfg.Endpoints = entries
cfg.TLS = tls
}
// setTimeout sets the timeout used for connecting to the store
func setTimeout(cfg *etcd.Config, time time.Duration) {
cfg.DialTimeout = time
}
// setCredentials sets the username/password credentials for connecting to Etcd
func setCredentials(cfg *etcd.Config, username, password string) {
cfg.Username = username
cfg.Password = password
}
// Normalize the key for usage in Etcd
func (s *EtcdV3) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimPrefix(key, "/")
}
// Get the value at "key", returns the last modified
// index to use in conjunction to Atomic calls
func (s *EtcdV3) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
var result *etcd.GetResponse
if opts != nil && !opts.Consistent {
result, err = s.client.KV.Get(ctx, s.normalize(key), etcd.WithSerializable())
} else {
result, err = s.client.KV.Get(ctx, s.normalize(key))
}
cancel()
if err != nil {
return nil, err
}
if result.Count == 0 {
return nil, store.ErrKeyNotFound
}
kvs := []*store.KVPair{}
for _, pair := range result.Kvs {
kvs = append(kvs, &store.KVPair{
Key: string(pair.Key),
Value: []byte(pair.Value),
LastIndex: uint64(pair.ModRevision),
})
}
return kvs[0], nil
}
// Put a value at "key"
func (s *EtcdV3) Put(key string, value []byte, opts *store.WriteOptions) (err error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
pr := s.client.Txn(ctx)
if opts != nil && opts.TTL > 0 {
lease := etcd.NewLease(s.client)
resp, err := lease.Grant(context.Background(), int64(opts.TTL/time.Second))
if err != nil {
cancel()
return err
}
pr.Then(etcd.OpPut(key, string(value), etcd.WithLease(resp.ID)))
} else {
pr.Then(etcd.OpPut(key, string(value)))
}
_, err = pr.Commit()
cancel()
if err != nil {
return err
}
return nil
}
// Delete a value at "key"
func (s *EtcdV3) Delete(key string) error {
resp, err := s.client.KV.Delete(context.Background(), s.normalize(key))
if err != nil {
return err
}
if resp.Deleted == 0 {
return store.ErrKeyNotFound
}
return nil
}
// Exists checks if the key exists inside the store
func (s *EtcdV3) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *EtcdV3) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
wc := etcd.NewWatcher(s.client)
// respCh is sending back events to the caller
respCh := make(chan *store.KVPair)
// Get the current value
pair, err := s.Get(key, opts)
if err != nil {
return nil, err
}
go func() {
defer wc.Close()
defer close(respCh)
// Push the current value through the channel.
respCh <- pair
watchCh := wc.Watch(context.Background(), s.normalize(key))
for resp := range watchCh {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
for _, ev := range resp.Events {
respCh <- &store.KVPair{
Key: key,
Value: []byte(ev.Kv.Value),
LastIndex: uint64(ev.Kv.ModRevision),
}
}
}
}()
return respCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *EtcdV3) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
wc := etcd.NewWatcher(s.client)
// respCh is sending back events to the caller
respCh := make(chan []*store.KVPair)
// Get the current value
rev, pairs, err := s.list(directory, opts)
if err != nil {
return nil, err
}
go func() {
defer wc.Close()
defer close(respCh)
// Push the current value through the channel.
respCh <- pairs
rev++
watchCh := wc.Watch(context.Background(), s.normalize(directory), etcd.WithPrefix(), etcd.WithRev(rev))
for resp := range watchCh {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
list := make([]*store.KVPair, len(resp.Events))
for i, ev := range resp.Events {
list[i] = &store.KVPair{
Key: string(ev.Kv.Key),
Value: []byte(ev.Kv.Value),
LastIndex: uint64(ev.Kv.ModRevision),
}
}
respCh <- list
}
}()
return respCh, nil
}
// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *EtcdV3) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
var cmp etcd.Cmp
var testIndex bool
if previous != nil {
// We compare on the last modified index
testIndex = true
cmp = etcd.Compare(etcd.ModRevision(key), "=", int64(previous.LastIndex))
} else {
// Previous key is not given, thus we want the key not to exist
testIndex = false
cmp = etcd.Compare(etcd.CreateRevision(key), "=", 0)
}
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
pr := s.client.Txn(ctx).If(cmp)
// We set the TTL if given
if opts != nil && opts.TTL > 0 {
lease := etcd.NewLease(s.client)
resp, err := lease.Grant(context.Background(), int64(opts.TTL/time.Second))
if err != nil {
cancel()
return false, nil, err
}
pr.Then(etcd.OpPut(key, string(value), etcd.WithLease(resp.ID)))
} else {
pr.Then(etcd.OpPut(key, string(value)))
}
txn, err := pr.Commit()
cancel()
if err != nil {
return false, nil, err
}
if !txn.Succeeded {
if testIndex {
return false, nil, store.ErrKeyModified
}
return false, nil, store.ErrKeyExists
}
updated := &store.KVPair{
Key: key,
Value: value,
LastIndex: uint64(txn.Header.Revision),
}
return true, updated, nil
}
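// The conditional write above is a single etcd v3 transaction. A minimal
// sketch of the same shape, assuming a connected *etcd.Client named cli
// (the key, value, and prevIndex are placeholders):
//
//	txn, err := cli.Txn(ctx).
//		If(etcd.Compare(etcd.ModRevision("key"), "=", int64(prevIndex))).
//		Then(etcd.OpPut("key", "value")).
//		Commit()
//	// !txn.Succeeded means the revision moved; it surfaces here as
//	// store.ErrKeyModified (or store.ErrKeyExists when previous == nil).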
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *EtcdV3) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
// We compare on the last modified index
cmp := etcd.Compare(etcd.ModRevision(key), "=", int64(previous.LastIndex))
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
txn, err := s.client.Txn(ctx).
If(cmp).
Then(etcd.OpDelete(key)).
Commit()
cancel()
if err != nil {
return false, err
}
if len(txn.Responses) == 0 {
return false, store.ErrKeyNotFound
}
if !txn.Succeeded {
return false, store.ErrKeyModified
}
return true, nil
}
// List child nodes of a given directory
func (s *EtcdV3) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
_, kv, err := s.list(directory, opts)
return kv, err
}
// DeleteTree deletes a range of keys under a given directory
func (s *EtcdV3) DeleteTree(directory string) error {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
resp, err := s.client.KV.Delete(ctx, s.normalize(directory), etcd.WithPrefix())
cancel()
if err != nil {
return err
}
if resp.Deleted == 0 {
return store.ErrKeyNotFound
}
return nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *EtcdV3) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
var value string
ttl := defaultLockTTL
renewCh := make(chan struct{})
// Apply options on Lock
if options != nil {
if options.Value != nil {
value = string(options.Value)
}
if options.TTL != 0 {
ttl = options.TTL
}
if options.RenewLock != nil {
renewCh = options.RenewLock
}
}
// Create Session for Mutex
session, err := concurrency.NewSession(s.client, concurrency.WithTTL(int(ttl/time.Second)))
if err != nil {
return nil, err
}
go func() {
<-renewCh
session.Close()
}()
// A Mutex is a simple key that can only be held by a single process.
// An etcd mutex behaves like a Zookeeper lock: a side key is created with
// a suffix ("___lock" here) and represents the mutex. Thus we have a pair
// composed of the key to protect with a lock: "/key", and a side key that
// acts as the lock: "/key___lock"
mutexKey := s.normalize(key + lockSuffix)
writeKey := s.normalize(key)
// Create lock object
lock = &etcdLock{
store: s,
mutex: concurrency.NewMutex(session, mutexKey),
session: session,
mutexKey: mutexKey,
writeKey: writeKey,
value: value,
ttl: ttl,
}
return lock, nil
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
l.lock.Lock()
defer l.lock.Unlock()
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopChan
cancel()
}()
err := l.mutex.Lock(ctx)
if err != nil {
if err == context.Canceled {
return nil, nil
}
return nil, err
}
err = l.store.Put(l.writeKey, []byte(l.value), nil)
if err != nil {
return nil, err
}
return l.session.Done(), nil
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
l.lock.Lock()
defer l.lock.Unlock()
return l.mutex.Unlock(context.Background())
}
// Close closes the client connection
func (s *EtcdV3) Close() {
s.client.Close()
}
// list child nodes of a given directory and return revision number
func (s *EtcdV3) list(directory string, opts *store.ReadOptions) (int64, []*store.KVPair, error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
var resp *etcd.GetResponse
var err error
if opts != nil && !opts.Consistent {
resp, err = s.client.KV.Get(ctx, s.normalize(directory), etcd.WithSerializable(), etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
} else {
resp, err = s.client.KV.Get(ctx, s.normalize(directory), etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
}
cancel()
if err != nil {
return 0, nil, err
}
if resp.Count == 0 {
return 0, nil, store.ErrKeyNotFound
}
kv := []*store.KVPair{}
for _, n := range resp.Kvs {
if string(n.Key) == directory {
continue
}
// Filter out etcd mutex side keys with `___lock` suffix
if strings.Contains(string(n.Key), lockSuffix) {
continue
}
kv = append(kv, &store.KVPair{
Key: string(n.Key),
Value: []byte(n.Value),
LastIndex: uint64(n.ModRevision),
})
}
return resp.Header.Revision, kv, nil
}
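
A short usage sketch of the lock described above (session-backed mutex on "<key>___lock", value written to "<key>" once held). The endpoint and key are placeholders, and the vendored import path is assumed to match upstream valkeyrie's store/etcd/v3:

package main

import (
	"fmt"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	etcdv3 "github.com/abronan/valkeyrie/store/etcd/v3"
)

func main() {
	etcdv3.Register()

	kv, err := valkeyrie.NewStore(store.ETCDV3, []string{"127.0.0.1:2379"}, nil)
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	// The mutex lives on "jobs/leader___lock"; the value lands on "jobs/leader".
	locker, err := kv.NewLock("jobs/leader", &store.LockOptions{Value: []byte("worker-1")})
	if err != nil {
		panic(err)
	}

	lost, err := locker.Lock(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("lock acquired")

	// lost is closed when the underlying session expires, i.e. the lock is gone.
	_ = lost

	if err := locker.Unlock(); err != nil {
		panic(err)
	}
}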

@@ -1,593 +0,0 @@
package zookeeper
import (
"strings"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
zk "github.com/samuel/go-zookeeper/zk"
)
const (
// SOH control character
SOH = "\x01"
defaultTimeout = 10 * time.Second
syncRetryLimit = 5
)
// Zookeeper is the receiver type for
// the Store interface
type Zookeeper struct {
timeout time.Duration
client *zk.Conn
}
type zookeeperLock struct {
client *zk.Conn
lock *zk.Lock
key string
value []byte
}
// Register registers zookeeper to valkeyrie
func Register() {
valkeyrie.AddStore(store.ZK, New)
}
// New creates a new Zookeeper client given a
// list of endpoints and an optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
s := &Zookeeper{}
s.timeout = defaultTimeout
// Set options
if options != nil {
if options.ConnectionTimeout != 0 {
s.setTimeout(options.ConnectionTimeout)
}
}
// Connect to Zookeeper
conn, _, err := zk.Connect(endpoints, s.timeout)
if err != nil {
return nil, err
}
s.client = conn
return s, nil
}
// setTimeout sets the timeout for connecting to Zookeeper
func (s *Zookeeper) setTimeout(time time.Duration) {
s.timeout = time
}
// Get the value at "key", returns the last modified index
// to use in conjunction to Atomic calls
func (s *Zookeeper) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
resp, meta, err := s.get(key)
if err != nil {
return nil, err
}
pair = &store.KVPair{
Key: key,
Value: resp,
LastIndex: uint64(meta.Version),
}
return pair, nil
}
// createFullPath creates the entire path for a directory
// that does not exist and sets the value of the last
// znode to data
func (s *Zookeeper) createFullPath(path []string, data []byte, ephemeral bool) error {
for i := 1; i <= len(path); i++ {
newpath := "/" + strings.Join(path[:i], "/")
if i == len(path) {
flag := 0
if ephemeral {
flag = zk.FlagEphemeral
}
_, err := s.client.Create(newpath, data, int32(flag), zk.WorldACL(zk.PermAll))
return err
}
_, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Skip if node already exists
if err != zk.ErrNodeExists {
return err
}
}
}
return nil
}
// Put a value at "key"
func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
fkey := s.normalize(key)
exists, err := s.Exists(key, nil)
if err != nil {
return err
}
if !exists {
if opts != nil && opts.TTL > 0 {
err = s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), value, true)
} else {
err = s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), value, false)
}
} else {
_, err = s.client.Set(fkey, value, -1)
}
return err
}
// Delete a value at "key"
func (s *Zookeeper) Delete(key string) error {
err := s.client.Delete(s.normalize(key), -1)
if err == zk.ErrNoNode {
return store.ErrKeyNotFound
}
return err
}
// Exists checks if the key exists inside the store
func (s *Zookeeper) Exists(key string, opts *store.ReadOptions) (bool, error) {
exists, _, err := s.client.Exists(s.normalize(key))
if err != nil {
return false, err
}
return exists, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan *store.KVPair)
go func() {
defer close(watchCh)
var fireEvt = true
for {
resp, meta, eventCh, err := s.getW(key)
if err != nil {
return
}
if fireEvt {
watchCh <- &store.KVPair{
Key: key,
Value: resp,
LastIndex: uint64(meta.Version),
}
}
select {
case e := <-eventCh:
// Only fire an event if the data in the node changed.
// Simply reset the watch if this is any other event
// (e.g. a session event).
fireEvt = e.Type == zk.EventNodeDataChanged
case <-stopCh:
// There is no way to stop GetW so just quit
return
}
}
}()
return watchCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan []*store.KVPair)
go func() {
defer close(watchCh)
var fireEvt = true
for {
WATCH:
keys, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
if err != nil {
return
}
if fireEvt {
kvs, err := s.getListWithPath(directory, keys, opts)
if err != nil {
// Failed to get values for one or more of the keys,
// the list may be out of date so try again.
goto WATCH
}
watchCh <- kvs
}
select {
case e := <-eventCh:
// Only fire an event if the children have changed.
// Simply reset the watch if this is any other event
// (e.g. a session event).
fireEvt = e.Type == zk.EventNodeChildrenChanged
case <-stopCh:
// There is no way to stop ChildrenW so just quit
return
}
}
}()
return watchCh, nil
}
// listChildren lists the direct children of a directory
func (s *Zookeeper) listChildren(directory string) ([]string, error) {
children, _, err := s.client.Children(s.normalize(directory))
if err != nil {
if err == zk.ErrNoNode {
return nil, store.ErrKeyNotFound
}
return nil, err
}
return children, nil
}
// listChildrenRecursive lists the children of a directory as well as
// all the descendant children from sub-folders in a recursive fashion.
func (s *Zookeeper) listChildrenRecursive(list *[]string, directory string) error {
children, err := s.listChildren(directory)
if err != nil {
return err
}
// We reached a leaf.
if len(children) == 0 {
return nil
}
for _, c := range children {
c = strings.TrimSuffix(directory, "/") + "/" + c
err := s.listChildrenRecursive(list, c)
if err != nil && err != zk.ErrNoChildrenForEphemerals {
return err
}
*list = append(*list, c)
}
return nil
}
// List child nodes of a given directory
func (s *Zookeeper) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
children := make([]string, 0)
err := s.listChildrenRecursive(&children, directory)
if err != nil {
return nil, err
}
kvs, err := s.getList(children, opts)
if err != nil {
// If node is not found: List is out of date, retry
if err == store.ErrKeyNotFound {
return s.List(directory, opts)
}
return nil, err
}
return kvs, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Zookeeper) DeleteTree(directory string) error {
children, err := s.listChildren(directory)
if err != nil {
return err
}
var reqs []interface{}
for _, c := range children {
reqs = append(reqs, &zk.DeleteRequest{
Path: s.normalize(directory + "/" + c),
Version: -1,
})
}
_, err = s.client.Multi(reqs...)
return err
}
// AtomicPut put a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) {
var lastIndex uint64
if previous != nil {
meta, err := s.client.Set(s.normalize(key), value, int32(previous.LastIndex))
if err != nil {
// Compare Failed
if err == zk.ErrBadVersion {
return false, nil, store.ErrKeyModified
}
return false, nil, err
}
lastIndex = uint64(meta.Version)
} else {
// Interpret previous == nil as create operation.
_, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Directory does not exist
if err == zk.ErrNoNode {
// Create the directory
parts := store.SplitKey(strings.TrimSuffix(key, "/"))
parts = parts[:len(parts)-1]
if err = s.createFullPath(parts, []byte{}, false); err != nil {
// Failed to create the directory.
return false, nil, err
}
// Create the node
if _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)); err != nil {
// Node exists error (when previous is nil)
if err == zk.ErrNodeExists {
return false, nil, store.ErrKeyExists
}
return false, nil, err
}
} else {
// Node exists error (when previous is nil)
if err == zk.ErrNodeExists {
return false, nil, store.ErrKeyExists
}
// Unhandled error
return false, nil, err
}
}
lastIndex = 0 // Newly created nodes have version 0.
}
pair := &store.KVPair{
Key: key,
Value: value,
LastIndex: lastIndex,
}
return true, pair, nil
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
err := s.client.Delete(s.normalize(key), int32(previous.LastIndex))
if err != nil {
// Key not found
if err == zk.ErrNoNode {
return false, store.ErrKeyNotFound
}
// Compare failed
if err == zk.ErrBadVersion {
return false, store.ErrKeyModified
}
// General store error
return false, err
}
return true, nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
value := []byte("")
// Apply options
if options != nil {
if options.Value != nil {
value = options.Value
}
}
lock = &zookeeperLock{
client: s.client,
key: s.normalize(key),
value: value,
lock: zk.NewLock(s.client, s.normalize(key), zk.WorldACL(zk.PermAll)),
}
return lock, err
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
err := l.lock.Lock()
lostCh := make(chan struct{})
if err == nil {
// We hold the lock, we can set our value
_, err = l.client.Set(l.key, l.value, -1)
if err == nil {
go l.monitorLock(stopChan, lostCh)
}
}
return lostCh, err
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *zookeeperLock) Unlock() error {
return l.lock.Unlock()
}
// Close closes the client connection
func (s *Zookeeper) Close() {
s.client.Close()
}
// Normalize the key for usage in Zookeeper
func (s *Zookeeper) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimSuffix(key, "/")
}
func (l *zookeeperLock) monitorLock(stopCh <-chan struct{}, lostCh chan struct{}) {
defer close(lostCh)
for {
_, _, eventCh, err := l.client.GetW(l.key)
if err != nil {
// We failed to set watch, relinquish the lock
return
}
select {
case e := <-eventCh:
if e.Type == zk.EventNotWatching ||
(e.Type == zk.EventSession && e.State == zk.StateExpired) {
// Either the session has been closed and our watch has been
// invalidated or the session has expired.
return
} else if e.Type == zk.EventNodeDataChanged {
// Someone else has written to the lock node and believes
// that they have the lock.
return
}
case <-stopCh:
// The caller has requested that we relinquish our lock
return
}
}
}
func (s *Zookeeper) get(key string) ([]byte, *zk.Stat, error) {
var resp []byte
var meta *zk.Stat
var err error
// To guard against older versions of valkeyrie
// creating and writing to znodes non-atomically,
// we try to resync a few times if we read SOH or
// an empty string.
for i := 0; i <= syncRetryLimit; i++ {
resp, meta, err = s.client.Get(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, nil, store.ErrKeyNotFound
}
return nil, nil, err
}
if string(resp) != SOH && string(resp) != "" {
return resp, meta, nil
}
if i < syncRetryLimit {
if _, err = s.client.Sync(s.normalize(key)); err != nil {
return nil, nil, err
}
}
}
return resp, meta, nil
}
func (s *Zookeeper) getW(key string) ([]byte, *zk.Stat, <-chan zk.Event, error) {
var resp []byte
var meta *zk.Stat
var ech <-chan zk.Event
var err error
// To guard against older versions of valkeyrie
// creating and writing to znodes non-atomically,
// we try to resync a few times if we read SOH or
// an empty string.
for i := 0; i <= syncRetryLimit; i++ {
resp, meta, ech, err = s.client.GetW(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, nil, nil, store.ErrKeyNotFound
}
return nil, nil, nil, err
}
if string(resp) != SOH && string(resp) != "" {
return resp, meta, ech, nil
}
if i < syncRetryLimit {
if _, err = s.client.Sync(s.normalize(key)); err != nil {
return nil, nil, nil, err
}
}
}
return resp, meta, ech, nil
}
// getListWithPath gets the key/value pairs for a list of keys under
// a given path.
//
// This is generally used when we get a list of child keys which
// are stripped out of their path (for example when using ChildrenW).
func (s *Zookeeper) getListWithPath(path string, keys []string, opts *store.ReadOptions) ([]*store.KVPair, error) {
kvs := []*store.KVPair{}
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(path, "/")+s.normalize(key), opts)
if err != nil {
return nil, err
}
kvs = append(kvs, &store.KVPair{
Key: key,
Value: pair.Value,
LastIndex: pair.LastIndex,
})
}
return kvs, nil
}
// getList returns key/value pairs from a list of keys.
//
// This is generally used when we have a full list of keys with
// their full path included.
func (s *Zookeeper) getList(keys []string, opts *store.ReadOptions) ([]*store.KVPair, error) {
kvs := []*store.KVPair{}
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(key, "/"), opts)
if err != nil {
return nil, err
}
kvs = append(kvs, &store.KVPair{
Key: key,
Value: pair.Value,
LastIndex: pair.LastIndex,
})
}
return kvs, nil
}
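
Finally, a hedged sketch of watching a key through this Zookeeper backend; the server address, key, and vendored import path are placeholders:

package main

import (
	"fmt"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	"github.com/abronan/valkeyrie/store/zookeeper"
)

func main() {
	zookeeper.Register()

	kv, err := valkeyrie.NewStore(store.ZK, []string{"127.0.0.1:2181"}, nil)
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	stop := make(chan struct{})
	events, err := kv.Watch("app/config", stop, nil)
	if err != nil {
		panic(err)
	}

	// The current value arrives first; afterwards only EventNodeDataChanged
	// produces new pairs, while other zk events just re-arm the GetW watch.
	for pair := range events {
		fmt.Printf("version %d: %s\n", pair.LastIndex, pair.Value)
	}
}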