Update libkv dependency

Authored by NicoMen on 2017-11-17 17:22:03 +01:00, committed by Traefiker
parent cdab6b1796
commit 66e489addb
237 changed files with 62817 additions and 16116 deletions

View file

@ -51,7 +51,7 @@ func (f *Follower) follow() {
defer close(f.leaderCh)
defer close(f.errCh)
ch, err := f.client.Watch(f.key, f.stopCh)
ch, err := f.client.Watch(f.key, f.stopCh, nil)
if err != nil {
f.errCh <- err
}
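The hunk above adapts a Watch caller to the new three-argument signature: libkv's Watch now takes a trailing *store.ReadOptions, and passing nil keeps the previous behavior. A minimal sketch of a caller written against the new signature follows; the helper name and key are hypothetical, not part of this commit.

package example

import (
    "fmt"

    "github.com/docker/libkv/store"
)

// watchKey follows a single key using the updated three-argument Watch.
func watchKey(kv store.Store, key string, stopCh <-chan struct{}) error {
    // A nil *store.ReadOptions keeps the backend's default read consistency.
    events, err := kv.Watch(key, stopCh, nil)
    if err != nil {
        return err
    }
    for pair := range events {
        fmt.Printf("%s changed (index %d)\n", pair.Key, pair.LastIndex)
    }
    return nil
}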

View file

@ -10,7 +10,7 @@ import (
"sync/atomic"
"time"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
)
@ -127,7 +127,7 @@ func (b *BoltDB) releaseDBhandle() {
// Get the value at "key". BoltDB doesn't provide an inbuilt last modified index with every kv pair. It's implemented
// by an atomic counter maintained by libkv and appended to the value passed by the client.
func (b *BoltDB) Get(key string) (*store.KVPair, error) {
func (b *BoltDB) Get(key string, opts *store.ReadOptions) (*store.KVPair, error) {
var (
val []byte
db *bolt.DB
@ -229,7 +229,7 @@ func (b *BoltDB) Delete(key string) error {
}
// Exists checks if the key exists inside the store
func (b *BoltDB) Exists(key string) (bool, error) {
func (b *BoltDB) Exists(key string, opts *store.ReadOptions) (bool, error) {
var (
val []byte
db *bolt.DB
@ -261,7 +261,7 @@ func (b *BoltDB) Exists(key string) (bool, error) {
}
// List returns the range of keys starting with the passed in prefix
func (b *BoltDB) List(keyPrefix string) ([]*store.KVPair, error) {
func (b *BoltDB) List(keyPrefix string, opts *store.ReadOptions) ([]*store.KVPair, error) {
var (
db *bolt.DB
err error
@ -275,7 +275,7 @@ func (b *BoltDB) List(keyPrefix string) ([]*store.KVPair, error) {
return nil, err
}
defer b.releaseDBhandle()
hasResult := false
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
@ -286,21 +286,23 @@ func (b *BoltDB) List(keyPrefix string) ([]*store.KVPair, error) {
prefix := []byte(keyPrefix)
for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() {
hasResult = true
dbIndex := binary.LittleEndian.Uint64(v[:libkvmetadatalen])
v = v[libkvmetadatalen:]
val := make([]byte, len(v))
copy(val, v)
kv = append(kv, &store.KVPair{
Key: string(key),
Value: val,
LastIndex: dbIndex,
})
if string(key) != keyPrefix {
kv = append(kv, &store.KVPair{
Key: string(key),
Value: val,
LastIndex: dbIndex,
})
}
}
return nil
})
if len(kv) == 0 {
if !hasResult {
return nil, store.ErrKeyNotFound
}
return kv, err
@ -464,11 +466,11 @@ func (b *BoltDB) NewLock(key string, options *store.LockOptions) (store.Locker,
}
// Watch has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
func (b *BoltDB) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
return nil, store.ErrCallNotSupported
}
// WatchTree has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
return nil, store.ErrCallNotSupported
}
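A minimal sketch exercising the updated BoltDB signatures; the database path, bucket name, and keys are placeholders, not taken from this commit. The backend accepts the new *store.ReadOptions argument but ignores it, and List now skips an entry whose key equals the prefix itself and uses the hasResult flag to report ErrKeyNotFound.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libkv"
    "github.com/docker/libkv/store"
    "github.com/docker/libkv/store/boltdb"
)

func main() {
    boltdb.Register()

    // Placeholder path and bucket; BoltDB requires a Bucket name in the config.
    kv, err := libkv.NewStore(store.BOLTDB, []string{"/tmp/example.db"}, &store.Config{Bucket: "example"})
    if err != nil {
        log.Fatal(err)
    }

    if err := kv.Put("dir/key", []byte("value"), nil); err != nil {
        log.Fatal(err)
    }

    // The trailing nil is the new *store.ReadOptions; BoltDB does not use it.
    if ok, _ := kv.Exists("dir/key", nil); ok {
        pairs, err := kv.List("dir", nil)
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range pairs {
            fmt.Println(p.Key, string(p.Value))
        }
    }
}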

View file

@ -24,7 +24,7 @@ const (
RenewSessionRetryMax = 5
// MaxSessionDestroyAttempts is the maximum times we will try
// to explicitely destroy the session attached to a lock after
// to explicitly destroy the session attached to a lock after
// the connectivity to the store has been lost
MaxSessionDestroyAttempts = 5
@ -74,7 +74,6 @@ func New(endpoints []string, options *store.Config) (store.Store, error) {
s.config = config
config.HttpClient = http.DefaultClient
config.Address = endpoints[0]
config.Scheme = "http"
// Set options
if options != nil {
@ -168,12 +167,17 @@ func (s *Consul) getActiveSession(key string) (string, error) {
// Get the value at "key", returns the last modified index
// to use in conjunction with CAS calls
func (s *Consul) Get(key string) (*store.KVPair, error) {
func (s *Consul) Get(key string, opts *store.ReadOptions) (*store.KVPair, error) {
options := &api.QueryOptions{
AllowStale: false,
RequireConsistent: true,
}
// Get options
if opts != nil {
options.RequireConsistent = opts.Consistent
}
pair, meta, err := s.client.KV().Get(s.normalize(key), options)
if err != nil {
return nil, err
@ -217,7 +221,7 @@ func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error {
// Delete a value at "key"
func (s *Consul) Delete(key string) error {
if _, err := s.Get(key); err != nil {
if _, err := s.Get(key, nil); err != nil {
return err
}
_, err := s.client.KV().Delete(s.normalize(key), nil)
@ -225,8 +229,8 @@ func (s *Consul) Delete(key string) error {
}
// Exists checks that the key exists inside the store
func (s *Consul) Exists(key string) (bool, error) {
_, err := s.Get(key)
func (s *Consul) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
@ -237,8 +241,20 @@ func (s *Consul) Exists(key string) (bool, error) {
}
// List child nodes of a given directory
func (s *Consul) List(directory string) ([]*store.KVPair, error) {
pairs, _, err := s.client.KV().List(s.normalize(directory), nil)
func (s *Consul) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
options := &api.QueryOptions{
AllowStale: false,
RequireConsistent: true,
}
if opts != nil {
if !opts.Consistent {
options.AllowStale = true
options.RequireConsistent = false
}
}
pairs, _, err := s.client.KV().List(s.normalize(directory), options)
if err != nil {
return nil, err
}
@ -264,7 +280,7 @@ func (s *Consul) List(directory string) ([]*store.KVPair, error) {
// DeleteTree deletes a range of keys under a given directory
func (s *Consul) DeleteTree(directory string) error {
if _, err := s.List(directory); err != nil {
if _, err := s.List(directory, nil); err != nil {
return err
}
_, err := s.client.KV().DeleteTree(s.normalize(directory), nil)
@ -276,7 +292,7 @@ func (s *Consul) DeleteTree(directory string) error {
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
func (s *Consul) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
kv := s.client.KV()
watchCh := make(chan *store.KVPair)
@ -309,7 +325,6 @@ func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair
opts.WaitIndex = meta.LastIndex
// Return the value to the channel
// FIXME: What happens when a key is deleted?
if pair != nil {
watchCh <- &store.KVPair{
Key: pair.Key,
@ -328,7 +343,7 @@ func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
kv := s.client.KV()
watchCh := make(chan []*store.KVPair)
@ -429,7 +444,7 @@ func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker,
}
// renewLockSession is used to renew a session Lock, it takes
// a stopRenew chan which is used to explicitely stop the session
// a stopRenew chan which is used to explicitly stop the session
// renew process. The renew routine never stops until a signal is
// sent to this channel. If deleting the session fails because the
// connection to the store is lost, it keeps trying to delete the
@ -449,7 +464,7 @@ func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan s
entry, _, err := s.client.Session().Renew(id, nil)
if err != nil {
// If an error occurs, continue until the
// session gets destroyed explicitely or
// session gets destroyed explicitly or
// the session ttl times out
continue
}
@ -467,13 +482,15 @@ func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan s
return
}
// We cannot destroy the session because the store
// is unavailable, wait for the session renew period.
// Give up after 'MaxSessionDestroyAttempts'.
sessionDestroyAttempts++
if sessionDestroyAttempts >= MaxSessionDestroyAttempts {
return
}
// We can't destroy the session because the store
// is unavailable, wait for the session renew period
sessionDestroyAttempts++
time.Sleep(ttl / 2)
}
}
@ -520,7 +537,7 @@ func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, opt
return false, nil, store.ErrKeyModified
}
pair, err := s.Get(key)
pair, err := s.Get(key, nil)
if err != nil {
return false, nil, err
}
@ -538,7 +555,7 @@ func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error)
p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue}
// Extra Get operation to check on the key
_, err := s.Get(key)
_, err := s.Get(key, nil)
if err != nil && err == store.ErrKeyNotFound {
return false, err
}
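A minimal sketch of how a caller can now relax read consistency on the Consul backend; the address and keys are placeholders, not taken from this commit. With nil options or Consistent: true the query keeps RequireConsistent; with Consistent: false, Get drops the consistency requirement and List additionally allows stale answers from non-leader servers.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libkv"
    "github.com/docker/libkv/store"
    "github.com/docker/libkv/store/consul"
)

func main() {
    consul.Register()

    // Placeholder Consul address.
    kv, err := libkv.NewStore(store.CONSUL, []string{"127.0.0.1:8500"}, nil)
    if err != nil {
        log.Fatal(err)
    }

    // Default (nil options): linearizable read, RequireConsistent stays on.
    pair, err := kv.Get("app/config", nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s = %s\n", pair.Key, pair.Value)

    // Relaxed read: List may be answered by a non-leader server (AllowStale).
    pairs, err := kv.List("app", &store.ReadOptions{Consistent: false})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(pairs), "keys under app/")
}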

View file

@ -7,6 +7,7 @@ import (
"net"
"net/http"
"strings"
"sync"
"time"
"golang.org/x/net/context"
@ -16,6 +17,10 @@ import (
"github.com/docker/libkv/store"
)
const (
lockSuffix = "___lock"
)
var (
// ErrAbortTryLock is thrown when a user stops trying to seek the lock
// by sending a signal to the stop chan, this is used to verify if the
@ -30,17 +35,20 @@ type Etcd struct {
}
type etcdLock struct {
client etcd.KeysAPI
lock sync.Mutex
client etcd.KeysAPI
stopLock chan struct{}
stopRenew chan struct{}
key string
value string
last *etcd.Response
ttl time.Duration
mutexKey string
writeKey string
value string
last *etcd.Response
ttl time.Duration
}
const (
periodicSync = 5 * time.Minute
defaultLockTTL = 20 * time.Second
defaultUpdateTime = 5 * time.Second
)
@ -88,13 +96,13 @@ func New(addrs []string, options *store.Config) (store.Store, error) {
s.client = etcd.NewKeysAPI(c)
// Periodic Cluster Sync
go func() {
for {
if err := c.AutoSync(context.Background(), periodicSync); err != nil {
return
if options != nil && options.SyncPeriod != 0 {
go func() {
for {
c.AutoSync(context.Background(), options.SyncPeriod)
}
}
}()
}()
}
return s, nil
}
@ -151,11 +159,16 @@ func keyNotFound(err error) bool {
// Get the value at "key", returns the last modified
// index to use in conjunction with Atomic calls
func (s *Etcd) Get(key string) (pair *store.KVPair, err error) {
func (s *Etcd) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
getOpts := &etcd.GetOptions{
Quorum: true,
}
// Get options
if opts != nil {
getOpts.Quorum = opts.Consistent
}
result, err := s.client.Get(context.Background(), s.normalize(key), getOpts)
if err != nil {
if keyNotFound(err) {
@ -201,8 +214,8 @@ func (s *Etcd) Delete(key string) error {
}
// Exists checks if the key exists inside the store
func (s *Etcd) Exists(key string) (bool, error) {
_, err := s.Get(key)
func (s *Etcd) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
@ -217,22 +230,22 @@ func (s *Etcd) Exists(key string) (bool, error) {
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
opts := &etcd.WatcherOptions{Recursive: false}
watcher := s.client.Watcher(s.normalize(key), opts)
func (s *Etcd) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
wopts := &etcd.WatcherOptions{Recursive: false}
watcher := s.client.Watcher(s.normalize(key), wopts)
// watchCh is sending back events to the caller
watchCh := make(chan *store.KVPair)
// Get the current value
pair, err := s.Get(key, opts)
if err != nil {
return nil, err
}
go func() {
defer close(watchCh)
// Get the current value
pair, err := s.Get(key)
if err != nil {
return
}
// Push the current value through the channel.
watchCh <- pair
@ -266,22 +279,22 @@ func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair,
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
watchOpts := &etcd.WatcherOptions{Recursive: true}
watcher := s.client.Watcher(s.normalize(directory), watchOpts)
// watchCh is sending back events to the caller
watchCh := make(chan []*store.KVPair)
// List current children
list, err := s.List(directory, opts)
if err != nil {
return nil, err
}
go func() {
defer close(watchCh)
// Get child values
list, err := s.List(directory)
if err != nil {
return
}
// Push the current value through the channel.
watchCh <- list
@ -299,7 +312,7 @@ func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*st
return
}
list, err = s.List(directory)
list, err = s.List(directory, opts)
if err != nil {
return
}
@ -397,13 +410,18 @@ func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
}
// List child nodes of a given directory
func (s *Etcd) List(directory string) ([]*store.KVPair, error) {
func (s *Etcd) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
getOpts := &etcd.GetOptions{
Quorum: true,
Recursive: true,
Sort: true,
}
// Get options
if opts != nil {
getOpts.Quorum = opts.Consistent
}
resp, err := s.client.Get(context.Background(), s.normalize(directory), getOpts)
if err != nil {
if keyNotFound(err) {
@ -414,6 +432,26 @@ func (s *Etcd) List(directory string) ([]*store.KVPair, error) {
kv := []*store.KVPair{}
for _, n := range resp.Node.Nodes {
if n.Key == directory {
continue
}
// Etcd v2 seems to stop listing child keys at directories even
// with the "Recursive" option. If the child is a directory,
// we call `List` recursively to go through the whole set.
if n.Dir {
pairs, err := s.List(n.Key, opts)
if err != nil {
return nil, err
}
kv = append(kv, pairs...)
}
// Filter out etcd mutex side keys with `___lock` suffix
if strings.Contains(string(n.Key), lockSuffix) {
continue
}
kv = append(kv, &store.KVPair{
Key: n.Key,
Value: []byte(n.Value),
@ -460,7 +498,8 @@ func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locke
lock = &etcdLock{
client: s.client,
stopRenew: renewCh,
key: s.normalize(key),
mutexKey: s.normalize(key + lockSuffix),
writeKey: s.normalize(key),
value: value,
ttl: ttl,
}
@ -472,6 +511,8 @@ func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locke
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
l.lock.Lock()
defer l.lock.Unlock()
// Lock holder channel
lockHeld := make(chan struct{})
@ -483,7 +524,7 @@ func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
for {
setOpts.PrevExist = etcd.PrevNoExist
resp, err := l.client.Set(context.Background(), l.key, l.value, setOpts)
resp, err := l.client.Set(context.Background(), l.mutexKey, "", setOpts)
if err != nil {
if etcdError, ok := err.(etcd.Error); ok {
if etcdError.Code != etcd.ErrorCodeNodeExist {
@ -496,12 +537,19 @@ func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
}
setOpts.PrevExist = etcd.PrevExist
l.last, err = l.client.Set(context.Background(), l.key, l.value, setOpts)
l.last, err = l.client.Set(context.Background(), l.mutexKey, "", setOpts)
if err == nil {
// Leader section
l.stopLock = stopLocking
go l.holdLock(l.key, lockHeld, stopLocking)
go l.holdLock(l.mutexKey, lockHeld, stopLocking)
// We are holding the lock, set the write key
_, err = l.client.Set(context.Background(), l.writeKey, l.value, nil)
if err != nil {
return nil, err
}
break
} else {
// If this is a legitimate error, return
@ -516,7 +564,7 @@ func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
chWStop := make(chan bool)
free := make(chan bool)
go l.waitLock(l.key, errorCh, chWStop, free)
go l.waitLock(l.mutexKey, errorCh, chWStop, free)
// Wait for the key to be available or for
// a signal to stop trying to lock the key
@ -553,7 +601,7 @@ func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking <-ch
select {
case <-update.C:
setOpts.PrevIndex = l.last.Node.ModifiedIndex
l.last, err = l.client.Set(context.Background(), key, l.value, setOpts)
l.last, err = l.client.Set(context.Background(), key, "", setOpts)
if err != nil {
return
}
@ -575,7 +623,7 @@ func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan boo
errorCh <- err
return
}
if event.Action == "delete" || event.Action == "expire" {
if event.Action == "delete" || event.Action == "compareAndDelete" || event.Action == "expire" {
free <- true
return
}
@ -585,6 +633,9 @@ func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan boo
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.stopLock != nil {
l.stopLock <- struct{}{}
}
@ -592,7 +643,7 @@ func (l *etcdLock) Unlock() error {
delOpts := &etcd.DeleteOptions{
PrevIndex: l.last.Node.ModifiedIndex,
}
_, err := l.client.Delete(context.Background(), l.key, delOpts)
_, err := l.client.Delete(context.Background(), l.mutexKey, delOpts)
if err != nil {
return err
}
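A minimal sketch of the reworked etcd (v2) locking and the new SyncPeriod option; the endpoint, key, value, and the assumption that the v2 backend keeps its store/etcd import path are mine, not part of the diff. The lock is now taken on a "___lock"-suffixed side key and the protected key only receives the value once the mutex is held; periodic endpoint auto-sync runs only when Config.SyncPeriod is set.

package main

import (
    "log"
    "time"

    "github.com/docker/libkv"
    "github.com/docker/libkv/store"
    "github.com/docker/libkv/store/etcd"
)

func main() {
    etcd.Register()

    kv, err := libkv.NewStore(store.ETCD, []string{"127.0.0.1:2379"}, &store.Config{
        SyncPeriod: 30 * time.Second, // without this, no AutoSync loop is started anymore
    })
    if err != nil {
        log.Fatal(err)
    }

    lock, err := kv.NewLock("app/leader", &store.LockOptions{
        Value: []byte("instance-1"),
        TTL:   20 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }

    stop := make(chan struct{})
    // The mutex is taken on a "___lock"-suffixed side key; the value lands on "app/leader" once held.
    lost, err := lock.Lock(stop)
    if err != nil {
        log.Fatal(err)
    }

    select {
    case <-lost:
        log.Println("lock lost")
    default:
        // Leader-only work would go here.
        if err := lock.Unlock(); err != nil {
            log.Fatal(err)
        }
    }
}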

vendor/github.com/docker/libkv/store/etcd/v3/etcd.go (new file, 534 lines, generated and vendored)
View file

@ -0,0 +1,534 @@
package etcdv3
import (
"context"
"crypto/tls"
"strings"
"sync"
"time"
etcd "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
)
const (
defaultLockTTL = 20 * time.Second
etcdDefaultTimeout = 5 * time.Second
lockSuffix = "___lock"
)
// EtcdV3 is the receiver type for the
// Store interface
type EtcdV3 struct {
client *etcd.Client
}
type etcdLock struct {
lock sync.Mutex
store *EtcdV3
mutex *concurrency.Mutex
session *concurrency.Session
mutexKey string // mutexKey is the key to write, appended with the "___lock" suffix
writeKey string // writeKey is the actual key to update protected by the mutexKey
value string
ttl time.Duration
}
// Register registers etcd to libkv
func Register() {
libkv.AddStore(store.ETCDV3, New)
}
// New creates a new Etcd client given a list
// of endpoints and an optional tls config
func New(addrs []string, options *store.Config) (store.Store, error) {
s := &EtcdV3{}
var (
entries []string
err error
)
entries = store.CreateEndpoints(addrs, "http")
cfg := &etcd.Config{
Endpoints: entries,
}
// Set options
if options != nil {
if options.TLS != nil {
setTLS(cfg, options.TLS, addrs)
}
if options.ConnectionTimeout != 0 {
setTimeout(cfg, options.ConnectionTimeout)
}
if options.Username != "" {
setCredentials(cfg, options.Username, options.Password)
}
if options.SyncPeriod != 0 {
cfg.AutoSyncInterval = options.SyncPeriod
}
}
s.client, err = etcd.New(*cfg)
if err != nil {
return nil, err
}
return s, nil
}
// setTLS sets the tls configuration given a tls.Config scheme
func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) {
entries := store.CreateEndpoints(addrs, "https")
cfg.Endpoints = entries
cfg.TLS = tls
}
// setTimeout sets the timeout used for connecting to the store
func setTimeout(cfg *etcd.Config, time time.Duration) {
cfg.DialTimeout = time
}
// setCredentials sets the username/password credentials for connecting to Etcd
func setCredentials(cfg *etcd.Config, username, password string) {
cfg.Username = username
cfg.Password = password
}
// Normalize the key for usage in Etcd
func (s *EtcdV3) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimPrefix(key, "/")
}
// Get the value at "key", returns the last modified
// index to use in conjunction with Atomic calls
func (s *EtcdV3) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
var result *etcd.GetResponse
if opts != nil && !opts.Consistent {
result, err = s.client.KV.Get(ctx, s.normalize(key), etcd.WithSerializable())
} else {
result, err = s.client.KV.Get(ctx, s.normalize(key))
}
cancel()
if err != nil {
return nil, err
}
if result.Count == 0 {
return nil, store.ErrKeyNotFound
}
kvs := []*store.KVPair{}
for _, pair := range result.Kvs {
kvs = append(kvs, &store.KVPair{
Key: string(pair.Key),
Value: []byte(pair.Value),
LastIndex: uint64(pair.ModRevision),
})
}
return kvs[0], nil
}
// Put a value at "key"
func (s *EtcdV3) Put(key string, value []byte, opts *store.WriteOptions) (err error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
pr := s.client.Txn(ctx)
if opts != nil && opts.TTL > 0 {
lease := etcd.NewLease(s.client)
resp, err := lease.Grant(context.Background(), int64(opts.TTL/time.Second))
if err != nil {
cancel()
return err
}
pr.Then(etcd.OpPut(key, string(value), etcd.WithLease(resp.ID)))
} else {
pr.Then(etcd.OpPut(key, string(value)))
}
_, err = pr.Commit()
cancel()
if err != nil {
return err
}
return nil
}
// Delete a value at "key"
func (s *EtcdV3) Delete(key string) error {
resp, err := s.client.KV.Delete(context.Background(), s.normalize(key))
if resp.Deleted == 0 {
return store.ErrKeyNotFound
}
return err
}
// Exists checks if the key exists inside the store
func (s *EtcdV3) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *EtcdV3) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
wc := etcd.NewWatcher(s.client)
// respCh is sending back events to the caller
respCh := make(chan *store.KVPair)
// Get the current value
pair, err := s.Get(key, opts)
if err != nil {
return nil, err
}
go func() {
defer wc.Close()
defer close(respCh)
// Push the current value through the channel.
respCh <- pair
watchCh := wc.Watch(context.Background(), s.normalize(key))
for resp := range watchCh {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
for _, ev := range resp.Events {
respCh <- &store.KVPair{
Key: key,
Value: []byte(ev.Kv.Value),
LastIndex: uint64(ev.Kv.ModRevision),
}
}
}
}()
return respCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *EtcdV3) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
wc := etcd.NewWatcher(s.client)
// respCh is sending back events to the caller
respCh := make(chan []*store.KVPair)
// Get the current value
rev, pairs, err := s.list(directory, opts)
if err != nil {
return nil, err
}
go func() {
defer wc.Close()
defer close(respCh)
// Push the current value through the channel.
respCh <- pairs
rev++
watchCh := wc.Watch(context.Background(), s.normalize(directory), etcd.WithPrefix(), etcd.WithRev(rev))
for resp := range watchCh {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
list := make([]*store.KVPair, len(resp.Events))
for i, ev := range resp.Events {
list[i] = &store.KVPair{
Key: string(ev.Kv.Key),
Value: []byte(ev.Kv.Value),
LastIndex: uint64(ev.Kv.ModRevision),
}
}
respCh <- list
}
}()
return respCh, nil
}
// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *EtcdV3) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
var cmp etcd.Cmp
var testIndex bool
if previous != nil {
// We compare on the last modified index
testIndex = true
cmp = etcd.Compare(etcd.ModRevision(key), "=", int64(previous.LastIndex))
} else {
// Previous key is not given, thus we want the key not to exist
testIndex = false
cmp = etcd.Compare(etcd.CreateRevision(key), "=", 0)
}
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
pr := s.client.Txn(ctx).If(cmp)
// We set the TTL if given
if opts != nil && opts.TTL > 0 {
lease := etcd.NewLease(s.client)
resp, err := lease.Grant(context.Background(), int64(opts.TTL/time.Second))
if err != nil {
cancel()
return false, nil, err
}
pr.Then(etcd.OpPut(key, string(value), etcd.WithLease(resp.ID)))
} else {
pr.Then(etcd.OpPut(key, string(value)))
}
txn, err := pr.Commit()
cancel()
if err != nil {
return false, nil, err
}
if !txn.Succeeded {
if testIndex {
return false, nil, store.ErrKeyModified
}
return false, nil, store.ErrKeyExists
}
updated := &store.KVPair{
Key: key,
Value: value,
LastIndex: uint64(txn.Header.Revision),
}
return true, updated, nil
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *EtcdV3) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
// We compare on the last modified index
cmp := etcd.Compare(etcd.ModRevision(key), "=", int64(previous.LastIndex))
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
txn, err := s.client.Txn(ctx).
If(cmp).
Then(etcd.OpDelete(key)).
Commit()
cancel()
if err != nil {
return false, err
}
if len(txn.Responses) == 0 {
return false, store.ErrKeyNotFound
}
if !txn.Succeeded {
return false, store.ErrKeyModified
}
return true, nil
}
// List child nodes of a given directory
func (s *EtcdV3) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
_, kv, err := s.list(directory, opts)
return kv, err
}
// DeleteTree deletes a range of keys under a given directory
func (s *EtcdV3) DeleteTree(directory string) error {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
resp, err := s.client.KV.Delete(ctx, s.normalize(directory), etcd.WithPrefix())
cancel()
if resp.Deleted == 0 {
return store.ErrKeyNotFound
}
return err
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *EtcdV3) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
var value string
ttl := defaultLockTTL
renewCh := make(chan struct{})
// Apply options on Lock
if options != nil {
if options.Value != nil {
value = string(options.Value)
}
if options.TTL != 0 {
ttl = options.TTL
}
if options.RenewLock != nil {
renewCh = options.RenewLock
}
}
// Create Session for Mutex
session, err := concurrency.NewSession(s.client, concurrency.WithTTL(int(ttl/time.Second)))
if err != nil {
return nil, err
}
go func() {
<-renewCh
session.Close()
return
}()
// A Mutex is a simple key that can only be held by a single process.
// An etcd mutex behaves like a Zookeeper lock: a side key is created with
// a suffix (such as "_lock") and represents the mutex. Thus we have a pair
// composed of the key to protect with a lock: "/key", and a side key that
// acts as the lock: "/key_lock"
mutexKey := s.normalize(key + lockSuffix)
writeKey := s.normalize(key)
// Create lock object
lock = &etcdLock{
store: s,
mutex: concurrency.NewMutex(session, mutexKey),
session: session,
mutexKey: mutexKey,
writeKey: writeKey,
value: value,
ttl: ttl,
}
return lock, nil
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
l.lock.Lock()
defer l.lock.Unlock()
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopChan
cancel()
}()
err := l.mutex.Lock(ctx)
if err != nil {
if err == context.Canceled {
return nil, nil
}
return nil, err
}
err = l.store.Put(l.writeKey, []byte(l.value), nil)
if err != nil {
return nil, err
}
return l.session.Done(), nil
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
l.lock.Lock()
defer l.lock.Unlock()
return l.mutex.Unlock(context.Background())
}
// Close closes the client connection
func (s *EtcdV3) Close() {
s.client.Close()
}
// list child nodes of a given directory and return revision number
func (s *EtcdV3) list(directory string, opts *store.ReadOptions) (int64, []*store.KVPair, error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
var resp *etcd.GetResponse
var err error
if opts != nil && !opts.Consistent {
resp, err = s.client.KV.Get(ctx, s.normalize(directory), etcd.WithSerializable(), etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
} else {
resp, err = s.client.KV.Get(ctx, s.normalize(directory), etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
}
cancel()
if err != nil {
return 0, nil, err
}
if resp.Count == 0 {
return 0, nil, store.ErrKeyNotFound
}
kv := []*store.KVPair{}
for _, n := range resp.Kvs {
if string(n.Key) == directory {
continue
}
// Filter out etcd mutex side keys with `___lock` suffix
if strings.Contains(string(n.Key), lockSuffix) {
continue
}
kv = append(kv, &store.KVPair{
Key: string(n.Key),
Value: []byte(n.Value),
LastIndex: uint64(n.ModRevision),
})
}
return resp.Header.Revision, kv, nil
}
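A minimal sketch of reads against the new etcdv3 backend; the endpoint and keys are placeholders, not taken from this commit. With nil options or Consistent: true the client performs a linearizable read; with Consistent: false it issues a serializable read via WithSerializable, which may return slightly stale data. List also filters out the "___lock" side keys.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libkv"
    "github.com/docker/libkv/store"
    etcdv3 "github.com/docker/libkv/store/etcd/v3"
)

func main() {
    etcdv3.Register()

    kv, err := libkv.NewStore(store.ETCDV3, []string{"127.0.0.1:2379"}, nil)
    if err != nil {
        log.Fatal(err)
    }

    // Linearizable read (default when options are nil).
    pair, err := kv.Get("app/config", nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s = %s (rev %d)\n", pair.Key, pair.Value, pair.LastIndex)

    // Serializable read: may be served without a quorum round trip.
    pairs, err := kv.List("app", &store.ReadOptions{Consistent: false})
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range pairs {
        fmt.Println(p.Key) // "___lock" side keys are not included
    }
}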

View file

@ -12,12 +12,16 @@ type Backend string
const (
// CONSUL backend
CONSUL Backend = "consul"
// ETCD backend
// ETCD backend with v2 client (backward compatibility)
ETCD Backend = "etcd"
// ETCDV3 backend with v3 client
ETCDV3 Backend = "etcdv3"
// ZK backend
ZK Backend = "zk"
// BOLTDB backend
BOLTDB Backend = "boltdb"
// REDIS backend
REDIS Backend = "redis"
)
var (
@ -44,6 +48,7 @@ type Config struct {
ClientTLS *ClientTLSConfig
TLS *tls.Config
ConnectionTimeout time.Duration
SyncPeriod time.Duration
Bucket string
PersistConnection bool
Username string
@ -67,20 +72,20 @@ type Store interface {
Put(key string, value []byte, options *WriteOptions) error
// Get a value given its key
Get(key string) (*KVPair, error)
Get(key string, options *ReadOptions) (*KVPair, error)
// Delete the value at the specified key
Delete(key string) error
// Verify if a Key exists in the store
Exists(key string) (bool, error)
Exists(key string, options *ReadOptions) (bool, error)
// Watch for changes on a key
Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error)
Watch(key string, stopCh <-chan struct{}, options *ReadOptions) (<-chan *KVPair, error)
// WatchTree watches for changes on child nodes under
// a given directory
WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error)
WatchTree(directory string, stopCh <-chan struct{}, options *ReadOptions) (<-chan []*KVPair, error)
// NewLock creates a lock for a given key.
// The returned Locker is not held and must be acquired
@ -88,7 +93,7 @@ type Store interface {
NewLock(key string, options *LockOptions) (Locker, error)
// List the content of a given prefix
List(directory string) ([]*KVPair, error)
List(directory string, options *ReadOptions) ([]*KVPair, error)
// DeleteTree deletes a range of keys under a given directory
DeleteTree(directory string) error
@ -117,6 +122,16 @@ type WriteOptions struct {
TTL time.Duration
}
// ReadOptions contains optional request parameters
type ReadOptions struct {
// Consistent defines if the behavior of a Get operation is
// linearizable or not. Linearizability allows us to 'see'
// objects based on a real-time total order as opposed to
// an arbitrary order or with stale values ('inconsistent'
// scenario).
Consistent bool
}
// LockOptions contains optional request parameters
type LockOptions struct {
Value []byte // Optional, value to associate with the lock
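A backend-agnostic sketch against the updated Store interface; the helper name and usage are hypothetical. Since every read path now takes a *ReadOptions, a single options value can be threaded through Exists and List unchanged.

package kvutil

import "github.com/docker/libkv/store"

// snapshot returns the pairs under prefix, threading the same ReadOptions
// through every read it performs.
func snapshot(kv store.Store, prefix string, opts *store.ReadOptions) ([]*store.KVPair, error) {
    ok, err := kv.Exists(prefix, opts)
    if err != nil {
        return nil, err
    }
    if !ok {
        return nil, store.ErrKeyNotFound
    }
    return kv.List(prefix, opts)
}

// Usage: pairs, err := snapshot(kv, "app", &store.ReadOptions{Consistent: true})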

View file

@ -14,6 +14,8 @@ const (
SOH = "\x01"
defaultTimeout = 10 * time.Second
syncRetryLimit = 5
)
// Zookeeper is the receiver type for
@ -65,22 +67,13 @@ func (s *Zookeeper) setTimeout(time time.Duration) {
// Get the value at "key", returns the last modified index
// to use in conjunction with Atomic calls
func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) {
resp, meta, err := s.client.Get(s.normalize(key))
func (s *Zookeeper) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
resp, meta, err := s.get(key)
if err != nil {
if err == zk.ErrNoNode {
return nil, store.ErrKeyNotFound
}
return nil, err
}
// FIXME handle very rare cases where Get returns the
// SOH control character instead of the actual value
if string(resp) == SOH {
return s.Get(store.Normalize(key))
}
pair = &store.KVPair{
Key: key,
Value: resp,
@ -91,14 +84,21 @@ func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) {
}
// createFullPath creates the entire path for a directory
// that does not exist
func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error {
// that does not exist and sets the value of the last
// znode to data
func (s *Zookeeper) createFullPath(path []string, data []byte, ephemeral bool) error {
for i := 1; i <= len(path); i++ {
newpath := "/" + strings.Join(path[:i], "/")
if i == len(path) && ephemeral {
_, err := s.client.Create(newpath, []byte{}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
if i == len(path) {
flag := 0
if ephemeral {
flag = zk.FlagEphemeral
}
_, err := s.client.Create(newpath, data, int32(flag), zk.WorldACL(zk.PermAll))
return err
}
_, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Skip if node already exists
@ -114,20 +114,21 @@ func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error {
func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
fkey := s.normalize(key)
exists, err := s.Exists(key)
exists, err := s.Exists(key, nil)
if err != nil {
return err
}
if !exists {
if opts != nil && opts.TTL > 0 {
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), true)
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), value, true)
} else {
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), false)
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), value, false)
}
} else {
_, err = s.client.Set(fkey, value, -1)
}
_, err = s.client.Set(fkey, value, -1)
return err
}
@ -141,7 +142,7 @@ func (s *Zookeeper) Delete(key string) error {
}
// Exists checks if the key exists inside the store
func (s *Zookeeper) Exists(key string) (bool, error) {
func (s *Zookeeper) Exists(key string, opts *store.ReadOptions) (bool, error) {
exists, _, err := s.client.Exists(s.normalize(key))
if err != nil {
return false, err
@ -154,33 +155,31 @@ func (s *Zookeeper) Exists(key string) (bool, error) {
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
// Get the key first
pair, err := s.Get(key)
if err != nil {
return nil, err
}
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan *store.KVPair)
go func() {
defer close(watchCh)
// Get returns the current value to the channel prior
// to listening to any event that may occur on that key
watchCh <- pair
var fireEvt = true
for {
_, _, eventCh, err := s.client.GetW(s.normalize(key))
resp, meta, eventCh, err := s.getW(key)
if err != nil {
return
}
if fireEvt {
watchCh <- &store.KVPair{
Key: key,
Value: resp,
LastIndex: uint64(meta.Version),
}
}
select {
case e := <-eventCh:
if e.Type == zk.EventNodeDataChanged {
if entry, err := s.Get(key); err == nil {
watchCh <- entry
}
}
// Only fire an event if the data in the node changed.
// Simply reset the watch if this is any other event
// (e.g. a session event).
fireEvt = e.Type == zk.EventNodeDataChanged
case <-stopCh:
// There is no way to stop GetW so just quit
return
@ -196,37 +195,36 @@ func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVP
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
// List the childrens first
entries, err := s.List(directory)
if err != nil {
return nil, err
}
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan []*store.KVPair)
go func() {
defer close(watchCh)
// List returns the children values to the channel
// prior to listening to any events that may occur
// on those keys
watchCh <- entries
var fireEvt = true
for {
_, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
WATCH:
keys, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
if err != nil {
return
}
if fireEvt {
kvs, err := s.getListWithPath(directory, keys, opts)
if err != nil {
// Failed to get values for one or more of the keys,
// the list may be out of date so try again.
goto WATCH
}
watchCh <- kvs
}
select {
case e := <-eventCh:
if e.Type == zk.EventNodeChildrenChanged {
if kv, err := s.List(directory); err == nil {
watchCh <- kv
}
}
// Only fire an event if the children have changed.
// Simply reset the watch if this is any other event
// (e.g. a session event).
fireEvt = e.Type == zk.EventNodeChildrenChanged
case <-stopCh:
// There is no way to stop GetW so just quit
// There is no way to stop ChildrenW so just quit
return
}
}
@ -235,51 +233,75 @@ func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan
return watchCh, nil
}
// List child nodes of a given directory
func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
keys, stat, err := s.client.Children(s.normalize(directory))
// listChildren lists the direct children of a directory
func (s *Zookeeper) listChildren(directory string) ([]string, error) {
children, _, err := s.client.Children(s.normalize(directory))
if err != nil {
if err == zk.ErrNoNode {
return nil, store.ErrKeyNotFound
}
return nil, err
}
return children, nil
}
kv := []*store.KVPair{}
// FIXME Costly Get request for each child key..
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key))
if err != nil {
// If node is not found: List is out of date, retry
if err == store.ErrKeyNotFound {
return s.List(directory)
}
return nil, err
}
kv = append(kv, &store.KVPair{
Key: key,
Value: []byte(pair.Value),
LastIndex: uint64(stat.Version),
})
// listChildrenRecursive lists the children of a directory as well as
// all the descendant children of sub-folders in a recursive fashion.
func (s *Zookeeper) listChildrenRecursive(list *[]string, directory string) error {
children, err := s.listChildren(directory)
if err != nil {
return err
}
return kv, nil
// We reached a leaf.
if len(children) == 0 {
return nil
}
for _, c := range children {
c = strings.TrimSuffix(directory, "/") + "/" + c
err := s.listChildrenRecursive(list, c)
if err != nil && err != zk.ErrNoChildrenForEphemerals {
return err
}
*list = append(*list, c)
}
return nil
}
// List child nodes of a given directory
func (s *Zookeeper) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
children := make([]string, 0)
err := s.listChildrenRecursive(&children, directory)
if err != nil {
return nil, err
}
kvs, err := s.getList(children, opts)
if err != nil {
// If node is not found: List is out of date, retry
if err == store.ErrKeyNotFound {
return s.List(directory, opts)
}
return nil, err
}
return kvs, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Zookeeper) DeleteTree(directory string) error {
pairs, err := s.List(directory)
children, err := s.listChildren(directory)
if err != nil {
return err
}
var reqs []interface{}
for _, pair := range pairs {
for _, c := range children {
reqs = append(reqs, &zk.DeleteRequest{
Path: s.normalize(directory + "/" + pair.Key),
Path: s.normalize(directory + "/" + c),
Version: -1,
})
}
@ -313,7 +335,7 @@ func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair,
// Create the directory
parts := store.SplitKey(strings.TrimSuffix(key, "/"))
parts = parts[:len(parts)-1]
if err = s.createFullPath(parts, false); err != nil {
if err = s.createFullPath(parts, []byte{}, false); err != nil {
// Failed to create the directory.
return false, nil, err
}
@ -401,14 +423,16 @@ func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.
func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
err := l.lock.Lock()
lostCh := make(chan struct{})
if err == nil {
// We hold the lock, we can set our value
// FIXME: The value is left behind
// (problematic for leader election)
_, err = l.client.Set(l.key, l.value, -1)
if err == nil {
go l.monitorLock(stopChan, lostCh)
}
}
return make(chan struct{}), err
return lostCh, err
}
// Unlock the "key". Calling unlock while
@ -427,3 +451,143 @@ func (s *Zookeeper) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimSuffix(key, "/")
}
func (l *zookeeperLock) monitorLock(stopCh <-chan struct{}, lostCh chan struct{}) {
defer close(lostCh)
for {
_, _, eventCh, err := l.client.GetW(l.key)
if err != nil {
// We failed to set watch, relinquish the lock
return
}
select {
case e := <-eventCh:
if e.Type == zk.EventNotWatching ||
(e.Type == zk.EventSession && e.State == zk.StateExpired) {
// Either the session has been closed and our watch has been
// invalidated or the session has expired.
return
} else if e.Type == zk.EventNodeDataChanged {
// Someone else has written to the lock node and believes
// that they have the lock.
return
}
case <-stopCh:
// The caller has requested that we relinquish our lock
return
}
}
}
func (s *Zookeeper) get(key string) ([]byte, *zk.Stat, error) {
var resp []byte
var meta *zk.Stat
var err error
// To guard against older versions of libkv
// creating and writing to znodes non-atomically,
// we try to resync a few times if we read SOH or
// an empty string
for i := 0; i <= syncRetryLimit; i++ {
resp, meta, err = s.client.Get(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, nil, store.ErrKeyNotFound
}
return nil, nil, err
}
if string(resp) != SOH && string(resp) != "" {
return resp, meta, nil
}
if i < syncRetryLimit {
if _, err = s.client.Sync(s.normalize(key)); err != nil {
return nil, nil, err
}
}
}
return resp, meta, nil
}
func (s *Zookeeper) getW(key string) ([]byte, *zk.Stat, <-chan zk.Event, error) {
var resp []byte
var meta *zk.Stat
var ech <-chan zk.Event
var err error
// To guard against older versions of libkv
// creating and writing to znodes non-atomically,
// we try to resync a few times if we read SOH or
// an empty string
for i := 0; i <= syncRetryLimit; i++ {
resp, meta, ech, err = s.client.GetW(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, nil, nil, store.ErrKeyNotFound
}
return nil, nil, nil, err
}
if string(resp) != SOH && string(resp) != "" {
return resp, meta, ech, nil
}
if i < syncRetryLimit {
if _, err = s.client.Sync(s.normalize(key)); err != nil {
return nil, nil, nil, err
}
}
}
return resp, meta, ech, nil
}
// getListWithPath gets the key/value pairs for a list of keys under
// a given path.
//
// This is generally used when we get a list of child keys which
// are stripped out of their path (for example when using ChildrenW).
func (s *Zookeeper) getListWithPath(path string, keys []string, opts *store.ReadOptions) ([]*store.KVPair, error) {
kvs := []*store.KVPair{}
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(path, "/")+s.normalize(key), opts)
if err != nil {
return nil, err
}
kvs = append(kvs, &store.KVPair{
Key: key,
Value: pair.Value,
LastIndex: pair.LastIndex,
})
}
return kvs, nil
}
// getList returns key/value pairs from a list of keys.
//
// This is generally used when we have a full list of keys with
// their full path included.
func (s *Zookeeper) getList(keys []string, opts *store.ReadOptions) ([]*store.KVPair, error) {
kvs := []*store.KVPair{}
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(key, "/"), nil)
if err != nil {
return nil, err
}
kvs = append(kvs, &store.KVPair{
Key: key,
Value: pair.Value,
LastIndex: pair.LastIndex,
})
}
return kvs, nil
}
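A minimal sketch of listing and watching a nested tree with the reworked Zookeeper backend; the address and keys are placeholders, not taken from this commit. List now recurses into sub-folders and returns full key paths, and WatchTree delivers the current children first and then re-reads them on each change event.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libkv"
    "github.com/docker/libkv/store"
    "github.com/docker/libkv/store/zookeeper"
)

func main() {
    zookeeper.Register()

    kv, err := libkv.NewStore(store.ZK, []string{"127.0.0.1:2181"}, nil)
    if err != nil {
        log.Fatal(err)
    }

    // Nested keys: List("app") now also returns "app/sub/leaf", not just direct children.
    if err := kv.Put("app/sub/leaf", []byte("v"), nil); err != nil {
        log.Fatal(err)
    }

    pairs, err := kv.List("app", nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range pairs {
        fmt.Println(p.Key, string(p.Value))
    }

    stop := make(chan struct{})
    events, err := kv.WatchTree("app", stop, nil)
    if err != nil {
        log.Fatal(err)
    }
    // The current tree is delivered first, then again whenever the children change.
    first := <-events
    fmt.Println(len(first), "entries in the initial snapshot")
    close(stop)
}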