chore: update docker and k8s
Parent: 2b5c7f9e91
Commit: c2d440a914
1283 changed files with 67741 additions and 27918 deletions
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored, 72 changes)
@@ -28,9 +28,9 @@ import (
 // Config contains all the settings for a Controller.
 type Config struct {
-	// The queue for your objects; either a FIFO or
-	// a DeltaFIFO. Your Process() function should accept
-	// the output of this Queue's Pop() method.
+	// The queue for your objects - has to be a DeltaFIFO due to
+	// assumptions in the implementation. Your Process() function
+	// should accept the output of this Queue's Pop() method.
 	Queue

 	// Something that can list and watch your objects.
@@ -285,45 +285,7 @@ func NewInformer(
 	// This will hold the client state, as we know it.
 	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

-	// This will hold incoming changes. Note how we pass clientState in as a
-	// KeyLister, that way resync operations will result in the correct set
-	// of update/delete deltas.
-	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
-
-	cfg := &Config{
-		Queue:            fifo,
-		ListerWatcher:    lw,
-		ObjectType:       objType,
-		FullResyncPeriod: resyncPeriod,
-		RetryOnError:     false,
-
-		Process: func(obj interface{}) error {
-			// from oldest to newest
-			for _, d := range obj.(Deltas) {
-				switch d.Type {
-				case Sync, Added, Updated:
-					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
-						if err := clientState.Update(d.Object); err != nil {
-							return err
-						}
-						h.OnUpdate(old, d.Object)
-					} else {
-						if err := clientState.Add(d.Object); err != nil {
-							return err
-						}
-						h.OnAdd(d.Object)
-					}
-				case Deleted:
-					if err := clientState.Delete(d.Object); err != nil {
-						return err
-					}
-					h.OnDelete(d.Object)
-				}
-			}
-			return nil
-		},
-	}
-	return clientState, New(cfg)
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
 }

 // NewIndexerInformer returns a Indexer and a controller for populating the index
@@ -352,6 +314,30 @@ func NewIndexerInformer(
 	// This will hold the client state, as we know it.
 	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+}
+
+// newInformer returns a controller for populating the store while also
+// providing event notifications.
+//
+// Parameters
+//  * lw is list and watch functions for the source of the resource you want to
+//    be informed of.
+//  * objType is an object of the type that you expect to receive.
+//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//    calls, even if nothing changed). Otherwise, re-list will be delayed as
+//    long as possible (until the upstream source closes the watch or times out,
+//    or you stop the controller).
+//  * h is the object you want notifications sent to.
+//  * clientState is the store you want to populate
+//
+func newInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	clientState Store,
+) Controller {
 	// This will hold incoming changes. Note how we pass clientState in as a
 	// KeyLister, that way resync operations will result in the correct set
 	// of update/delete deltas.
@@ -390,5 +376,5 @@ func NewIndexerInformer(
 			return nil
 		},
 	}
-	return clientState, New(cfg)
+	return New(cfg)
 }
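Example (not part of the diff): a minimal sketch of driving the refactored NewInformer path from client code. The in-cluster config and the choice of Pods are illustrative assumptions; NewInformer still returns a Store plus a Controller, now wired up through the shared newInformer helper.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	config, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// List/watch Pods in all namespaces.
	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())

	// The returned Store is kept in sync by the Controller once Run starts.
	store, controller := cache.NewInformer(lw, &v1.Pod{}, 30*time.Second,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { fmt.Println("add:", obj.(*v1.Pod).Name) },
			UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update") },
			DeleteFunc: func(obj interface{}) { fmt.Println("delete") },
		})
	_ = store

	stop := make(chan struct{})
	defer close(stop)
	controller.Run(stop)
}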
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored, 36 changes)
@@ -23,7 +23,7 @@ import (

 	"k8s.io/apimachinery/pkg/util/sets"

-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

 // NewDeltaFIFO returns a Store which can be used process changes to items.
@@ -320,17 +320,15 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 	newDeltas := append(f.items[id], Delta{actionType, obj})
 	newDeltas = dedupDeltas(newDeltas)

-	_, exists := f.items[id]
 	if len(newDeltas) > 0 {
-		if !exists {
+		if _, exists := f.items[id]; !exists {
 			f.queue = append(f.queue, id)
 		}
 		f.items[id] = newDeltas
 		f.cond.Broadcast()
-	} else if exists {
-		// We need to remove this from our map (extra items
-		// in the queue are ignored if they are not in the
-		// map).
+	} else {
+		// We need to remove this from our map (extra items in the queue are
+		// ignored if they are not in the map).
 		delete(f.items, id)
 	}
 	return nil
@@ -348,9 +346,6 @@ func (f *DeltaFIFO) List() []interface{} {
 func (f *DeltaFIFO) listLocked() []interface{} {
 	list := make([]interface{}, 0, len(f.items))
 	for _, item := range f.items {
-		// Copy item's slice so operations on this slice
-		// won't interfere with the object we return.
-		item = copyDeltas(item)
 		list = append(list, item.Newest().Object)
 	}
 	return list
@@ -398,10 +393,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
 func (f *DeltaFIFO) IsClosed() bool {
 	f.closedLock.Lock()
 	defer f.closedLock.Unlock()
-	if f.closed {
-		return true
-	}
-	return false
+	return f.closed
 }

 // Pop blocks until an item is added to the queue, and then returns it. If
@@ -432,10 +424,10 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 		}
 		id := f.queue[0]
 		f.queue = f.queue[1:]
-		item, ok := f.items[id]
 		if f.initialPopulationCount > 0 {
 			f.initialPopulationCount--
 		}
+		item, ok := f.items[id]
 		if !ok {
 			// Item may have been deleted subsequently.
 			continue
@@ -474,6 +466,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {

 	if f.knownObjects == nil {
 		// Do deletion detection against our own list.
+		queuedDeletions := 0
 		for k, oldItem := range f.items {
 			if keys.Has(k) {
 				continue
@@ -482,6 +475,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 			if n := oldItem.Newest(); n != nil {
 				deletedObj = n.Object
 			}
+			queuedDeletions++
 			if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
 				return err
 			}
@@ -489,7 +483,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {

 		if !f.populated {
 			f.populated = true
-			f.initialPopulationCount = len(list)
+			// While there shouldn't be any queued deletions in the initial
+			// population of the queue, it's better to be on the safe side.
+			f.initialPopulationCount = len(list) + queuedDeletions
 		}

 		return nil
@@ -506,10 +502,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 		deletedObj, exists, err := f.knownObjects.GetByKey(k)
 		if err != nil {
 			deletedObj = nil
-			glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+			klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
 		} else if !exists {
 			deletedObj = nil
-			glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+			klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
 		}
 		queuedDeletions++
 		if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
@@ -553,10 +549,10 @@ func (f *DeltaFIFO) syncKey(key string) error {
 func (f *DeltaFIFO) syncKeyLocked(key string) error {
 	obj, exists, err := f.knownObjects.GetByKey(key)
 	if err != nil {
-		glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
+		klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
 		return nil
 	} else if !exists {
-		glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
+		klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
 		return nil
 	}
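Example (not part of the diff): a small sketch of the DeltaFIFO path touched above, run without a knownObjects store so Replace does deletion detection against the FIFO's own items; the fix above makes initialPopulationCount include those queued deletions. The item type and key function are invented for illustration.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type item struct{ name string }

func main() {
	keyOf := func(obj interface{}) (string, error) {
		// Replace may hand back DeletedFinalStateUnknown tombstones.
		if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
			return d.Key, nil
		}
		return obj.(item).name, nil
	}
	f := cache.NewDeltaFIFO(keyOf, nil)

	_ = f.Add(item{"a"})
	// Replacing with a list that lacks "a" queues a Deleted delta for it,
	// and (after this commit) counts it toward initialPopulationCount.
	_ = f.Replace([]interface{}{item{"b"}}, "v1")

	for i := 0; i < 2; i++ {
		_, _ = f.Pop(func(obj interface{}) error {
			for _, d := range obj.(cache.Deltas) {
				fmt.Printf("%s %v\n", d.Type, d.Object)
			}
			return nil
		})
	}
}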
vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored, 60 changes)
@@ -20,8 +20,8 @@ import (
 	"sync"
 	"time"

-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/klog"
 )

 // ExpirationCache implements the store interface
@@ -48,7 +48,7 @@ type ExpirationCache struct {
 // ExpirationPolicy dictates when an object expires. Currently only abstracted out
 // so unittests don't rely on the system clock.
 type ExpirationPolicy interface {
-	IsExpired(obj *timestampedEntry) bool
+	IsExpired(obj *TimestampedEntry) bool
 }

 // TTLPolicy implements a ttl based ExpirationPolicy.
@@ -63,26 +63,29 @@ type TTLPolicy struct {

 // IsExpired returns true if the given object is older than the ttl, or it can't
 // determine its age.
-func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
-	return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
+func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
+	return p.Ttl > 0 && p.Clock.Since(obj.Timestamp) > p.Ttl
 }

-// timestampedEntry is the only type allowed in a ExpirationCache.
-type timestampedEntry struct {
-	obj       interface{}
-	timestamp time.Time
+// TimestampedEntry is the only type allowed in a ExpirationCache.
+// Keep in mind that it is not safe to share timestamps between computers.
+// Behavior may be inconsistent if you get a timestamp from the API Server and
+// use it on the client machine as part of your ExpirationCache.
+type TimestampedEntry struct {
+	Obj       interface{}
+	Timestamp time.Time
 }

-// getTimestampedEntry returns the timestampedEntry stored under the given key.
-func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
+// getTimestampedEntry returns the TimestampedEntry stored under the given key.
+func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
 	item, _ := c.cacheStorage.Get(key)
-	if tsEntry, ok := item.(*timestampedEntry); ok {
+	if tsEntry, ok := item.(*TimestampedEntry); ok {
 		return tsEntry, true
 	}
 	return nil, false
 }

-// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
+// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
 // already expired. It holds a write lock across deletion.
 func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
 	// Prevent all inserts from the time we deem an item as "expired" to when we
@@ -95,11 +98,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
 		return nil, false
 	}
 	if c.expirationPolicy.IsExpired(timestampedItem) {
-		glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
+		klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
 		c.cacheStorage.Delete(key)
 		return nil, false
 	}
-	return timestampedItem.obj, true
+	return timestampedItem.Obj, true
 }

 // GetByKey returns the item stored under the key, or sets exists=false.
@@ -126,7 +129,7 @@ func (c *ExpirationCache) List() []interface{} {

 	list := make([]interface{}, 0, len(items))
 	for _, item := range items {
-		obj := item.(*timestampedEntry).obj
+		obj := item.(*TimestampedEntry).Obj
 		if key, err := c.keyFunc(obj); err != nil {
 			list = append(list, obj)
 		} else if obj, exists := c.getOrExpire(key); exists {
@@ -144,14 +147,14 @@ func (c *ExpirationCache) ListKeys() []string {
 // Add timestamps an item and inserts it into the cache, overwriting entries
 // that might exist under the same key.
 func (c *ExpirationCache) Add(obj interface{}) error {
-	c.expirationLock.Lock()
-	defer c.expirationLock.Unlock()
-
 	key, err := c.keyFunc(obj)
 	if err != nil {
 		return KeyError{obj, err}
 	}
-	c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+
+	c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now()})
 	return nil
 }

@@ -163,12 +166,12 @@ func (c *ExpirationCache) Update(obj interface{}) error {

 // Delete removes an item from the cache.
 func (c *ExpirationCache) Delete(obj interface{}) error {
-	c.expirationLock.Lock()
-	defer c.expirationLock.Unlock()
 	key, err := c.keyFunc(obj)
 	if err != nil {
 		return KeyError{obj, err}
 	}
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
 	c.cacheStorage.Delete(key)
 	return nil
 }
@@ -177,17 +180,17 @@ func (c *ExpirationCache) Delete(obj interface{}) error {
 // before attempting the replace operation. The replace operation will
 // delete the contents of the ExpirationCache `c`.
 func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
-	c.expirationLock.Lock()
-	defer c.expirationLock.Unlock()
-	items := map[string]interface{}{}
+	items := make(map[string]interface{}, len(list))
 	ts := c.clock.Now()
 	for _, item := range list {
 		key, err := c.keyFunc(item)
 		if err != nil {
 			return KeyError{item, err}
 		}
-		items[key] = &timestampedEntry{item, ts}
+		items[key] = &TimestampedEntry{item, ts}
 	}
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
 	c.cacheStorage.Replace(items, resourceVersion)
 	return nil
 }
@@ -199,10 +202,15 @@ func (c *ExpirationCache) Resync() error {

 // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
 func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
+	return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
+}
+
+// NewExpirationStore creates and returns a ExpirationCache for a given policy
+func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
 	return &ExpirationCache{
 		cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
 		keyFunc:          keyFunc,
 		clock:            clock.RealClock{},
-		expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
+		expirationPolicy: expirationPolicy,
 	}
 }
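Example (not part of the diff): with timestampedEntry exported as TimestampedEntry, an ExpirationPolicy can now be implemented outside the cache package and plugged in through the new NewExpirationStore constructor. The maxAgePolicy below is an illustrative stand-in for the stock TTLPolicy.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

// maxAgePolicy expires entries older than maxAge; it satisfies
// cache.ExpirationPolicy thanks to the newly exported TimestampedEntry.
type maxAgePolicy struct{ maxAge time.Duration }

func (p maxAgePolicy) IsExpired(e *cache.TimestampedEntry) bool {
	return time.Since(e.Timestamp) > p.maxAge
}

func main() {
	store := cache.NewExpirationStore(
		func(obj interface{}) (string, error) { return obj.(string), nil },
		maxAgePolicy{maxAge: time.Minute},
	)
	_ = store.Add("hello")
	item, exists, _ := store.GetByKey("hello")
	fmt.Println(item, exists) // "hello true" until the entry ages out
}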
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go (generated, vendored, 2 changes)
@@ -38,7 +38,7 @@ type FakeExpirationPolicy struct {
 	RetrieveKeyFunc KeyFunc
 }

-func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
+func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
 	key, _ := p.RetrieveKeyFunc(obj)
 	return !p.NeverExpire.Has(key)
 }
vendor/k8s.io/client-go/tools/cache/fake_custom_store.go (generated, vendored, 2 changes)
@@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error {
 // Update calls the custom Update function if defined
 func (f *FakeCustomStore) Update(obj interface{}) error {
 	if f.UpdateFunc != nil {
-		return f.Update(obj)
+		return f.UpdateFunc(obj)
 	}
 	return nil
 }
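Example (not part of the diff): the one-line change above fixes unbounded recursion, since Update previously called itself rather than the injected UpdateFunc. A minimal sketch of the now-working path:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	store := &cache.FakeCustomStore{
		UpdateFunc: func(obj interface{}) error {
			fmt.Println("updated:", obj)
			return nil
		},
	}
	// Before the fix, Update called itself instead of UpdateFunc and
	// recursed until the stack overflowed.
	_ = store.Update("some-object")
}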
vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored, 2 changes)
@@ -297,7 +297,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
 // after calling this function. f's queue is reset, too; upon return, it
 // will contain the items in the map, in no particular order.
 func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
-	items := map[string]interface{}{}
+	items := make(map[string]interface{}, len(list))
 	for _, item := range list {
 		key, err := f.keyFunc(item)
 		if err != nil {
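Note (not part of the diff): the same map preallocation recurs in store.go and expiration_cache.go below. A generic sketch of the micro-optimization: sizing the map up front avoids incremental growth and rehashing while Replace copies the list in.

package main

import "fmt"

func main() {
	list := []interface{}{"a", "b", "c"}
	// make with a capacity hint allocates enough buckets up front,
	// instead of growing the map as items are inserted.
	items := make(map[string]interface{}, len(list))
	for _, item := range list {
		items[item.(string)] = item
	}
	fmt.Println(len(items)) // 3
}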
vendor/k8s.io/client-go/tools/cache/heap.go (generated, vendored, 2 changes)
@@ -204,7 +204,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error {
 	return nil
 }

-// addIfNotPresentLocked assumes the lock is already held and adds the the provided
+// addIfNotPresentLocked assumes the lock is already held and adds the provided
 // item to the queue if it does not already exist.
 func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
 	if _, exists := h.data.items[key]; exists {
vendor/k8s.io/client-go/tools/cache/listers.go (generated, vendored, 24 changes)
@@ -17,7 +17,7 @@ limitations under the License.
 package cache

 import (
-	"github.com/golang/glog"
+	"k8s.io/klog"

 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -31,7 +31,14 @@ import (
 type AppendFunc func(interface{})

 func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
+	selectAll := selector.Empty()
 	for _, m := range store.List() {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects.
+			appendFn(m)
+			continue
+		}
 		metadata, err := meta.Accessor(m)
 		if err != nil {
 			return err
@@ -44,8 +51,15 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
 }

 func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
+	selectAll := selector.Empty()
 	if namespace == metav1.NamespaceAll {
 		for _, m := range indexer.List() {
+			if selectAll {
+				// Avoid computing labels of the objects to speed up common flows
+				// of listing all objects.
+				appendFn(m)
+				continue
+			}
 			metadata, err := meta.Accessor(m)
 			if err != nil {
 				return err
@@ -60,7 +74,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
 	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
 	if err != nil {
 		// Ignore error; do slow search without index.
-		glog.Warningf("can not retrieve list of objects using index : %v", err)
+		klog.Warningf("can not retrieve list of objects using index : %v", err)
 		for _, m := range indexer.List() {
 			metadata, err := meta.Accessor(m)
 			if err != nil {
@@ -74,6 +88,12 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
 		return nil
 	}
 	for _, m := range items {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects.
+			appendFn(m)
+			continue
+		}
 		metadata, err := meta.Accessor(m)
 		if err != nil {
 			return err
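Example (not part of the diff): the selectAll fast path above fires whenever the selector is empty, as with labels.Everything(), so listing everything no longer pays for per-object label extraction. A sketch using an assumed in-memory store of Pods:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	_ = store.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}})

	// labels.Everything() is Empty(), so ListAll takes the fast path and
	// never touches the objects' labels.
	var names []string
	_ = cache.ListAll(store, labels.Everything(), func(obj interface{}) {
		names = append(names, obj.(*v1.Pod).Name)
	})
	fmt.Println(names)
}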
vendor/k8s.io/client-go/tools/cache/listwatch.go (generated, vendored, 97 changes)
@@ -18,27 +18,34 @@ package cache

 import (
 	"context"
 	"time"

-	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/pager"
 )

-// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
-type ListerWatcher interface {
+// Lister is any object that knows how to perform an initial list.
+type Lister interface {
 	// List should return a list type object; the Items field will be extracted, and the
 	// ResourceVersion field will be used to start the watch in the right place.
 	List(options metav1.ListOptions) (runtime.Object, error)
+}

+// Watcher is any object that knows how to start a watch on a resource.
+type Watcher interface {
 	// Watch should begin a watch at the specified version.
 	Watch(options metav1.ListOptions) (watch.Interface, error)
 }

+// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
+type ListerWatcher interface {
+	Lister
+	Watcher
+}
+
 // ListFunc knows how to list resources
 type ListFunc func(options metav1.ListOptions) (runtime.Object, error)

@@ -93,13 +100,6 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string,
 	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
 }

-func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
-	if options.TimeoutSeconds != nil {
-		return time.Duration(*options.TimeoutSeconds) * time.Second
-	}
-	return 0
-}
-
 // List a set of apiserver resources
 func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
 	if !lw.DisableChunking {
@@ -112,76 +112,3 @@ func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
 func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
 	return lw.WatchFunc(options)
 }
-
-// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
-// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
-// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
-func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
-	if len(conditions) == 0 {
-		return nil, nil
-	}
-
-	list, err := lw.List(metav1.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
-	initialItems, err := meta.ExtractList(list)
-	if err != nil {
-		return nil, err
-	}
-
-	// use the initial items as simulated "adds"
-	var lastEvent *watch.Event
-	currIndex := 0
-	passedConditions := 0
-	for _, condition := range conditions {
-		// check the next condition against the previous event and short circuit waiting for the next watch
-		if lastEvent != nil {
-			done, err := condition(*lastEvent)
-			if err != nil {
-				return lastEvent, err
-			}
-			if done {
-				passedConditions = passedConditions + 1
-				continue
-			}
-		}
-
-	ConditionSucceeded:
-		for currIndex < len(initialItems) {
-			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
-			currIndex++
-
-			done, err := condition(*lastEvent)
-			if err != nil {
-				return lastEvent, err
-			}
-			if done {
-				passedConditions = passedConditions + 1
-				break ConditionSucceeded
-			}
-		}
-	}
-	if passedConditions == len(conditions) {
-		return lastEvent, nil
-	}
-	remainingConditions := conditions[passedConditions:]
-
-	metaObj, err := meta.ListAccessor(list)
-	if err != nil {
-		return nil, err
-	}
-	currResourceVersion := metaObj.GetResourceVersion()
-
-	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
-	if err != nil {
-		return nil, err
-	}
-
-	evt, err := watch.Until(timeout, watchInterface, remainingConditions...)
-	if err == watch.ErrWatchClosed {
-		// present a consistent error interface to callers
-		err = wait.ErrWaitTimeout
-	}
-	return evt, err
-}
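Example (not part of the diff): the ListerWatcher split means code can depend on just the half it needs. ListWatchUntil is gone from this package; if memory serves, its replacement lives in k8s.io/client-go/tools/watch, but verify against the release you vendor. The in-cluster config below is an illustrative assumption.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	config, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())

	// With the interface split, consumers can accept only what they use.
	var lister cache.Lister = lw
	var watcher cache.Watcher = lw
	_, _ = lister, watcher
}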
vendor/k8s.io/client-go/tools/cache/mutation_cache.go (generated, vendored, 4 changes)
@@ -22,7 +22,7 @@ import (
 	"sync"
 	"time"

-	"github.com/golang/glog"
+	"k8s.io/klog"

 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er
 		}
 		elements, err := fn(updated)
 		if err != nil {
-			glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
+			klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
 			continue
 		}
 		for _, inIndex := range elements {
vendor/k8s.io/client-go/tools/cache/mutation_detector.go (generated, vendored, 4 changes)
@@ -24,7 +24,7 @@ import (
 	"sync"
 	"time"

-	"github.com/golang/glog"
+	"k8s.io/klog"

 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/diff"
@@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector {
 	if !mutationDetectionEnabled {
 		return dummyMutationDetector{}
 	}
-	glog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
+	klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
 	return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
 }
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored, 214 changes)
@@ -17,6 +17,7 @@ limitations under the License.
 package cache

 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -24,25 +25,22 @@ import (
 	"net"
 	"net/url"
 	"reflect"
-	"regexp"
-	goruntime "runtime"
-	"runtime/debug"
-	"strconv"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"syscall"
 	"time"

-	"github.com/golang/glog"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/naming"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/tools/pager"
+	"k8s.io/klog"
+	"k8s.io/utils/trace"
 )

 // Reflector watches a specified resource and causes all changes to be reflected in the given store.
@@ -71,20 +69,21 @@ type Reflector struct {
 	lastSyncResourceVersion string
 	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
 	lastSyncResourceVersionMutex sync.RWMutex
+	// WatchListPageSize is the requested chunk size of initial and resync watch lists.
+	// Defaults to pager.PageSize.
+	WatchListPageSize int64
 }

 var (
 	// We try to spread the load on apiserver by setting timeouts for
 	// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
-	// However, it can be modified to avoid periodic resync to break the
-	// TCP connection.
 	minWatchTimeout = 5 * time.Minute
 )

 // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
 // The indexer is configured to key on namespace
 func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
-	indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
+	indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
 	reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
 	return indexer, reflector
 }
@@ -96,20 +95,13 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
 // resyncPeriod, so that you can use reflectors to periodically process everything as
 // well as incrementally processing the things that change.
 func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-	return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod)
+	return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
 }

-// reflectorDisambiguator is used to disambiguate started reflectors.
-// initialized to an unstable value to ensure meaning isn't attributed to the suffix.
-var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
-
 // NewNamedReflector same as NewReflector, but with a specified name for logging
 func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-	reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
 	r := &Reflector{
-		name: name,
-		// we need this to be unique per process (some names are still the same) but obvious who it belongs to
-		metrics:       newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
+		name:          name,
 		listerWatcher: lw,
 		store:         store,
 		expectedType:  reflect.TypeOf(expectedType),
@@ -120,86 +112,14 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
 	return r
 }

-func makeValidPrometheusMetricLabel(in string) string {
-	// this isn't perfect, but it removes our common characters
-	return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
-}
-
 // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
 // call chains to NewReflector, so they'd be low entropy names for reflectors
-var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"}
-
-// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages
-// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging
-func getDefaultReflectorName(ignoredPackages ...string) string {
-	name := "????"
-	const maxStack = 10
-	for i := 1; i < maxStack; i++ {
-		_, file, line, ok := goruntime.Caller(i)
-		if !ok {
-			file, line, ok = extractStackCreator()
-			if !ok {
-				break
-			}
-			i += maxStack
-		}
-		if hasPackage(file, ignoredPackages) {
-			continue
-		}
-
-		file = trimPackagePrefix(file)
-		name = fmt.Sprintf("%s:%d", file, line)
-		break
-	}
-	return name
-}
-
-// hasPackage returns true if the file is in one of the ignored packages.
-func hasPackage(file string, ignoredPackages []string) bool {
-	for _, ignoredPackage := range ignoredPackages {
-		if strings.Contains(file, ignoredPackage) {
-			return true
-		}
-	}
-	return false
-}
-
-// trimPackagePrefix reduces duplicate values off the front of a package name.
-func trimPackagePrefix(file string) string {
-	if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 {
-		return file[l+len("k8s.io/client-go/"):]
-	}
-	if l := strings.LastIndex(file, "/src/"); l >= 0 {
-		return file[l+5:]
-	}
-	if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
-		return file[l+1:]
-	}
-	return file
-}
-
-var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
-
-// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
-// if the creator cannot be located.
-// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
-func extractStackCreator() (string, int, bool) {
-	stack := debug.Stack()
-	matches := stackCreator.FindStringSubmatch(string(stack))
-	if matches == nil || len(matches) != 4 {
-		return "", 0, false
-	}
-	line, err := strconv.Atoi(matches[3])
-	if err != nil {
-		return "", 0, false
-	}
-	return matches[2], line, true
-}
+var internalPackages = []string{"client-go/tools/cache/"}

 // Run starts a watch and handles watch events. Will restart the watch if it is closed.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
 	wait.Until(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			utilruntime.HandleError(err)
@@ -237,34 +157,71 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
 // and then use the resource version to watch.
 // It returns error if ListAndWatch didn't even try to initialize watch.
 func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
-	glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
+	klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
 	var resourceVersion string

 	// Explicitly set "0" as resource version - it's fine for the List()
 	// to be served from cache and potentially be delayed relative to
 	// etcd contents. Reflector framework will catch up via Watch() eventually.
 	options := metav1.ListOptions{ResourceVersion: "0"}
-	r.metrics.numberOfLists.Inc()
-	start := r.clock.Now()
-	list, err := r.listerWatcher.List(options)
-	if err != nil {
-		return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+
+	if err := func() error {
+		initTrace := trace.New("Reflector " + r.name + " ListAndWatch")
+		defer initTrace.LogIfLong(10 * time.Second)
+		var list runtime.Object
+		var err error
+		listCh := make(chan struct{}, 1)
+		panicCh := make(chan interface{}, 1)
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					panicCh <- r
+				}
+			}()
+			// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
+			// list request will return the full response.
+			pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+				return r.listerWatcher.List(opts)
+			}))
+			if r.WatchListPageSize != 0 {
+				pager.PageSize = r.WatchListPageSize
+			}
+			// Pager falls back to full list if paginated list calls fail due to an "Expired" error.
+			list, err = pager.List(context.Background(), options)
+			close(listCh)
+		}()
+		select {
+		case <-stopCh:
+			return nil
+		case r := <-panicCh:
+			panic(r)
+		case <-listCh:
+		}
+		if err != nil {
+			return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+		}
+		initTrace.Step("Objects listed")
+		listMetaInterface, err := meta.ListAccessor(list)
+		if err != nil {
+			return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
+		}
+		resourceVersion = listMetaInterface.GetResourceVersion()
+		initTrace.Step("Resource version extracted")
+		items, err := meta.ExtractList(list)
+		if err != nil {
+			return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
+		}
+		initTrace.Step("Objects extracted")
+		if err := r.syncWith(items, resourceVersion); err != nil {
+			return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
+		}
+		initTrace.Step("SyncWith done")
+		r.setLastSyncResourceVersion(resourceVersion)
+		initTrace.Step("Resource version updated")
+		return nil
+	}(); err != nil {
+		return err
 	}
-	r.metrics.listDuration.Observe(time.Since(start).Seconds())
-	listMetaInterface, err := meta.ListAccessor(list)
-	if err != nil {
-		return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
-	}
-	resourceVersion = listMetaInterface.GetResourceVersion()
-	items, err := meta.ExtractList(list)
-	if err != nil {
-		return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
-	}
-	r.metrics.numberOfItemsInList.Observe(float64(len(items)))
-	if err := r.syncWith(items, resourceVersion); err != nil {
-		return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
-	}
-	r.setLastSyncResourceVersion(resourceVersion)

 	resyncerrc := make(chan error, 1)
 	cancelCh := make(chan struct{})
@@ -283,7 +240,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 				return
 			}
 			if r.ShouldResync == nil || r.ShouldResync() {
-				glog.V(4).Infof("%s: forcing resync", r.name)
+				klog.V(4).Infof("%s: forcing resync", r.name)
 				if err := r.store.Resync(); err != nil {
 					resyncerrc <- err
 					return
@@ -308,16 +265,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			// We want to avoid situations of hanging watchers. Stop any wachers that do not
 			// receive any events within the timeout window.
 			TimeoutSeconds: &timeoutSeconds,
+			// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
+			// Reflector doesn't assume bookmarks are returned at all (if the server do not support
+			// watch bookmarks, it will ignore this field).
+			// Disabled in Alpha release of watch bookmarks feature.
+			AllowWatchBookmarks: false,
 		}

-		r.metrics.numberOfWatches.Inc()
 		w, err := r.listerWatcher.Watch(options)
 		if err != nil {
 			switch err {
 			case io.EOF:
 				// watch closed normally
 			case io.ErrUnexpectedEOF:
-				glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
+				klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
 			default:
 				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
 			}
@@ -338,7 +299,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {

 		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
 			if err != errorStopRequested {
-				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+				klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
 			}
 			return nil
 		}
@@ -362,11 +323,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err
 	// Stopping the watcher should be idempotent and if we return from this function there's no way
 	// we're coming back in with the same watch interface.
 	defer w.Stop()
-	// update metrics
-	defer func() {
-		r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
-		r.metrics.watchDuration.Observe(time.Since(start).Seconds())
-	}()

 loop:
 	for {
@@ -411,6 +367,8 @@ loop:
 			if err != nil {
 				utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
 			}
+		case watch.Bookmark:
+			// A `Bookmark` means watch has synced here, just update the resourceVersion
 		default:
 			utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
 		}
@@ -420,12 +378,11 @@ loop:
 		}
 	}

-	watchDuration := r.clock.Now().Sub(start)
+	watchDuration := r.clock.Since(start)
 	if watchDuration < 1*time.Second && eventCount == 0 {
-		r.metrics.numberOfShortWatches.Inc()
 		return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
 	}
-	glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
+	klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
 	return nil
 }

@@ -441,9 +398,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
 	r.lastSyncResourceVersionMutex.Lock()
 	defer r.lastSyncResourceVersionMutex.Unlock()
 	r.lastSyncResourceVersion = v
-
-	rv, err := strconv.Atoi(v)
-	if err == nil {
-		r.metrics.lastResourceVersion.Set(float64(rv))
-	}
 }
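Example (not part of the diff): the rewritten ListAndWatch pages the initial LIST through k8s.io/client-go/tools/pager, and the new WatchListPageSize field overrides the default page size. A sketch, assuming in-cluster credentials and Pods as the watched type:

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	config, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	r := cache.NewReflector(lw, &v1.Pod{}, store, 5*time.Minute)
	// New in this version: request the paginated initial LIST in 500-item chunks.
	r.WatchListPageSize = 500

	stop := make(chan struct{})
	r.Run(stop) // blocks, relisting and watching until stop is closed
}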
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go (generated, vendored, 17 changes)
@@ -94,23 +94,6 @@ var metricsFactory = struct {
 	metricsProvider: noopMetricsProvider{},
 }

-func newReflectorMetrics(name string) *reflectorMetrics {
-	var ret *reflectorMetrics
-	if len(name) == 0 {
-		return ret
-	}
-	return &reflectorMetrics{
-		numberOfLists:        metricsFactory.metricsProvider.NewListsMetric(name),
-		listDuration:         metricsFactory.metricsProvider.NewListDurationMetric(name),
-		numberOfItemsInList:  metricsFactory.metricsProvider.NewItemsInListMetric(name),
-		numberOfWatches:      metricsFactory.metricsProvider.NewWatchesMetric(name),
-		numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
-		watchDuration:        metricsFactory.metricsProvider.NewWatchDurationMetric(name),
-		numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
-		lastResourceVersion:  metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
-	}
-}
-
 // SetReflectorMetricsProvider sets the metrics provider
 func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
 	metricsFactory.setProviders.Do(func() {
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored, 105 changes)
@@ -25,37 +25,90 @@ import (
 	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/util/buffer"
 	"k8s.io/client-go/util/retry"
+	"k8s.io/utils/buffer"

-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

-// SharedInformer has a shared data cache and is capable of distributing notifications for changes
-// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
-// one behavior change compared to a standard Informer. When you receive a notification, the cache
-// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
-// on the contents of the cache exactly matching the notification you've received in handler
-// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
-// has advantages over the broadcaster since it allows us to share a common cache across many
-// controllers. Extending the broadcaster would have required us keep duplicate caches for each
-// watch.
+// SharedInformer provides eventually consistent linkage of its
+// clients to the authoritative state of a given collection of
+// objects. An object is identified by its API group, kind/resource,
+// namespace, and name. One SharedInfomer provides linkage to objects
+// of a particular API group and kind/resource. The linked object
+// collection of a SharedInformer may be further restricted to one
+// namespace and/or by label selector and/or field selector.
+//
+// The authoritative state of an object is what apiservers provide
+// access to, and an object goes through a strict sequence of states.
+// A state is either "absent" or present with a ResourceVersion and
+// other appropriate content.
+//
+// A SharedInformer maintains a local cache, exposed by Store(), of
+// the state of each relevant object. This cache is eventually
+// consistent with the authoritative state. This means that, unless
+// prevented by persistent communication problems, if ever a
+// particular object ID X is authoritatively associated with a state S
+// then for every SharedInformer I whose collection includes (X, S)
+// eventually either (1) I's cache associates X with S or a later
+// state of X, (2) I is stopped, or (3) the authoritative state
+// service for X terminates. To be formally complete, we say that the
+// absent state meets any restriction by label selector or field
+// selector.
+//
+// As a simple example, if a collection of objects is henceforeth
+// unchanging and a SharedInformer is created that links to that
+// collection then that SharedInformer's cache eventually holds an
+// exact copy of that collection (unless it is stopped too soon, the
+// authoritative state service ends, or communication problems between
+// the two persistently thwart achievement).
+//
+// As another simple example, if the local cache ever holds a
+// non-absent state for some object ID and the object is eventually
+// removed from the authoritative state then eventually the object is
+// removed from the local cache (unless the SharedInformer is stopped
+// too soon, the authoritative state service emnds, or communication
+// problems persistently thwart the desired result).
+//
+// The keys in Store() are of the form namespace/name for namespaced
+// objects, and are simply the name for non-namespaced objects.
+//
+// A client is identified here by a ResourceEventHandler. For every
+// update to the SharedInformer's local cache and for every client,
+// eventually either the SharedInformer is stopped or the client is
+// notified of the update. These notifications happen after the
+// corresponding cache update and, in the case of a
+// SharedIndexInformer, after the corresponding index updates. It is
+// possible that additional cache and index updates happen before such
+// a prescribed notification. For a given SharedInformer and client,
+// all notifications are delivered sequentially. For a given
+// SharedInformer, client, and object ID, the notifications are
+// delivered in order.
+//
+// A delete notification exposes the last locally known non-absent
+// state, except that its ResourceVersion is replaced with a
+// ResourceVersion in which the object is actually absent.
 type SharedInformer interface {
 	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
 	// period. Events to a single handler are delivered sequentially, but there is no coordination
 	// between different handlers.
 	AddEventHandler(handler ResourceEventHandler)
-	// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
-	// specified resync period. Events to a single handler are delivered sequentially, but there is
-	// no coordination between different handlers.
+	// AddEventHandlerWithResyncPeriod adds an event handler to the
+	// shared informer using the specified resync period. The resync
	// operation consists of delivering to the handler a create
+	// notification for every object in the informer's local cache; it
+	// does not add any interactions with the authoritative storage.
 	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
-	// GetStore returns the Store.
+	// GetStore returns the informer's local cache as a Store.
 	GetStore() Store
 	// GetController gives back a synthetic interface that "votes" to start the informer
 	GetController() Controller
-	// Run starts the shared informer, which will be stopped when stopCh is closed.
+	// Run starts and runs the shared informer, returning after it stops.
+	// The informer will be stopped when stopCh is closed.
 	Run(stopCh <-chan struct{})
-	// HasSynced returns true if the shared informer's store has synced.
+	// HasSynced returns true if the shared informer's store has been
+	// informed by at least one full LIST of the authoritative state
+	// of the informer's object collection. This is unrelated to "resync".
 	HasSynced() bool
 	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
 	// store. The value returned is not synchronized with access to the underlying store and is not
@@ -86,7 +139,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve
 		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
 		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
 		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
-		clock: realClock,
+		clock:                           realClock,
 	}
 	return sharedIndexInformer
 }
@@ -116,11 +169,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
 		},
 		stopCh)
 	if err != nil {
-		glog.V(2).Infof("stop requested")
+		klog.V(2).Infof("stop requested")
 		return false
 	}

-	glog.V(4).Infof("caches populated")
+	klog.V(4).Infof("caches populated")
 	return true
 }

@@ -279,11 +332,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration {
 		return desired
 	}
 	if check == 0 {
-		glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
+		klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
 		return 0
 	}
 	if desired < check {
-		glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
+		klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
 		return check
 	}
 	return desired
@@ -296,19 +349,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
 	defer s.startedLock.Unlock()

 	if s.stopped {
-		glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
+		klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
 		return
 	}

 	if resyncPeriod > 0 {
 		if resyncPeriod < minimumResyncPeriod {
-			glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
+			klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
 			resyncPeriod = minimumResyncPeriod
 		}

 		if resyncPeriod < s.resyncCheckPeriod {
 			if s.started {
-				glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
+				klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
 				resyncPeriod = s.resyncCheckPeriod
 			} else {
 				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
@@ -555,7 +608,7 @@ func (p *processorListener) run() {
 			case deleteNotification:
 				p.handler.OnDelete(notification.oldObj)
 			default:
-				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
+				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
 			}
 		}
 	// the only way to get here is if the p.nextCh is empty and closed
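Example (not part of the diff): per the rewritten doc comment, a resync only redelivers creates from the informer's local cache; it never re-contacts the apiserver. A sketch wiring a handler with its own resync period, assuming in-cluster credentials:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	config, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	informer := cache.NewSharedInformer(lw, &v1.Pod{}, 0)

	// Resync here means redelivering creates from the local cache every
	// 10 minutes; it does not add load on the apiserver.
	informer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("add:", obj.(*v1.Pod).Name) },
	}, 10*time.Minute)

	stop := make(chan struct{})
	defer close(stop)
	go informer.Run(stop)
	if !cache.WaitForCacheSync(stop, informer.HasSynced) {
		panic("cache never synced")
	}
	select {}
}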
vendor/k8s.io/client-go/tools/cache/store.go (generated, vendored, 2 changes)
@@ -210,7 +210,7 @@ func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error)
 // 'c' takes ownership of the list, you should not reference the list again
 // after calling this function.
 func (c *cache) Replace(list []interface{}, resourceVersion string) error {
-	items := map[string]interface{}{}
+	items := make(map[string]interface{}, len(list))
 	for _, item := range list {
 		key, err := c.keyFunc(item)
 		if err != nil {
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored, 21 changes)
@@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
 	}
 	index := c.indices[indexName]

-	// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
-	returnKeySet := sets.String{}
-	for _, indexKey := range indexKeys {
-		set := index[indexKey]
-		for _, key := range set.UnsortedList() {
-			returnKeySet.Insert(key)
+	var returnKeySet sets.String
+	if len(indexKeys) == 1 {
+		// In majority of cases, there is exactly one value matching.
+		// Optimize the most common path - deduping is not needed here.
+		returnKeySet = index[indexKeys[0]]
+	} else {
+		// Need to de-dupe the return list.
+		// Since multiple keys are allowed, this can happen.
+		returnKeySet = sets.String{}
+		for _, indexKey := range indexKeys {
+			for key := range index[indexKey] {
+				returnKeySet.Insert(key)
+			}
 		}
 	}

@@ -178,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro

 	set := index[indexKey]
 	list := make([]interface{}, 0, set.Len())
-	for _, key := range set.List() {
+	for key := range set {
 		list = append(list, c.items[key])
 	}

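Example (not part of the diff): ByIndex now ranges over the key set directly instead of calling set.List(), so results are no longer sorted and callers must not rely on ordering. A sketch with an invented first-letter index:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index items by their first letter.
	store := cache.NewThreadSafeStore(cache.Indexers{
		"firstLetter": func(obj interface{}) ([]string, error) {
			return []string{obj.(string)[:1]}, nil
		},
	}, cache.Indices{})

	store.Add("alpha", "alpha")
	store.Add("apple", "apple")
	store.Add("beta", "beta")

	items, err := store.ByIndex("firstLetter", "a")
	if err != nil {
		panic(err)
	}
	fmt.Println(items) // both "alpha" and "apple", in no particular order
}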