Bump kubernetes/client-go
parent 029fa83690
commit 83a92596c3
901 changed files with 169303 additions and 306433 deletions
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored): 108 changed lines

@@ -20,9 +20,10 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/client-go/pkg/runtime"
-	utilruntime "k8s.io/client-go/pkg/util/runtime"
-	"k8s.io/client-go/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/clock"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
 )
 
 // Config contains all the settings for a Controller.
@@ -50,6 +51,11 @@ type Config struct {
 	// queue.
 	FullResyncPeriod time.Duration
 
+	// ShouldResync, if specified, is invoked when the controller's reflector determines the next
+	// periodic sync should occur. If this returns true, it means the reflector should proceed with
+	// the resync.
+	ShouldResync ShouldResyncFunc
+
 	// If true, when Process() returns an error, re-enqueue the object.
 	// TODO: add interface to let you inject a delay/backoff or drop
 	// the object completely if desired. Pass the object in
@@ -57,26 +63,33 @@ type Config struct {
 	RetryOnError bool
 }
 
+// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
+// resync or not. It can be used by a shared informer to support multiple event handlers with custom
+// resync periods.
+type ShouldResyncFunc func() bool
+
 // ProcessFunc processes a single object.
 type ProcessFunc func(obj interface{}) error
 
 // Controller is a generic controller framework.
-type Controller struct {
+type controller struct {
 	config         Config
 	reflector      *Reflector
 	reflectorMutex sync.RWMutex
+	clock          clock.Clock
 }
 
-// TODO make the "Controller" private, and convert all references to use ControllerInterface instead
-type ControllerInterface interface {
+type Controller interface {
 	Run(stopCh <-chan struct{})
 	HasSynced() bool
+	LastSyncResourceVersion() string
 }
 
 // New makes a new Controller from the given Config.
-func New(c *Config) *Controller {
-	ctlr := &Controller{
+func New(c *Config) Controller {
+	ctlr := &controller{
 		config: *c,
+		clock:  &clock.RealClock{},
 	}
 	return ctlr
 }
@@ -84,37 +97,43 @@ func New(c *Config) *Controller {
 // Run begins processing items, and will continue until a value is sent down stopCh.
 // It's an error to call Run more than once.
 // Run blocks; call via go.
-func (c *Controller) Run(stopCh <-chan struct{}) {
+func (c *controller) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
+	go func() {
+		<-stopCh
+		c.config.Queue.Close()
+	}()
 	r := NewReflector(
 		c.config.ListerWatcher,
 		c.config.ObjectType,
 		c.config.Queue,
 		c.config.FullResyncPeriod,
 	)
+	r.ShouldResync = c.config.ShouldResync
+	r.clock = c.clock
 
 	c.reflectorMutex.Lock()
 	c.reflector = r
 	c.reflectorMutex.Unlock()
 
-	r.RunUntil(stopCh)
+	var wg wait.Group
+	defer wg.Wait()
+
+	wg.StartWithChannel(stopCh, r.Run)
 
 	wait.Until(c.processLoop, time.Second, stopCh)
 }
 
 // Returns true once this controller has completed an initial resource listing
-func (c *Controller) HasSynced() bool {
+func (c *controller) HasSynced() bool {
 	return c.config.Queue.HasSynced()
 }
 
-// Requeue adds the provided object back into the queue if it does not already exist.
-func (c *Controller) Requeue(obj interface{}) error {
-	return c.config.Queue.AddIfNotPresent(Deltas{
-		Delta{
-			Type:   Sync,
-			Object: obj,
-		},
-	})
+func (c *controller) LastSyncResourceVersion() string {
+	if c.reflector == nil {
+		return ""
+	}
+	return c.reflector.LastSyncResourceVersion()
 }
 
 // processLoop drains the work queue.
@@ -126,10 +145,13 @@ func (c *Controller) Requeue(obj interface{}) error {
 // actually exit when the controller is stopped. Or just give up on this stuff
 // ever being stoppable. Converting this whole package to use Context would
 // also be helpful.
-func (c *Controller) processLoop() {
+func (c *controller) processLoop() {
 	for {
 		obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
 		if err != nil {
+			if err == FIFOClosedError {
+				return
+			}
 			if c.config.RetryOnError {
 				// This is the safe way to re-enqueue.
 				c.config.Queue.AddIfNotPresent(obj)
@@ -188,6 +210,47 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
 	}
 }
 
+// FilteringResourceEventHandler applies the provided filter to all events coming
+// in, ensuring the appropriate nested handler method is invoked. An object
+// that starts passing the filter after an update is considered an add, and an
+// object that stops passing the filter after an update is considered a delete.
+type FilteringResourceEventHandler struct {
+	FilterFunc func(obj interface{}) bool
+	Handler    ResourceEventHandler
+}
+
+// OnAdd calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+	if !r.FilterFunc(obj) {
+		return
+	}
+	r.Handler.OnAdd(obj)
+}
+
+// OnUpdate ensures the proper handler is called depending on whether the filter matches
+func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
+	newer := r.FilterFunc(newObj)
+	older := r.FilterFunc(oldObj)
+	switch {
+	case newer && older:
+		r.Handler.OnUpdate(oldObj, newObj)
+	case newer && !older:
+		r.Handler.OnAdd(newObj)
+	case !newer && older:
+		r.Handler.OnDelete(oldObj)
+	default:
+		// do nothing
+	}
+}
+
+// OnDelete calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
+	if !r.FilterFunc(obj) {
+		return
+	}
+	r.Handler.OnDelete(obj)
+}
+
 // DeletionHandlingMetaNamespaceKeyFunc checks for
 // DeletedFinalStateUnknown objects before calling
 // MetaNamespaceKeyFunc.
@@ -218,7 +281,7 @@ func NewInformer(
 	objType runtime.Object,
 	resyncPeriod time.Duration,
 	h ResourceEventHandler,
-) (Store, *Controller) {
+) (Store, Controller) {
 	// This will hold the client state, as we know it.
 	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 
@@ -277,6 +340,7 @@ func NewInformer(
 // long as possible (until the upstream source closes the watch or times out,
 // or you stop the controller).
 //  * h is the object you want notifications sent to.
+//  * indexers is the indexer for the received object type.
 //
 func NewIndexerInformer(
 	lw ListerWatcher,
@@ -284,7 +348,7 @@ func NewIndexerInformer(
 	resyncPeriod time.Duration,
 	h ResourceEventHandler,
 	indexers Indexers,
-) (Indexer, *Controller) {
+) (Indexer, Controller) {
 	// This will hold the client state, as we know it.
 	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
 
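[Note, not part of the diff: the exported *Controller struct becomes the unexported controller type behind a new Controller interface, and NewInformer/NewIndexerInformer now return that interface. A minimal sketch of a downstream caller after this bump; lw, objType, and the filter function are placeholders supplied by the caller.]

	package informerexample

	import (
		"time"

		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/client-go/tools/cache"
	)

	// startInformer wires a filtered handler into an informer and starts it.
	func startInformer(lw cache.ListerWatcher, objType runtime.Object, keep func(obj interface{}) bool, stopCh <-chan struct{}) (cache.Store, cache.Controller) {
		handler := cache.FilteringResourceEventHandler{
			FilterFunc: keep, // objects entering/leaving the filter become synthetic adds/deletes
			Handler:    cache.ResourceEventHandlerFuncs{},
		}
		// NewInformer now returns the Controller interface rather than *Controller.
		store, ctrl := cache.NewInformer(lw, objType, 30*time.Second, handler)
		go ctrl.Run(stopCh) // Run blocks; call via go
		return store, ctrl
	}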
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored): 54 changed lines

@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"sync"
 
-	"k8s.io/client-go/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/sets"
 
 	"github.com/golang/glog"
 )
@@ -118,6 +118,12 @@ type DeltaFIFO struct {
 	// purpose of figuring out which items have been deleted
 	// when Replace() or Delete() is called.
 	knownObjects KeyListerGetter
+
+	// Indication the queue is closed.
+	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
+	// Currently, not used to gate any of CRED operations.
+	closed     bool
+	closedLock sync.Mutex
 }
 
 var (
@@ -132,6 +138,14 @@ var (
 	ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
 )
 
+// Close the queue.
+func (f *DeltaFIFO) Close() {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	f.closed = true
+	f.cond.Broadcast()
+}
+
 // KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
 // DeletedFinalStateUnknown objects.
 func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
@@ -387,6 +401,16 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error)
 	return d, exists, nil
 }
 
+// Checks if the queue is closed
+func (f *DeltaFIFO) IsClosed() bool {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	if f.closed {
+		return true
+	}
+	return false
+}
+
 // Pop blocks until an item is added to the queue, and then returns it. If
 // multiple items are ready, they are returned in the order in which they were
 // added/updated. The item is removed from the queue (and the store) before it
@@ -404,6 +428,13 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 	defer f.lock.Unlock()
 	for {
 		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
+			// When Close() is called, the f.closed is set and the condition is broadcasted.
+			// Which causes this loop to continue and return from the Pop().
+			if f.IsClosed() {
+				return nil, FIFOClosedError
+			}
+
 			f.cond.Wait()
 		}
 		id := f.queue[0]
@@ -505,14 +536,16 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 
 // Resync will send a sync event for each item
 func (f *DeltaFIFO) Resync() error {
-	var keys []string
-	func() {
-		f.lock.RLock()
-		defer f.lock.RUnlock()
-		keys = f.knownObjects.ListKeys()
-	}()
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.knownObjects == nil {
+		return nil
+	}
+
+	keys := f.knownObjects.ListKeys()
 	for _, k := range keys {
-		if err := f.syncKey(k); err != nil {
+		if err := f.syncKeyLocked(k); err != nil {
 			return err
 		}
 	}
@@ -522,6 +555,11 @@ func (f *DeltaFIFO) Resync() error {
 func (f *DeltaFIFO) syncKey(key string) error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
+
+	return f.syncKeyLocked(key)
+}
+
+func (f *DeltaFIFO) syncKeyLocked(key string) error {
 	obj, exists, err := f.knownObjects.GetByKey(key)
 	if err != nil {
 		glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
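[Note, not part of the diff: Close() and FIFOClosedError give queue consumers a clean exit path; this is exactly how controller.processLoop above now terminates. A standalone sketch of the same pattern; q and process are assumed to be supplied by whoever owns the queue's lifecycle.]

	package fifoexample

	import "k8s.io/client-go/tools/cache"

	// drain pops items until the queue is closed.
	func drain(q cache.Queue, process cache.PopProcessFunc) {
		for {
			if _, err := q.Pop(process); err == cache.FIFOClosedError {
				return // Close() was called; stop the control loop
			}
		}
	}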
vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored): 2 changed lines

@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	"k8s.io/client-go/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/clock"
 )
 
 // ExpirationCache implements the store interface
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go (generated, vendored): 4 changed lines

@@ -17,8 +17,8 @@ limitations under the License.
 package cache
 
 import (
-	"k8s.io/client-go/pkg/util/clock"
-	"k8s.io/client-go/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/sets"
 )
 
 type fakeThreadSafeMap struct {
vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored): 41 changed lines

@@ -17,9 +17,10 @@ limitations under the License.
 package cache
 
 import (
+	"errors"
 	"sync"
 
-	"k8s.io/client-go/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/sets"
 )
 
 // PopProcessFunc is passed to Pop() method of Queue interface.
@@ -33,6 +34,8 @@ type ErrRequeue struct {
 	Err error
 }
 
+var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue")
+
 func (e ErrRequeue) Error() string {
 	if e.Err == nil {
 		return "the popped item should be requeued without returning an error"
@@ -58,6 +61,9 @@ type Queue interface {
 
 	// Return true if the first batch of items has been popped
 	HasSynced() bool
+
+	// Close queue
+	Close()
 }
 
 // Helper function for popping from Queue.
@@ -100,12 +106,26 @@ type FIFO struct {
 	// keyFunc is used to make the key used for queued item insertion and retrieval, and
 	// should be deterministic.
 	keyFunc KeyFunc
+
+	// Indication the queue is closed.
+	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
+	// Currently, not used to gate any of CRED operations.
+	closed     bool
+	closedLock sync.Mutex
 }
 
 var (
 	_ = Queue(&FIFO{}) // FIFO is a Queue
 )
 
+// Close the queue.
+func (f *FIFO) Close() {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	f.closed = true
+	f.cond.Broadcast()
+}
+
 // Return true if an Add/Update/Delete/AddIfNotPresent are called first,
 // or an Update called first but the first batch of items inserted by Replace() has been popped
 func (f *FIFO) HasSynced() bool {
@@ -149,7 +169,7 @@ func (f *FIFO) AddIfNotPresent(obj interface{}) error {
 	return nil
 }
 
-// addIfNotPresent assumes the fifo lock is already held and adds the the provided
+// addIfNotPresent assumes the fifo lock is already held and adds the provided
 // item to the queue under id if it does not already exist.
 func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
 	f.populated = true
@@ -222,6 +242,16 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
 	return item, exists, nil
 }
 
+// Checks if the queue is closed
+func (f *FIFO) IsClosed() bool {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	if f.closed {
+		return true
+	}
+	return false
+}
+
 // Pop waits until an item is ready and processes it. If multiple items are
 // ready, they are returned in the order in which they were added/updated.
 // The item is removed from the queue (and the store) before it is processed,
@@ -233,6 +263,13 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
 	defer f.lock.Unlock()
 	for {
 		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
+			// When Close() is called, the f.closed is set and the condition is broadcasted.
+			// Which causes this loop to continue and return from the Pop().
+			if f.IsClosed() {
+				return nil, FIFOClosedError
+			}
+
 			f.cond.Wait()
 		}
 		id := f.queue[0]
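[Note, not part of the diff: FIFO gains the same closed/closedLock pair as DeltaFIFO, so a producer can release a consumer blocked in Pop. A small self-contained sketch.]

	package fifoexample

	import "k8s.io/client-go/tools/cache"

	func closeUnblocksPop() {
		f := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
		done := make(chan struct{})
		go func() {
			defer close(done)
			// Pop blocks on the empty queue until Close() broadcasts the condition.
			_, err := f.Pop(func(obj interface{}) error { return nil })
			_ = err // err == cache.FIFOClosedError after Close()
		}()
		f.Close()
		<-done
	}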
vendor/k8s.io/client-go/tools/cache/heap.go (generated, vendored, new file): 323 changed lines

@@ -0,0 +1,323 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file implements a heap data structure.
+
+package cache
+
+import (
+	"container/heap"
+	"fmt"
+	"sync"
+)
+
+const (
+	closedMsg = "heap is closed"
+)
+
+type LessFunc func(interface{}, interface{}) bool
+type heapItem struct {
+	obj   interface{} // The object which is stored in the heap.
+	index int         // The index of the object's key in the Heap.queue.
+}
+
+type itemKeyValue struct {
+	key string
+	obj interface{}
+}
+
+// heapData is an internal struct that implements the standard heap interface
+// and keeps the data stored in the heap.
+type heapData struct {
+	// items is a map from key of the objects to the objects and their index.
+	// We depend on the property that items in the map are in the queue and vice versa.
+	items map[string]*heapItem
+	// queue implements a heap data structure and keeps the order of elements
+	// according to the heap invariant. The queue keeps the keys of objects stored
+	// in "items".
+	queue []string
+
+	// keyFunc is used to make the key used for queued item insertion and retrieval, and
+	// should be deterministic.
+	keyFunc KeyFunc
+	// lessFunc is used to compare two objects in the heap.
+	lessFunc LessFunc
+}
+
+var (
+	_ = heap.Interface(&heapData{}) // heapData is a standard heap
+)
+
+// Less compares two objects and returns true if the first one should go
+// in front of the second one in the heap.
+func (h *heapData) Less(i, j int) bool {
+	if i > len(h.queue) || j > len(h.queue) {
+		return false
+	}
+	itemi, ok := h.items[h.queue[i]]
+	if !ok {
+		return false
+	}
+	itemj, ok := h.items[h.queue[j]]
+	if !ok {
+		return false
+	}
+	return h.lessFunc(itemi.obj, itemj.obj)
+}
+
+// Len returns the number of items in the Heap.
+func (h *heapData) Len() int { return len(h.queue) }
+
+// Swap implements swapping of two elements in the heap. This is a part of standard
+// heap interface and should never be called directly.
+func (h *heapData) Swap(i, j int) {
+	h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
+	item := h.items[h.queue[i]]
+	item.index = i
+	item = h.items[h.queue[j]]
+	item.index = j
+}
+
+// Push is supposed to be called by heap.Push only.
+func (h *heapData) Push(kv interface{}) {
+	keyValue := kv.(*itemKeyValue)
+	n := len(h.queue)
+	h.items[keyValue.key] = &heapItem{keyValue.obj, n}
+	h.queue = append(h.queue, keyValue.key)
+}
+
+// Pop is supposed to be called by heap.Pop only.
+func (h *heapData) Pop() interface{} {
+	key := h.queue[len(h.queue)-1]
+	h.queue = h.queue[0 : len(h.queue)-1]
+	item, ok := h.items[key]
+	if !ok {
+		// This is an error
+		return nil
+	}
+	delete(h.items, key)
+	return item.obj
+}
+
+// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
+// It can be used to implement priority queues and similar data structures.
+type Heap struct {
+	lock sync.RWMutex
+	cond sync.Cond
+
+	// data stores objects and has a queue that keeps their ordering according
+	// to the heap invariant.
+	data *heapData
+
+	// closed indicates that the queue is closed.
+	// It is mainly used to let Pop() exit its control loop while waiting for an item.
+	closed bool
+}
+
+// Close the Heap and signals condition variables that may be waiting to pop
+// items from the heap.
+func (h *Heap) Close() {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	h.closed = true
+	h.cond.Broadcast()
+}
+
+// Add inserts an item, and puts it in the queue. The item is updated if it
+// already exists.
+func (h *Heap) Add(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	if _, exists := h.data.items[key]; exists {
+		h.data.items[key].obj = obj
+		heap.Fix(h.data, h.data.items[key].index)
+	} else {
+		h.addIfNotPresentLocked(key, obj)
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// Adds all the items in the list to the queue and then signals the condition
+// variable. It is useful when the caller would like to add all of the items
+// to the queue before consumer starts processing them.
+func (h *Heap) BulkAdd(list []interface{}) error {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	for _, obj := range list {
+		key, err := h.data.keyFunc(obj)
+		if err != nil {
+			return KeyError{obj, err}
+		}
+		if _, exists := h.data.items[key]; exists {
+			h.data.items[key].obj = obj
+			heap.Fix(h.data, h.data.items[key].index)
+		} else {
+			h.addIfNotPresentLocked(key, obj)
+		}
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
+// the key is present in the map, no changes is made to the item.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (h *Heap) AddIfNotPresent(obj interface{}) error {
+	id, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	h.addIfNotPresentLocked(id, obj)
+	h.cond.Broadcast()
+	return nil
+}
+
+// addIfNotPresentLocked assumes the lock is already held and adds the the provided
+// item to the queue if it does not already exist.
+func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
+	if _, exists := h.data.items[key]; exists {
+		return
+	}
+	heap.Push(h.data, &itemKeyValue{key, obj})
+}
+
+// Update is the same as Add in this implementation. When the item does not
+// exist, it is added.
+func (h *Heap) Update(obj interface{}) error {
+	return h.Add(obj)
+}
+
+// Delete removes an item.
+func (h *Heap) Delete(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if item, ok := h.data.items[key]; ok {
+		heap.Remove(h.data, item.index)
+		return nil
+	}
+	return fmt.Errorf("object not found")
+}
+
+// Pop waits until an item is ready. If multiple items are
+// ready, they are returned in the order given by Heap.data.lessFunc.
+func (h *Heap) Pop() (interface{}, error) {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	for len(h.data.queue) == 0 {
+		// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
+		// When Close() is called, the h.closed is set and the condition is broadcast,
+		// which causes this loop to continue and return from the Pop().
+		if h.closed {
+			return nil, fmt.Errorf("heap is closed")
+		}
+		h.cond.Wait()
+	}
+	obj := heap.Pop(h.data)
+	if obj != nil {
+		return obj, nil
+	} else {
+		return nil, fmt.Errorf("object was removed from heap data")
+	}
+}
+
+// List returns a list of all the items.
+func (h *Heap) List() []interface{} {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	list := make([]interface{}, 0, len(h.data.items))
+	for _, item := range h.data.items {
+		list = append(list, item.obj)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently in the Heap.
+func (h *Heap) ListKeys() []string {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	list := make([]string, 0, len(h.data.items))
+	for key := range h.data.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (h *Heap) Get(obj interface{}) (interface{}, bool, error) {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return h.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	item, exists := h.data.items[key]
+	if !exists {
+		return nil, false, nil
+	}
+	return item.obj, true, nil
+}
+
+// IsClosed returns true if the queue is closed.
+func (h *Heap) IsClosed() bool {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	if h.closed {
+		return true
+	}
+	return false
+}
+
+// NewHeap returns a Heap which can be used to queue up items to process.
+func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
+	h := &Heap{
+		data: &heapData{
+			items:    map[string]*heapItem{},
+			queue:    []string{},
+			keyFunc:  keyFn,
+			lessFunc: lessFn,
+		},
+	}
+	h.cond.L = &h.lock
+	return h
+}
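[Note, not part of the diff: NewHeap builds a thread-safe priority queue keyed and ordered by caller-supplied functions; the key/less functions below are illustrative only.]

	package heapexample

	import (
		"fmt"

		"k8s.io/client-go/tools/cache"
	)

	func smallestFirst() {
		h := cache.NewHeap(
			func(obj interface{}) (string, error) { return obj.(string), nil }, // key: the string itself
			func(a, b interface{}) bool { return a.(string) < b.(string) },     // order: lexicographic
		)
		h.Add("b")
		h.Add("a")
		item, _ := h.Pop()
		fmt.Println(item) // prints "a", the smallest item according to lessFunc
	}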
vendor/k8s.io/client-go/tools/cache/index.go (generated, vendored): 6 changed lines

@@ -19,8 +19,8 @@ package cache
 import (
 	"fmt"
 
-	"k8s.io/client-go/pkg/api/meta"
-	"k8s.io/client-go/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/util/sets"
 )
 
 // Indexer is a storage interface that lets you list objects using multiple indexing functions
@@ -28,6 +28,8 @@ type Indexer interface {
 	Store
 	// Retrieve list of objects that match on the named indexing function
 	Index(indexName string, obj interface{}) ([]interface{}, error)
+	// IndexKeys returns the set of keys that match on the named indexing function.
+	IndexKeys(indexName, indexKey string) ([]string, error)
 	// ListIndexFuncValues returns the list of generated values of an Index func
 	ListIndexFuncValues(indexName string) []string
 	// ByIndex lists object that match on the named indexing function with the exact key
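[Note, not part of the diff: the new Indexer.IndexKeys returns matching store keys instead of the objects themselves, which is cheaper when only identities are needed. A hypothetical helper over a namespace-indexed store.]

	package indexexample

	import "k8s.io/client-go/tools/cache"

	// keysInNamespace assumes the indexer was built with the cache.NamespaceIndex index.
	func keysInNamespace(indexer cache.Indexer, ns string) ([]string, error) {
		return indexer.IndexKeys(cache.NamespaceIndex, ns)
	}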
vendor/k8s.io/client-go/tools/cache/listers.go (generated, vendored): 342 changed lines

@@ -17,20 +17,14 @@ limitations under the License.
 package cache
 
 import (
-	"fmt"
 
 	"github.com/golang/glog"
-	"k8s.io/client-go/pkg/api"
-	"k8s.io/client-go/pkg/api/errors"
-	"k8s.io/client-go/pkg/api/meta"
-	"k8s.io/client-go/pkg/api/unversioned"
-	"k8s.io/client-go/pkg/apis/apps"
-	"k8s.io/client-go/pkg/apis/certificates"
-	"k8s.io/client-go/pkg/apis/extensions"
-	"k8s.io/client-go/pkg/apis/policy"
-	"k8s.io/client-go/pkg/apis/storage"
-	"k8s.io/client-go/pkg/labels"
-	"k8s.io/client-go/pkg/runtime"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 )
 
 // AppendFunc is used to add a matching item to whatever list the caller is using
@@ -50,7 +44,7 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
 }
 
 func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
-	if namespace == api.NamespaceAll {
+	if namespace == metav1.NamespaceAll {
 		for _, m := range indexer.List() {
 			metadata, err := meta.Accessor(m)
 			if err != nil {
@@ -63,7 +57,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
 		return nil
 	}
 
-	items, err := indexer.Index(NamespaceIndex, &api.ObjectMeta{Namespace: namespace})
+	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
 	if err != nil {
 		// Ignore error; do slow search without index.
 		glog.Warningf("can not retrieve list of objects using index : %v", err)
@@ -110,13 +104,13 @@ type GenericNamespaceLister interface {
 	Get(name string) (runtime.Object, error)
 }
 
-func NewGenericLister(indexer Indexer, resource unversioned.GroupResource) GenericLister {
+func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
 	return &genericLister{indexer: indexer, resource: resource}
 }
 
 type genericLister struct {
 	indexer  Indexer
-	resource unversioned.GroupResource
+	resource schema.GroupResource
 }
 
 func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
@@ -144,7 +138,7 @@ func (s *genericLister) Get(name string) (runtime.Object, error) {
 type genericNamespaceLister struct {
 	indexer   Indexer
 	namespace string
-	resource  unversioned.GroupResource
+	resource  schema.GroupResource
 }
 
 func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
@@ -164,315 +158,3 @@ func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) {
 	}
 	return obj.(runtime.Object), nil
 }
-
-// TODO: generate these classes and methods for all resources of interest using
-// a script. Can use "go generate" once 1.4 is supported by all users.
-
-// NodeConditionPredicate is a function that indicates whether the given node's conditions meet
-// some set of criteria defined by the function.
-type NodeConditionPredicate func(node *api.Node) bool
-
-// StoreToNodeLister makes a Store have the List method of the client.NodeInterface
-// The Store must contain (only) Nodes.
-type StoreToNodeLister struct {
-	Store
-}
-
-func (s *StoreToNodeLister) List() (machines api.NodeList, err error) {
-	for _, m := range s.Store.List() {
-		machines.Items = append(machines.Items, *(m.(*api.Node)))
-	}
-	return machines, nil
-}
-
-// NodeCondition returns a storeToNodeConditionLister
-func (s *StoreToNodeLister) NodeCondition(predicate NodeConditionPredicate) storeToNodeConditionLister {
-	// TODO: Move this filtering server side. Currently our selectors don't facilitate searching through a list so we
-	// have the reflector filter out the Unschedulable field and sift through node conditions in the lister.
-	return storeToNodeConditionLister{s.Store, predicate}
-}
-
-// storeToNodeConditionLister filters and returns nodes matching the given type and status from the store.
-type storeToNodeConditionLister struct {
-	store     Store
-	predicate NodeConditionPredicate
-}
-
-// List returns a list of nodes that match the conditions defined by the predicate functions in the storeToNodeConditionLister.
-func (s storeToNodeConditionLister) List() (nodes []*api.Node, err error) {
-	for _, m := range s.store.List() {
-		node := m.(*api.Node)
-		if s.predicate(node) {
-			nodes = append(nodes, node)
-		} else {
-			glog.V(5).Infof("Node %s matches none of the conditions", node.Name)
-		}
-	}
-	return
-}
-
-// StoreToDaemonSetLister gives a store List and Exists methods. The store must contain only DaemonSets.
-type StoreToDaemonSetLister struct {
-	Store
-}
-
-// Exists checks if the given daemon set exists in the store.
-func (s *StoreToDaemonSetLister) Exists(ds *extensions.DaemonSet) (bool, error) {
-	_, exists, err := s.Store.Get(ds)
-	if err != nil {
-		return false, err
-	}
-	return exists, nil
-}
-
-// List lists all daemon sets in the store.
-// TODO: converge on the interface in pkg/client
-func (s *StoreToDaemonSetLister) List() (dss extensions.DaemonSetList, err error) {
-	for _, c := range s.Store.List() {
-		dss.Items = append(dss.Items, *(c.(*extensions.DaemonSet)))
-	}
-	return dss, nil
-}
-
-// GetPodDaemonSets returns a list of daemon sets managing a pod.
-// Returns an error if and only if no matching daemon sets are found.
-func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) {
-	var selector labels.Selector
-	var daemonSet extensions.DaemonSet
-
-	if len(pod.Labels) == 0 {
-		err = fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name)
-		return
-	}
-
-	for _, m := range s.Store.List() {
-		daemonSet = *m.(*extensions.DaemonSet)
-		if daemonSet.Namespace != pod.Namespace {
-			continue
-		}
-		selector, err = unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector)
-		if err != nil {
-			// this should not happen if the DaemonSet passed validation
-			return nil, err
-		}
-
-		// If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
-		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
-			continue
-		}
-		daemonSets = append(daemonSets, daemonSet)
-	}
-	if len(daemonSets) == 0 {
-		err = fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-	return
-}
-
-// StoreToEndpointsLister makes a Store that lists endpoints.
-type StoreToEndpointsLister struct {
-	Store
-}
-
-// List lists all endpoints in the store.
-func (s *StoreToEndpointsLister) List() (services api.EndpointsList, err error) {
-	for _, m := range s.Store.List() {
-		services.Items = append(services.Items, *(m.(*api.Endpoints)))
-	}
-	return services, nil
-}
-
-// GetServiceEndpoints returns the endpoints of a service, matched on service name.
-func (s *StoreToEndpointsLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) {
-	for _, m := range s.Store.List() {
-		ep = *m.(*api.Endpoints)
-		if svc.Name == ep.Name && svc.Namespace == ep.Namespace {
-			return ep, nil
-		}
-	}
-	err = fmt.Errorf("could not find endpoints for service: %v", svc.Name)
-	return
-}
-
-// Typed wrapper around a store of PersistentVolumes
-type StoreToPVFetcher struct {
-	Store
-}
-
-// GetPersistentVolumeInfo returns cached data for the PersistentVolume 'id'.
-func (s *StoreToPVFetcher) GetPersistentVolumeInfo(id string) (*api.PersistentVolume, error) {
-	o, exists, err := s.Get(&api.PersistentVolume{ObjectMeta: api.ObjectMeta{Name: id}})
-
-	if err != nil {
-		return nil, fmt.Errorf("error retrieving PersistentVolume '%v' from cache: %v", id, err)
-	}
-
-	if !exists {
-		return nil, fmt.Errorf("PersistentVolume '%v' not found", id)
-	}
-
-	return o.(*api.PersistentVolume), nil
-}
-
-// StoreToStatefulSetLister gives a store List and Exists methods. The store must contain only StatefulSets.
-type StoreToStatefulSetLister struct {
-	Store
-}
-
-// Exists checks if the given StatefulSet exists in the store.
-func (s *StoreToStatefulSetLister) Exists(ps *apps.StatefulSet) (bool, error) {
-	_, exists, err := s.Store.Get(ps)
-	if err != nil {
-		return false, err
-	}
-	return exists, nil
-}
-
-// List lists all StatefulSets in the store.
-func (s *StoreToStatefulSetLister) List() (psList []apps.StatefulSet, err error) {
-	for _, ps := range s.Store.List() {
-		psList = append(psList, *(ps.(*apps.StatefulSet)))
-	}
-	return psList, nil
-}
-
-type storeStatefulSetsNamespacer struct {
-	store     Store
-	namespace string
-}
-
-func (s *StoreToStatefulSetLister) StatefulSets(namespace string) storeStatefulSetsNamespacer {
-	return storeStatefulSetsNamespacer{s.Store, namespace}
-}
-
-// GetPodStatefulSets returns a list of StatefulSets managing a pod. Returns an error only if no matching StatefulSets are found.
-func (s *StoreToStatefulSetLister) GetPodStatefulSets(pod *api.Pod) (psList []apps.StatefulSet, err error) {
-	var selector labels.Selector
-	var ps apps.StatefulSet
-
-	if len(pod.Labels) == 0 {
-		err = fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name)
-		return
-	}
-
-	for _, m := range s.Store.List() {
-		ps = *m.(*apps.StatefulSet)
-		if ps.Namespace != pod.Namespace {
-			continue
-		}
-		selector, err = unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
-		if err != nil {
-			err = fmt.Errorf("invalid selector: %v", err)
-			return
-		}
-
-		// If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything.
-		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
-			continue
-		}
-		psList = append(psList, ps)
-	}
-	if len(psList) == 0 {
-		err = fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-	return
-}
-
-// StoreToCertificateRequestLister gives a store List and Exists methods. The store must contain only CertificateRequests.
-type StoreToCertificateRequestLister struct {
-	Store
-}
-
-// Exists checks if the given csr exists in the store.
-func (s *StoreToCertificateRequestLister) Exists(csr *certificates.CertificateSigningRequest) (bool, error) {
-	_, exists, err := s.Store.Get(csr)
-	if err != nil {
-		return false, err
-	}
-	return exists, nil
-}
-
-// StoreToCertificateRequestLister lists all csrs in the store.
-func (s *StoreToCertificateRequestLister) List() (csrs certificates.CertificateSigningRequestList, err error) {
-	for _, c := range s.Store.List() {
-		csrs.Items = append(csrs.Items, *(c.(*certificates.CertificateSigningRequest)))
-	}
-	return csrs, nil
-}
-
-type StoreToPodDisruptionBudgetLister struct {
-	Store
-}
-
-// GetPodPodDisruptionBudgets returns a list of PodDisruptionBudgets matching a pod. Returns an error only if no matching PodDisruptionBudgets are found.
-func (s *StoreToPodDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *api.Pod) (pdbList []policy.PodDisruptionBudget, err error) {
-	var selector labels.Selector
-
-	if len(pod.Labels) == 0 {
-		err = fmt.Errorf("no PodDisruptionBudgets found for pod %v because it has no labels", pod.Name)
-		return
-	}
-
-	for _, m := range s.Store.List() {
-		pdb, ok := m.(*policy.PodDisruptionBudget)
-		if !ok {
-			glog.Errorf("Unexpected: %v is not a PodDisruptionBudget", m)
-			continue
-		}
-		if pdb.Namespace != pod.Namespace {
-			continue
-		}
-		selector, err = unversioned.LabelSelectorAsSelector(pdb.Spec.Selector)
-		if err != nil {
-			glog.Warningf("invalid selector: %v", err)
-			// TODO(mml): add an event to the PDB
-			continue
-		}
-
-		// If a PDB with a nil or empty selector creeps in, it should match nothing, not everything.
-		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
-			continue
-		}
-		pdbList = append(pdbList, *pdb)
-	}
-	if len(pdbList) == 0 {
-		err = fmt.Errorf("could not find PodDisruptionBudget for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-	return
-}
-
-// StorageClassLister knows how to list storage classes
-type StorageClassLister interface {
-	List(selector labels.Selector) (ret []*storage.StorageClass, err error)
-	Get(name string) (*storage.StorageClass, error)
-}
-
-// storageClassLister implements StorageClassLister
-type storageClassLister struct {
-	indexer Indexer
-}
-
-// NewStorageClassLister returns a new lister.
-func NewStorageClassLister(indexer Indexer) StorageClassLister {
-	return &storageClassLister{indexer: indexer}
-}
-
-// List returns a list of storage classes
-func (s *storageClassLister) List(selector labels.Selector) (ret []*storage.StorageClass, err error) {
-	err = ListAll(s.indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*storage.StorageClass))
-	})
-	return ret, err
-}
-
-// List returns a list of storage classes
-func (s *storageClassLister) Get(name string) (*storage.StorageClass, error) {
-	key := &storage.StorageClass{ObjectMeta: api.ObjectMeta{Name: name}}
-	obj, exists, err := s.indexer.Get(key)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(storage.Resource("storageclass"), name)
-	}
-	return obj.(*storage.StorageClass), nil
-}
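[Note, not part of the diff: NewGenericLister now takes schema.GroupResource from apimachinery instead of unversioned.GroupResource, so callers change only an import and a type name. The group/resource values below are illustrative.]

	package listerexample

	import (
		"k8s.io/apimachinery/pkg/runtime/schema"
		"k8s.io/client-go/tools/cache"
	)

	func newStatefulSetLister(indexer cache.Indexer) cache.GenericLister {
		return cache.NewGenericLister(indexer, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
	}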
vendor/k8s.io/client-go/tools/cache/listers_core.go (generated, vendored, deleted): 348 changed lines

@@ -1,348 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cache
-
-import (
-	"fmt"
-
-	"k8s.io/client-go/pkg/api"
-	"k8s.io/client-go/pkg/api/errors"
-	"k8s.io/client-go/pkg/labels"
-)
-
-// TODO: generate these classes and methods for all resources of interest using
-// a script. Can use "go generate" once 1.4 is supported by all users.
-
-// Lister makes an Index have the List method. The Stores must contain only the expected type
-// Example:
-// s := cache.NewStore()
-// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"}
-// r := cache.NewReflector(lw, &api.Pod{}, s).Run()
-// l := StoreToPodLister{s}
-// l.List()
-
-// StoreToPodLister helps list pods
-type StoreToPodLister struct {
-	Indexer Indexer
-}
-
-func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api.Pod, err error) {
-	err = ListAll(s.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.Pod))
-	})
-	return ret, err
-}
-
-func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer {
-	return storePodsNamespacer{Indexer: s.Indexer, namespace: namespace}
-}
-
-type storePodsNamespacer struct {
-	Indexer   Indexer
-	namespace string
-}
-
-func (s storePodsNamespacer) List(selector labels.Selector) (ret []*api.Pod, err error) {
-	err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.Pod))
-	})
-	return ret, err
-}
-
-func (s storePodsNamespacer) Get(name string) (*api.Pod, error) {
-	obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("pod"), name)
-	}
-	return obj.(*api.Pod), nil
-}
-
-// StoreToServiceLister helps list services
-type StoreToServiceLister struct {
-	Indexer Indexer
-}
-
-func (s *StoreToServiceLister) List(selector labels.Selector) (ret []*api.Service, err error) {
-	err = ListAll(s.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.Service))
-	})
-	return ret, err
-}
-
-func (s *StoreToServiceLister) Services(namespace string) storeServicesNamespacer {
-	return storeServicesNamespacer{s.Indexer, namespace}
-}
-
-type storeServicesNamespacer struct {
-	indexer   Indexer
-	namespace string
-}
-
-func (s storeServicesNamespacer) List(selector labels.Selector) (ret []*api.Service, err error) {
-	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.Service))
-	})
-	return ret, err
-}
-
-func (s storeServicesNamespacer) Get(name string) (*api.Service, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("service"), name)
-	}
-	return obj.(*api.Service), nil
-}
-
-// TODO: Move this back to scheduler as a helper function that takes a Store,
-// rather than a method of StoreToServiceLister.
-func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service, err error) {
-	allServices, err := s.Services(pod.Namespace).List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
-
-	for i := range allServices {
-		service := allServices[i]
-		if service.Spec.Selector == nil {
-			// services with nil selectors match nothing, not everything.
-			continue
-		}
-		selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
-		if selector.Matches(labels.Set(pod.Labels)) {
-			services = append(services, service)
-		}
-	}
-
-	return services, nil
-}
-
-// StoreToReplicationControllerLister helps list rcs
-type StoreToReplicationControllerLister struct {
-	Indexer Indexer
-}
-
-func (s *StoreToReplicationControllerLister) List(selector labels.Selector) (ret []*api.ReplicationController, err error) {
-	err = ListAll(s.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.ReplicationController))
-	})
-	return ret, err
-}
-
-func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer {
-	return storeReplicationControllersNamespacer{s.Indexer, namespace}
-}
-
-type storeReplicationControllersNamespacer struct {
-	indexer   Indexer
-	namespace string
-}
-
-func (s storeReplicationControllersNamespacer) List(selector labels.Selector) (ret []*api.ReplicationController, err error) {
-	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.ReplicationController))
-	})
-	return ret, err
-}
-
-func (s storeReplicationControllersNamespacer) Get(name string) (*api.ReplicationController, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("replicationcontroller"), name)
-	}
-	return obj.(*api.ReplicationController), nil
-}
-
-// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found.
-func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) {
-	if len(pod.Labels) == 0 {
-		err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name)
-		return
-	}
-
-	key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}}
-	items, err := s.Indexer.Index(NamespaceIndex, key)
-	if err != nil {
-		return
-	}
-
-	for _, m := range items {
-		rc := m.(*api.ReplicationController)
-		selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated()
-
-		// If an rc with a nil or empty selector creeps in, it should match nothing, not everything.
-		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
-			continue
-		}
-		controllers = append(controllers, rc)
-	}
-	if len(controllers) == 0 {
-		err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-	return
-}
-
-// StoreToServiceAccountLister helps list service accounts
-type StoreToServiceAccountLister struct {
-	Indexer Indexer
-}
-
-func (s *StoreToServiceAccountLister) List(selector labels.Selector) (ret []*api.ServiceAccount, err error) {
-	err = ListAll(s.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.ServiceAccount))
-	})
-	return ret, err
-}
-
-func (s *StoreToServiceAccountLister) ServiceAccounts(namespace string) storeServiceAccountsNamespacer {
-	return storeServiceAccountsNamespacer{s.Indexer, namespace}
-}
-
-type storeServiceAccountsNamespacer struct {
-	indexer   Indexer
-	namespace string
-}
-
-func (s storeServiceAccountsNamespacer) List(selector labels.Selector) (ret []*api.ServiceAccount, err error) {
-	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.ServiceAccount))
-	})
-	return ret, err
-}
-
-func (s storeServiceAccountsNamespacer) Get(name string) (*api.ServiceAccount, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("serviceaccount"), name)
-	}
-	return obj.(*api.ServiceAccount), nil
-}
-
-// StoreToLimitRangeLister helps list limit ranges
-type StoreToLimitRangeLister struct {
-	Indexer Indexer
-}
-
-func (s *StoreToLimitRangeLister) List(selector labels.Selector) (ret []*api.LimitRange, err error) {
-	err = ListAll(s.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.LimitRange))
-	})
-	return ret, err
-}
-
-// StoreToPersistentVolumeClaimLister helps list pvcs
-type StoreToPersistentVolumeClaimLister struct {
-	Indexer Indexer
-}
-
-// List returns all persistentvolumeclaims that match the specified selector
-func (s *StoreToPersistentVolumeClaimLister) List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) {
-	err = ListAll(s.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.PersistentVolumeClaim))
-	})
-	return ret, err
-}
-
-func (s *StoreToLimitRangeLister) LimitRanges(namespace string) storeLimitRangesNamespacer {
-	return storeLimitRangesNamespacer{s.Indexer, namespace}
-}
-
-type storeLimitRangesNamespacer struct {
-	indexer   Indexer
-	namespace string
-}
-
-func (s storeLimitRangesNamespacer) List(selector labels.Selector) (ret []*api.LimitRange, err error) {
-	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.LimitRange))
-	})
-	return ret, err
-}
-
-func (s storeLimitRangesNamespacer) Get(name string) (*api.LimitRange, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("limitrange"), name)
-	}
-	return obj.(*api.LimitRange), nil
-}
-
-// PersistentVolumeClaims returns all claims in a specified namespace.
-func (s *StoreToPersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) storePersistentVolumeClaimsNamespacer {
-	return storePersistentVolumeClaimsNamespacer{Indexer: s.Indexer, namespace: namespace}
-}
-
-type storePersistentVolumeClaimsNamespacer struct {
-	Indexer   Indexer
-	namespace string
-}
-
-func (s storePersistentVolumeClaimsNamespacer) List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) {
-	err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.PersistentVolumeClaim))
-	})
-	return ret, err
-}
-
-func (s storePersistentVolumeClaimsNamespacer) Get(name string) (*api.PersistentVolumeClaim, error) {
-	obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("persistentvolumeclaims"), name)
-	}
-	return obj.(*api.PersistentVolumeClaim), nil
-}
-
-// IndexerToNamespaceLister gives an Indexer List method
-type IndexerToNamespaceLister struct {
-	Indexer
-}
-
-// List returns a list of namespaces
-func (i *IndexerToNamespaceLister) List(selector labels.Selector) (ret []*api.Namespace, err error) {
-	err = ListAll(i.Indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*api.Namespace))
-	})
-	return ret, err
-}
-
-func (i *IndexerToNamespaceLister) Get(name string) (*api.Namespace, error) {
-	obj, exists, err := i.Indexer.GetByKey(name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(api.Resource("namespace"), name)
-	}
-	return obj.(*api.Namespace), nil
-}
210
vendor/k8s.io/client-go/tools/cache/listers_extensions.go
generated
vendored
210
vendor/k8s.io/client-go/tools/cache/listers_extensions.go
generated
vendored
|
@ -1,210 +0,0 @@
|
|||
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"

	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/errors"
	"k8s.io/client-go/pkg/api/unversioned"
	"k8s.io/client-go/pkg/apis/extensions"
	"k8s.io/client-go/pkg/labels"
)

// TODO: generate these classes and methods for all resources of interest using
// a script. Can use "go generate" once 1.4 is supported by all users.

// Lister makes an Index have the List method. The Stores must contain only the expected type
// Example:
// s := cache.NewStore()
// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"}
// r := cache.NewReflector(lw, &extensions.Deployment{}, s).Run()
// l := StoreToDeploymentLister{s}
// l.List()

// StoreToDeploymentLister helps list deployments
type StoreToDeploymentLister struct {
	Indexer Indexer
}

func (s *StoreToDeploymentLister) List(selector labels.Selector) (ret []*extensions.Deployment, err error) {
	err = ListAll(s.Indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*extensions.Deployment))
	})
	return ret, err
}

func (s *StoreToDeploymentLister) Deployments(namespace string) storeDeploymentsNamespacer {
	return storeDeploymentsNamespacer{Indexer: s.Indexer, namespace: namespace}
}

type storeDeploymentsNamespacer struct {
	Indexer   Indexer
	namespace string
}

func (s storeDeploymentsNamespacer) List(selector labels.Selector) (ret []*extensions.Deployment, err error) {
	err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*extensions.Deployment))
	})
	return ret, err
}

func (s storeDeploymentsNamespacer) Get(name string) (*extensions.Deployment, error) {
	obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(extensions.Resource("deployment"), name)
	}
	return obj.(*extensions.Deployment), nil
}

// GetDeploymentsForReplicaSet returns a list of deployments managing a replica set. Returns an error only if no matching deployments are found.
func (s *StoreToDeploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) (deployments []*extensions.Deployment, err error) {
	if len(rs.Labels) == 0 {
		err = fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
		return
	}

	// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
	dList, err := s.Deployments(rs.Namespace).List(labels.Everything())
	if err != nil {
		return
	}
	for _, d := range dList {
		selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
			continue
		}
		deployments = append(deployments, d)
	}
	if len(deployments) == 0 {
		err = fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
	}
	return
}

// GetDeploymentsForPod returns a list of deployments managing a pod. Returns an error only if no matching deployments are found.
// TODO eliminate shallow copies
func (s *StoreToDeploymentLister) GetDeploymentsForPod(pod *api.Pod) (deployments []*extensions.Deployment, err error) {
	if len(pod.Labels) == 0 {
		err = fmt.Errorf("no deployments found for Pod %v because it has no labels", pod.Name)
		return
	}

	if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
		return
	}

	dList, err := s.Deployments(pod.Namespace).List(labels.Everything())
	if err != nil {
		return
	}
	for _, d := range dList {
		selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		deployments = append(deployments, d)
	}
	if len(deployments) == 0 {
		err = fmt.Errorf("could not find deployments set for Pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}

// StoreToReplicaSetLister helps list replicasets
type StoreToReplicaSetLister struct {
	Indexer Indexer
}

func (s *StoreToReplicaSetLister) List(selector labels.Selector) (ret []*extensions.ReplicaSet, err error) {
	err = ListAll(s.Indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*extensions.ReplicaSet))
	})
	return ret, err
}

func (s *StoreToReplicaSetLister) ReplicaSets(namespace string) storeReplicaSetsNamespacer {
	return storeReplicaSetsNamespacer{Indexer: s.Indexer, namespace: namespace}
}

type storeReplicaSetsNamespacer struct {
	Indexer   Indexer
	namespace string
}

func (s storeReplicaSetsNamespacer) List(selector labels.Selector) (ret []*extensions.ReplicaSet, err error) {
	err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*extensions.ReplicaSet))
	})
	return ret, err
}

func (s storeReplicaSetsNamespacer) Get(name string) (*extensions.ReplicaSet, error) {
	obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(extensions.Resource("replicaset"), name)
	}
	return obj.(*extensions.ReplicaSet), nil
}

// GetPodReplicaSets returns a list of ReplicaSets managing a pod. Returns an error only if no matching ReplicaSets are found.
func (s *StoreToReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) {
	if len(pod.Labels) == 0 {
		err = fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name)
		return
	}

	list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything())
	if err != nil {
		return
	}
	for _, rs := range list {
		if rs.Namespace != pod.Namespace {
			continue
		}
		selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid selector: %v", err)
		}

		// If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		rss = append(rss, rs)
	}
	if len(rss) == 0 {
		err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
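A hedged sketch of the intended call pattern for GetPodReplicaSets, as a ReplicaSet-style controller would use it; `indexer` and `pod` are assumed to come from an informer and are not defined here:

	rsLister := &StoreToReplicaSetLister{Indexer: indexer}
	rss, err := rsLister.GetPodReplicaSets(pod)
	if err != nil {
		// per the contract above, an error here means no ReplicaSet selects the pod
		return nil
	}
	for _, rs := range rss {
		fmt.Printf("pod %s is managed by ReplicaSet %s\n", pod.Name, rs.Name)
	}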
234
vendor/k8s.io/client-go/tools/cache/listers_rbac.go
generated
vendored

@@ -1,234 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"k8s.io/client-go/pkg/api/errors"
	"k8s.io/client-go/pkg/apis/rbac"
	"k8s.io/client-go/pkg/labels"
)

// TODO: generate these classes and methods for all resources of interest using
// a script. Can use "go generate" once 1.4 is supported by all users.

// Lister makes an Index have the List method. The Stores must contain only the expected type
// Example:
// s := cache.NewStore()
// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"}
// r := cache.NewReflector(lw, &rbac.ClusterRole{}, s).Run()
// l := clusterRoleLister{s}
// l.List()

func NewClusterRoleLister(indexer Indexer) ClusterRoleLister {
	return &clusterRoleLister{indexer: indexer}
}
func NewClusterRoleBindingLister(indexer Indexer) ClusterRoleBindingLister {
	return &clusterRoleBindingLister{indexer: indexer}
}
func NewRoleLister(indexer Indexer) RoleLister {
	return &roleLister{indexer: indexer}
}
func NewRoleBindingLister(indexer Indexer) RoleBindingLister {
	return &roleBindingLister{indexer: indexer}
}

// these interfaces are used by the rbac authorizer
type authorizerClusterRoleGetter interface {
	GetClusterRole(name string) (*rbac.ClusterRole, error)
}

type authorizerClusterRoleBindingLister interface {
	ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error)
}

type authorizerRoleGetter interface {
	GetRole(namespace, name string) (*rbac.Role, error)
}

type authorizerRoleBindingLister interface {
	ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error)
}

type ClusterRoleLister interface {
	authorizerClusterRoleGetter
	List(selector labels.Selector) (ret []*rbac.ClusterRole, err error)
	Get(name string) (*rbac.ClusterRole, error)
}

type clusterRoleLister struct {
	indexer Indexer
}

func (s *clusterRoleLister) List(selector labels.Selector) (ret []*rbac.ClusterRole, err error) {
	err = ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*rbac.ClusterRole))
	})
	return ret, err
}

func (s clusterRoleLister) Get(name string) (*rbac.ClusterRole, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(rbac.Resource("clusterrole"), name)
	}
	return obj.(*rbac.ClusterRole), nil
}

func (s clusterRoleLister) GetClusterRole(name string) (*rbac.ClusterRole, error) {
	return s.Get(name)
}

type ClusterRoleBindingLister interface {
	authorizerClusterRoleBindingLister
	List(selector labels.Selector) (ret []*rbac.ClusterRoleBinding, err error)
	Get(name string) (*rbac.ClusterRoleBinding, error)
}

type clusterRoleBindingLister struct {
	indexer Indexer
}

func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*rbac.ClusterRoleBinding, err error) {
	err = ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*rbac.ClusterRoleBinding))
	})
	return ret, err
}

func (s clusterRoleBindingLister) Get(name string) (*rbac.ClusterRoleBinding, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(rbac.Resource("clusterrolebinding"), name)
	}
	return obj.(*rbac.ClusterRoleBinding), nil
}

func (s clusterRoleBindingLister) ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) {
	return s.List(labels.Everything())
}

type RoleLister interface {
	authorizerRoleGetter
	List(selector labels.Selector) (ret []*rbac.Role, err error)
	Roles(namespace string) RoleNamespaceLister
}

type RoleNamespaceLister interface {
	List(selector labels.Selector) (ret []*rbac.Role, err error)
	Get(name string) (*rbac.Role, error)
}

type roleLister struct {
	indexer Indexer
}

func (s *roleLister) List(selector labels.Selector) (ret []*rbac.Role, err error) {
	err = ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*rbac.Role))
	})
	return ret, err
}

func (s *roleLister) Roles(namespace string) RoleNamespaceLister {
	return roleNamespaceLister{indexer: s.indexer, namespace: namespace}
}

func (s roleLister) GetRole(namespace, name string) (*rbac.Role, error) {
	return s.Roles(namespace).Get(name)
}

type roleNamespaceLister struct {
	indexer   Indexer
	namespace string
}

func (s roleNamespaceLister) List(selector labels.Selector) (ret []*rbac.Role, err error) {
	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*rbac.Role))
	})
	return ret, err
}

func (s roleNamespaceLister) Get(name string) (*rbac.Role, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(rbac.Resource("role"), name)
	}
	return obj.(*rbac.Role), nil
}

type RoleBindingLister interface {
	authorizerRoleBindingLister
	List(selector labels.Selector) (ret []*rbac.RoleBinding, err error)
	RoleBindings(namespace string) RoleBindingNamespaceLister
}

type RoleBindingNamespaceLister interface {
	List(selector labels.Selector) (ret []*rbac.RoleBinding, err error)
	Get(name string) (*rbac.RoleBinding, error)
}

type roleBindingLister struct {
	indexer Indexer
}

func (s *roleBindingLister) List(selector labels.Selector) (ret []*rbac.RoleBinding, err error) {
	err = ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*rbac.RoleBinding))
	})
	return ret, err
}

func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister {
	return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace}
}

func (s roleBindingLister) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) {
	return s.RoleBindings(namespace).List(labels.Everything())
}

type roleBindingNamespaceLister struct {
	indexer   Indexer
	namespace string
}

func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*rbac.RoleBinding, err error) {
	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*rbac.RoleBinding))
	})
	return ret, err
}

func (s roleBindingNamespaceLister) Get(name string) (*rbac.RoleBinding, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(rbac.Resource("rolebinding"), name)
	}
	return obj.(*rbac.RoleBinding), nil
}
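A sketch of how the rbac authorizer side consumes these listers: the concrete roleLister satisfies authorizerRoleGetter, so a Get by namespace and name is all the authorizer needs. The indexer and the "pod-reader" Role are hypothetical:

	roleLister := NewRoleLister(indexer) // indexer assumed kept current by an informer
	role, err := roleLister.GetRole("default", "pod-reader") // hypothetical Role
	if err != nil {
		// errors.IsNotFound(err) separates "no such Role" from a cache failure
		return nil, err
	}
	return role.Rules, nil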
64
vendor/k8s.io/client-go/tools/cache/listwatch.go
generated
vendored

@@ -19,28 +19,32 @@ package cache
import (
	"time"

	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/meta"
	"k8s.io/client-go/pkg/fields"
	"k8s.io/client-go/pkg/runtime"
	"k8s.io/client-go/pkg/watch"
	"k8s.io/client-go/rest"
	"golang.org/x/net/context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
)

// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
type ListerWatcher interface {
	// List should return a list type object; the Items field will be extracted, and the
	// ResourceVersion field will be used to start the watch in the right place.
	List(options api.ListOptions) (runtime.Object, error)
	List(options metav1.ListOptions) (runtime.Object, error)
	// Watch should begin a watch at the specified version.
	Watch(options api.ListOptions) (watch.Interface, error)
	Watch(options metav1.ListOptions) (watch.Interface, error)
}

// ListFunc knows how to list resources
type ListFunc func(options api.ListOptions) (runtime.Object, error)
type ListFunc func(options metav1.ListOptions) (runtime.Object, error)

// WatchFunc knows how to watch resources
type WatchFunc func(options api.ListOptions) (watch.Interface, error)
type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)

// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
// It is a convenience function for users of NewReflector, etc.

@@ -48,37 +52,39 @@ type WatchFunc func(options api.ListOptions) (watch.Interface, error)
type ListWatch struct {
	ListFunc  ListFunc
	WatchFunc WatchFunc
	// DisableChunking requests no chunking for this list watcher.
	DisableChunking bool
}

// Getter interface knows how to access Get method from RESTClient.
type Getter interface {
	Get() *rest.Request
	Get() *restclient.Request
}

// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
	listFunc := func(options api.ListOptions) (runtime.Object, error) {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		options.FieldSelector = fieldSelector.String()
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, api.ParameterCodec).
			FieldsSelectorParam(fieldSelector).
			VersionedParams(&options, metav1.ParameterCodec).
			Do().
			Get()
	}
	watchFunc := func(options api.ListOptions) (watch.Interface, error) {
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		options.Watch = true
		options.FieldSelector = fieldSelector.String()
		return c.Get().
			Prefix("watch").
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, api.ParameterCodec).
			FieldsSelectorParam(fieldSelector).
			VersionedParams(&options, metav1.ParameterCodec).
			Watch()
	}
	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

func timeoutFromListOptions(options api.ListOptions) time.Duration {
func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
	if options.TimeoutSeconds != nil {
		return time.Duration(*options.TimeoutSeconds) * time.Second
	}

@@ -86,22 +92,27 @@ func timeoutFromListOptions(options api.ListOptions) time.Duration {
}

// List a set of apiserver resources
func (lw *ListWatch) List(options api.ListOptions) (runtime.Object, error) {
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
	if !lw.DisableChunking {
		return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
	}
	return lw.ListFunc(options)
}

// Watch a set of apiserver resources
func (lw *ListWatch) Watch(options api.ListOptions) (watch.Interface, error) {
func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
	return lw.WatchFunc(options)
}

// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
	if len(conditions) == 0 {
		return nil, nil
	}

	list, err := lw.List(api.ListOptions{})
	list, err := lw.List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

@@ -153,10 +164,15 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch
	}
	currResourceVersion := metaObj.GetResourceVersion()

	watchInterface, err := lw.Watch(api.ListOptions{ResourceVersion: currResourceVersion})
	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
	if err != nil {
		return nil, err
	}

	return watch.Until(timeout, watchInterface, remainingConditions...)
	evt, err := watch.Until(timeout, watchInterface, remainingConditions...)
	if err == watch.ErrWatchClosed {
		// present a consistent error interface to callers
		err = wait.ErrWaitTimeout
	}
	return evt, err
}
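A sketch of building a ListWatch by hand, mirroring the body of NewListWatchFromClient above; useful when extra list options must be injected. `c` is a Getter as defined above, and the "default"/"pods" values are illustrative:

	lw := &ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return c.Get().
				Namespace("default").
				Resource("pods").
				VersionedParams(&options, metav1.ParameterCodec).
				Do().
				Get()
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.Watch = true
			return c.Get().
				Prefix("watch").
				Namespace("default").
				Resource("pods").
				VersionedParams(&options, metav1.ParameterCodec).
				Watch()
		},
	}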
261
vendor/k8s.io/client-go/tools/cache/mutation_cache.go
generated
vendored
Normal file

@@ -0,0 +1,261 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	utilcache "k8s.io/apimachinery/pkg/util/cache"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
)

// MutationCache is able to take the result of update operations and stores them in an LRU
// that can be used to provide a more current view of a requested object. It requires interpreting
// resourceVersions for comparisons.
// Implementations must be thread-safe.
// TODO find a way to layer this into an informer/lister
type MutationCache interface {
	GetByKey(key string) (interface{}, bool, error)
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	Mutation(interface{})
}

type ResourceVersionComparator interface {
	CompareResourceVersion(lhs, rhs runtime.Object) int
}

// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
// deal with objects that have a resource version that:
//
// - is an integer
// - increases when updated
// - is comparable across the same resource in a namespace
//
// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
// remains in the mutation cache before it is removed.
//
// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
// in the underlying store. This is only safe if your use of the cache can handle mutation entries
// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
	return &mutationCache{
		backingCache:  backingCache,
		indexer:       indexer,
		mutationCache: utilcache.NewLRUExpireCache(100),
		comparator:    etcdObjectVersioner{},
		ttl:           ttl,
		includeAdds:   includeAdds,
	}
}

// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and
// since you can't distinguish between, "didn't observe create" and "was deleted after create",
// if the key is missing from the backing cache, we always return it as missing
type mutationCache struct {
	lock          sync.Mutex
	backingCache  Store
	indexer       Indexer
	mutationCache *utilcache.LRUExpireCache
	includeAdds   bool
	ttl           time.Duration

	comparator ResourceVersionComparator
}

// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could
// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key.
// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache.
func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	obj, exists, err := c.backingCache.GetByKey(key)
	if err != nil {
		return nil, false, err
	}
	if !exists {
		if !c.includeAdds {
			// we can't distinguish between, "didn't observe create" and "was deleted after create", so
			// if the key is missing, we always return it as missing
			return nil, false, nil
		}
		obj, exists = c.mutationCache.Get(key)
		if !exists {
			return nil, false, nil
		}
	}
	objRuntime, ok := obj.(runtime.Object)
	if !ok {
		return obj, true, nil
	}
	return c.newerObject(key, objRuntime), true, nil
}

// ByIndex returns the newer objects that match the provided index and indexer key.
// Will return an error if no indexer was provided.
func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.indexer == nil {
		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
	}
	keys, err := c.indexer.IndexKeys(name, indexKey)
	if err != nil {
		return nil, err
	}
	var items []interface{}
	keySet := sets.NewString()
	for _, key := range keys {
		keySet.Insert(key)
		obj, exists, err := c.indexer.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if !exists {
			continue
		}
		if objRuntime, ok := obj.(runtime.Object); ok {
			items = append(items, c.newerObject(key, objRuntime))
		} else {
			items = append(items, obj)
		}
	}

	if c.includeAdds {
		fn := c.indexer.GetIndexers()[name]
		// Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
		for _, key := range c.mutationCache.Keys() {
			updated, ok := c.mutationCache.Get(key)
			if !ok {
				continue
			}
			if keySet.Has(key.(string)) {
				continue
			}
			elements, err := fn(updated)
			if err != nil {
				glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
				continue
			}
			for _, inIndex := range elements {
				if inIndex != indexKey {
					continue
				}
				items = append(items, updated)
				break
			}
		}
	}

	return items, nil
}

// newerObject checks the mutation cache for a newer object and returns one if found. If the
// mutated object is older than the backing object, it is removed from the mutation cache. Must be
// called while the lock is held.
func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
	mutatedObj, exists := c.mutationCache.Get(key)
	if !exists {
		return backing
	}
	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
	if !ok {
		return backing
	}
	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
		c.mutationCache.Remove(key)
		return backing
	}
	return mutatedObjRuntime
}

// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to be
// "one that was valid at some point in time", not "the one that I want".
func (c *mutationCache) Mutation(obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		// this is a "nice to have", so failures shouldn't do anything weird
		utilruntime.HandleError(err)
		return
	}

	if objRuntime, ok := obj.(runtime.Object); ok {
		if mutatedObj, exists := c.mutationCache.Get(key); exists {
			if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
				if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
					return
				}
			}
		}
	}
	c.mutationCache.Add(key, obj, c.ttl)
}

// etcdObjectVersioner implements versioning and extracting etcd node information
// for objects that have an embedded ObjectMeta or ListMeta field.
type etcdObjectVersioner struct{}

// ObjectResourceVersion implements Versioner
func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return 0, err
	}
	version := accessor.GetResourceVersion()
	if len(version) == 0 {
		return 0, nil
	}
	return strconv.ParseUint(version, 10, 64)
}

// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings,
// but etcd resource versions are special, they're actually ints, so we can easily compare them.
func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
	lhsVersion, err := a.ObjectResourceVersion(lhs)
	if err != nil {
		// coder error
		panic(err)
	}
	rhsVersion, err := a.ObjectResourceVersion(rhs)
	if err != nil {
		// coder error
		panic(err)
	}

	if lhsVersion == rhsVersion {
		return 0
	}
	if lhsVersion < rhsVersion {
		return -1
	}

	return 1
}
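A sketch of the intended use: a controller writes an object and wants its own write visible before the watch delivers it. The Store/Indexer pair would come from an informer; the typed client call, key, and one-minute ttl are illustrative assumptions:

	mc := NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, false)

	updated, err := client.Update(obj) // hypothetical typed client call
	if err == nil {
		mc.Mutation(updated) // remember our own write
	}

	// later reads return the newer of the informer's copy and the remembered write
	if cur, exists, err := mc.GetByKey(key); err == nil && exists {
		_ = cur
	}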
36
vendor/k8s.io/client-go/tools/cache/mutation_detector.go
generated
vendored

@@ -24,9 +24,8 @@ import (
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/pkg/api"
|
||||
"k8s.io/client-go/pkg/runtime"
|
||||
"k8s.io/client-go/pkg/util/diff"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
)
|
||||
|
||||
var mutationDetectionEnabled = false
|
||||
|
@ -79,17 +78,15 @@ type cacheObj struct {
|
|||
|
||||
func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
|
||||
// we DON'T want protection from panics. If we're running this code, we want to die
|
||||
go func() {
|
||||
for {
|
||||
d.CompareObjects()
|
||||
for {
|
||||
d.CompareObjects()
|
||||
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
case <-time.After(d.period):
|
||||
}
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
case <-time.After(d.period):
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object
|
||||
|
@ -98,18 +95,13 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
|
|||
if _, ok := obj.(DeletedFinalStateUnknown); ok {
|
||||
return
|
||||
}
|
||||
if _, ok := obj.(runtime.Object); !ok {
|
||||
return
|
||||
}
|
||||
if obj, ok := obj.(runtime.Object); ok {
|
||||
copiedObj := obj.DeepCopyObject()
|
||||
|
||||
copiedObj, err := api.Scheme.Copy(obj.(runtime.Object))
|
||||
if err != nil {
|
||||
return
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
|
||||
}
|
||||
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
|
||||
}
|
||||
|
||||
func (d *defaultCacheMutationDetector) CompareObjects() {
|
||||
|
|
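The change above removes the detector's internal goroutine: Run now blocks and loops until stopCh closes, so the caller owns the goroutine. A minimal sketch, with the detector instance assumed to be built elsewhere:

	stopCh := make(chan struct{})
	go detector.Run(stopCh) // detector is a CacheMutationDetector created elsewhere
	// ...
	close(stopCh) // ends the CompareObjects polling loop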
127
vendor/k8s.io/client-go/tools/cache/reflector.go
generated
vendored

@@ -30,23 +30,27 @@ import (
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/client-go/pkg/api"
|
||||
apierrs "k8s.io/client-go/pkg/api/errors"
|
||||
"k8s.io/client-go/pkg/api/meta"
|
||||
"k8s.io/client-go/pkg/runtime"
|
||||
utilruntime "k8s.io/client-go/pkg/util/runtime"
|
||||
"k8s.io/client-go/pkg/util/wait"
|
||||
"k8s.io/client-go/pkg/watch"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
|
||||
type Reflector struct {
|
||||
// name identifies this reflector. By default it will be a file:line if possible.
|
||||
name string
|
||||
// metrics tracks basic metric information about the reflector
|
||||
metrics *reflectorMetrics
|
||||
|
||||
// The type of object we expect to place in the store.
|
||||
expectedType reflect.Type
|
||||
|
@ -58,8 +62,9 @@ type Reflector struct {
|
|||
// the beginning of the next one.
|
||||
period time.Duration
|
||||
resyncPeriod time.Duration
|
||||
// now() returns current time - exposed for testing purposes
|
||||
now func() time.Time
|
||||
ShouldResync func() bool
|
||||
// clock allows tests to manipulate time
|
||||
clock clock.Clock
|
||||
// lastSyncResourceVersion is the resource version token last
|
||||
// observed when doing a sync with the underlying store
|
||||
// it is thread safe, but not synchronized with the underlying store
|
||||
|
@ -94,23 +99,35 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn
|
|||
return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod)
|
||||
}
|
||||
|
||||
// reflectorDisambiguator is used to disambiguate started reflectors.
|
||||
// initialized to an unstable value to ensure meaning isn't attributed to the suffix.
|
||||
var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
|
||||
|
||||
// NewNamedReflector same as NewReflector, but with a specified name for logging
|
||||
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
|
||||
r := &Reflector{
|
||||
name: name,
|
||||
name: name,
|
||||
// we need this to be unique per process (some names are still the same)but obvious who it belongs to
|
||||
metrics: newReflectorMetrics(makeValidPromethusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
|
||||
listerWatcher: lw,
|
||||
store: store,
|
||||
expectedType: reflect.TypeOf(expectedType),
|
||||
period: time.Second,
|
||||
resyncPeriod: resyncPeriod,
|
||||
now: time.Now,
|
||||
clock: &clock.RealClock{},
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func makeValidPromethusMetricLabel(in string) string {
|
||||
// this isn't perfect, but it removes our common characters
|
||||
return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
|
||||
}
|
||||
|
||||
// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
|
||||
// call chains to NewReflector, so they'd be low entropy names for reflectors
|
||||
var internalPackages = []string{"kubernetes/pkg/client/cache/", "/runtime/asm_"}
|
||||
var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"}
|
||||
|
||||
// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages
|
||||
// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging
|
||||
|
@ -180,21 +197,10 @@ func extractStackCreator() (string, int, bool) {
|
|||
}
|
||||
|
||||
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
|
||||
// Run starts a goroutine and returns immediately.
|
||||
func (r *Reflector) Run() {
|
||||
// Run will exit when stopCh is closed.
|
||||
func (r *Reflector) Run(stopCh <-chan struct{}) {
|
||||
glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
|
||||
go wait.Until(func() {
|
||||
if err := r.ListAndWatch(wait.NeverStop); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
}, r.period, wait.NeverStop)
|
||||
}
|
||||
|
||||
// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed.
|
||||
// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed.
|
||||
func (r *Reflector) RunUntil(stopCh <-chan struct{}) {
|
||||
glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
|
||||
go wait.Until(func() {
|
||||
wait.Until(func() {
|
||||
if err := r.ListAndWatch(stopCh); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
|
@ -223,8 +229,8 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
|
|||
// always fail so we end up listing frequently. Then, if we don't
|
||||
// manually stop the timer, we could end up with many timers active
|
||||
// concurrently.
|
||||
t := time.NewTimer(r.resyncPeriod)
|
||||
return t.C, t.Stop
|
||||
t := r.clock.NewTimer(r.resyncPeriod)
|
||||
return t.C(), t.Stop
|
||||
}
|
||||
|
||||
// ListAndWatch first lists all items and get the resource version at the moment of call,
|
||||
|
@ -233,17 +239,18 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
|
|||
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
||||
glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
|
||||
var resourceVersion string
|
||||
resyncCh, cleanup := r.resyncChan()
|
||||
defer cleanup()
|
||||
|
||||
// Explicitly set "0" as resource version - it's fine for the List()
|
||||
// to be served from cache and potentially be delayed relative to
|
||||
// etcd contents. Reflector framework will catch up via Watch() eventually.
|
||||
options := api.ListOptions{ResourceVersion: "0"}
|
||||
options := metav1.ListOptions{ResourceVersion: "0"}
|
||||
r.metrics.numberOfLists.Inc()
|
||||
start := r.clock.Now()
|
||||
list, err := r.listerWatcher.List(options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
|
||||
}
|
||||
r.metrics.listDuration.Observe(time.Since(start).Seconds())
|
||||
listMetaInterface, err := meta.ListAccessor(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
|
||||
|
@ -253,6 +260,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
|
||||
}
|
||||
r.metrics.numberOfItemsInList.Observe(float64(len(items)))
|
||||
if err := r.syncWith(items, resourceVersion); err != nil {
|
||||
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
|
||||
}
|
||||
|
@ -262,6 +270,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
cancelCh := make(chan struct{})
|
||||
defer close(cancelCh)
|
||||
go func() {
|
||||
resyncCh, cleanup := r.resyncChan()
|
||||
defer func() {
|
||||
cleanup() // Call the last one written into cleanup
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-resyncCh:
|
||||
|
@ -270,10 +282,12 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
case <-cancelCh:
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("%s: forcing resync", r.name)
|
||||
if err := r.store.Resync(); err != nil {
|
||||
resyncerrc <- err
|
||||
return
|
||||
if r.ShouldResync == nil || r.ShouldResync() {
|
||||
glog.V(4).Infof("%s: forcing resync", r.name)
|
||||
if err := r.store.Resync(); err != nil {
|
||||
resyncerrc <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
cleanup()
|
||||
resyncCh, cleanup = r.resyncChan()
|
||||
|
@ -281,14 +295,22 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
}()
|
||||
|
||||
for {
|
||||
// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
|
||||
select {
|
||||
case <-stopCh:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
|
||||
options = api.ListOptions{
|
||||
options = metav1.ListOptions{
|
||||
ResourceVersion: resourceVersion,
|
||||
// We want to avoid situations of hanging watchers. Stop any wachers that do not
|
||||
// receive any events within the timeout window.
|
||||
TimeoutSeconds: &timemoutseconds,
|
||||
}
|
||||
|
||||
r.metrics.numberOfWatches.Inc()
|
||||
w, err := r.listerWatcher.Watch(options)
|
||||
if err != nil {
|
||||
switch err {
|
||||
|
@ -334,12 +356,17 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
|
|||
|
||||
// watchHandler watches w and keeps *resourceVersion up to date.
|
||||
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
|
||||
start := time.Now()
|
||||
start := r.clock.Now()
|
||||
eventCount := 0
|
||||
|
||||
// Stopping the watcher should be idempotent and if we return from this function there's no way
|
||||
// we're coming back in with the same watch interface.
|
||||
defer w.Stop()
|
||||
// update metrics
|
||||
defer func() {
|
||||
r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
|
||||
r.metrics.watchDuration.Observe(time.Since(start).Seconds())
|
||||
}()
|
||||
|
||||
loop:
|
||||
for {
|
||||
|
@ -367,14 +394,23 @@ loop:
|
|||
newResourceVersion := meta.GetResourceVersion()
|
||||
switch event.Type {
|
||||
case watch.Added:
|
||||
r.store.Add(event.Object)
|
||||
err := r.store.Add(event.Object)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
|
||||
}
|
||||
case watch.Modified:
|
||||
r.store.Update(event.Object)
|
||||
err := r.store.Update(event.Object)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
|
||||
}
|
||||
case watch.Deleted:
|
||||
// TODO: Will any consumers need access to the "last known
|
||||
// state", which is passed in event.Object? If so, may need
|
||||
// to change this.
|
||||
r.store.Delete(event.Object)
|
||||
err := r.store.Delete(event.Object)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
|
||||
}
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
|
||||
}
|
||||
|
@ -384,10 +420,10 @@ loop:
|
|||
}
|
||||
}
|
||||
|
||||
watchDuration := time.Now().Sub(start)
|
||||
watchDuration := r.clock.Now().Sub(start)
|
||||
if watchDuration < 1*time.Second && eventCount == 0 {
|
||||
glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
|
||||
return errors.New("very short watch")
|
||||
r.metrics.numberOfShortWatches.Inc()
|
||||
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
|
||||
}
|
||||
glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
|
||||
return nil
|
||||
|
@ -405,4 +441,9 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
|
|||
r.lastSyncResourceVersionMutex.Lock()
|
||||
defer r.lastSyncResourceVersionMutex.Unlock()
|
||||
r.lastSyncResourceVersion = v
|
||||
|
||||
rv, err := strconv.Atoi(v)
|
||||
if err == nil {
|
||||
r.metrics.lastResourceVersion.Set(float64(rv))
|
||||
}
|
||||
}
|
||||
|
|
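In this bump the Run/RunUntil pair collapses into a single stop-channel-driven entry point, so reflector lifecycle moves to the caller. A hedged sketch; the ListerWatcher, store, object type, and resync period are placeholders:

	r := NewReflector(lw, &v1.Pod{}, store, 30*time.Second) // v1.Pod is illustrative
	stopCh := make(chan struct{})
	go r.Run(stopCh) // previously r.RunUntil(stopCh); the no-arg r.Run() is gone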
119
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
generated
vendored
Normal file

@@ -0,0 +1,119 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.

package cache

import (
	"sync"
)

// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
	Set(float64)
}

// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
	Inc()
}

// SummaryMetric captures individual observations.
type SummaryMetric interface {
	Observe(float64)
}

type noopMetric struct{}

func (noopMetric) Inc()            {}
func (noopMetric) Dec()            {}
func (noopMetric) Observe(float64) {}
func (noopMetric) Set(float64)     {}

type reflectorMetrics struct {
	numberOfLists       CounterMetric
	listDuration        SummaryMetric
	numberOfItemsInList SummaryMetric

	numberOfWatches      CounterMetric
	numberOfShortWatches CounterMetric
	watchDuration        SummaryMetric
	numberOfItemsInWatch SummaryMetric

	lastResourceVersion GaugeMetric
}

// MetricsProvider generates various metrics used by the reflector.
type MetricsProvider interface {
	NewListsMetric(name string) CounterMetric
	NewListDurationMetric(name string) SummaryMetric
	NewItemsInListMetric(name string) SummaryMetric

	NewWatchesMetric(name string) CounterMetric
	NewShortWatchesMetric(name string) CounterMetric
	NewWatchDurationMetric(name string) SummaryMetric
	NewItemsInWatchMetric(name string) SummaryMetric

	NewLastResourceVersionMetric(name string) GaugeMetric
}

type noopMetricsProvider struct{}

func (noopMetricsProvider) NewListsMetric(name string) CounterMetric         { return noopMetric{} }
func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric  { return noopMetric{} }
func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric   { return noopMetric{} }
func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric       { return noopMetric{} }
func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric  { return noopMetric{} }
func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric  { return noopMetric{} }
func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric {
	return noopMetric{}
}

var metricsFactory = struct {
	metricsProvider MetricsProvider
	setProviders    sync.Once
}{
	metricsProvider: noopMetricsProvider{},
}

func newReflectorMetrics(name string) *reflectorMetrics {
	var ret *reflectorMetrics
	if len(name) == 0 {
		return ret
	}
	return &reflectorMetrics{
		numberOfLists:        metricsFactory.metricsProvider.NewListsMetric(name),
		listDuration:         metricsFactory.metricsProvider.NewListDurationMetric(name),
		numberOfItemsInList:  metricsFactory.metricsProvider.NewItemsInListMetric(name),
		numberOfWatches:      metricsFactory.metricsProvider.NewWatchesMetric(name),
		numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
		watchDuration:        metricsFactory.metricsProvider.NewWatchDurationMetric(name),
		numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
		lastResourceVersion:  metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
	}
}

// SetReflectorMetricsProvider sets the metrics provider
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
	metricsFactory.setProviders.Do(func() {
		metricsFactory.metricsProvider = metricsProvider
	})
}
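Anything satisfying MetricsProvider can be registered, once per process, to route these counters somewhere real. A sketch that overrides a single metric and inherits no-ops for the rest; it assumes code living alongside this package (the no-op provider is unexported) and a sync/atomic import, and names no real Prometheus binding:

	type countingMetric struct{ n int64 }

	func (c *countingMetric) Inc() { atomic.AddInt64(&c.n, 1) }

	// inherit the no-op constructors, override only the one we care about
	type listCountingProvider struct{ noopMetricsProvider }

	func (listCountingProvider) NewListsMetric(name string) CounterMetric { return &countingMetric{} }

	// SetReflectorMetricsProvider is guarded by sync.Once, so only the first call wins
	func init() { SetReflectorMetricsProvider(listCountingProvider{}) }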
447
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored

@@ -21,31 +21,44 @@ import (
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/pkg/runtime"
|
||||
utilruntime "k8s.io/client-go/pkg/util/runtime"
|
||||
"k8s.io/client-go/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/buffer"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// if you use this, there is one behavior change compared to a standard Informer.
|
||||
// When you receive a notification, the cache will be AT LEAST as fresh as the
|
||||
// notification, but it MAY be more fresh. You should NOT depend on the contents
|
||||
// of the cache exactly matching the notification you've received in handler
|
||||
// functions. If there was a create, followed by a delete, the cache may NOT
|
||||
// have your item. This has advantages over the broadcaster since it allows us
|
||||
// to share a common cache across many controllers. Extending the broadcaster
|
||||
// would have required us keep duplicate caches for each watch.
|
||||
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
|
||||
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
|
||||
// one behavior change compared to a standard Informer. When you receive a notification, the cache
|
||||
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
|
||||
// on the contents of the cache exactly matching the notification you've received in handler
|
||||
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
|
||||
// has advantages over the broadcaster since it allows us to share a common cache across many
|
||||
// controllers. Extending the broadcaster would have required us keep duplicate caches for each
|
||||
// watch.
|
||||
type SharedInformer interface {
|
||||
// events to a single handler are delivered sequentially, but there is no coordination between different handlers
|
||||
// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
|
||||
// TODO we should try to remove this restriction eventually.
|
||||
AddEventHandler(handler ResourceEventHandler) error
|
||||
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
|
||||
// period. Events to a single handler are delivered sequentially, but there is no coordination
|
||||
// between different handlers.
|
||||
AddEventHandler(handler ResourceEventHandler)
|
||||
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
|
||||
// specified resync period. Events to a single handler are delivered sequentially, but there is
|
||||
// no coordination between different handlers.
|
||||
AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
|
||||
// GetStore returns the Store.
|
||||
GetStore() Store
|
||||
// GetController gives back a synthetic interface that "votes" to start the informer
|
||||
GetController() ControllerInterface
|
||||
GetController() Controller
|
||||
// Run starts the shared informer, which will be stopped when stopCh is closed.
|
||||
Run(stopCh <-chan struct{})
|
||||
// HasSynced returns true if the shared informer's store has synced.
|
||||
HasSynced() bool
|
||||
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
|
||||
// store. The value returned is not synchronized with access to the underlying store and is not
|
||||
// thread-safe.
|
||||
LastSyncResourceVersion() string
|
||||
}
|
||||
|
||||
|
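A sketch of the two-handler shape this interface now enables: two consumers share one informer but resync at different periods. ResourceEventHandlerFuncs is assumed to be this package's usual adapter, and fastHandler/slowHandler and the periods are hypothetical:

	informer := NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second, Indexers{}) // types illustrative
	informer.AddEventHandler(fastHandler)                                         // informer's default resync period
	informer.AddEventHandlerWithResyncPeriod(slowHandler, 10*time.Minute)
	go informer.Run(stopCh)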
@ -57,23 +70,22 @@ type SharedIndexInformer interface {
|
|||
}
|
||||
|
||||
// NewSharedInformer creates a new instance for the listwatcher.
|
||||
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
|
||||
// be shared amongst all consumers.
|
||||
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
|
||||
return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
|
||||
}
|
||||
|
||||
// NewSharedIndexInformer creates a new instance for the listwatcher.
|
||||
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
|
||||
// be shared amongst all consumers.
|
||||
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
|
||||
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
|
||||
realClock := &clock.RealClock{}
|
||||
sharedIndexInformer := &sharedIndexInformer{
|
||||
processor: &sharedProcessor{},
|
||||
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
|
||||
listerWatcher: lw,
|
||||
objectType: objType,
|
||||
fullResyncPeriod: resyncPeriod,
|
||||
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
|
||||
processor: &sharedProcessor{clock: realClock},
|
||||
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
|
||||
listerWatcher: lw,
|
||||
objectType: objType,
|
||||
resyncCheckPeriod: defaultEventHandlerResyncPeriod,
|
||||
defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
|
||||
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
|
||||
clock: realClock,
|
||||
}
|
||||
return sharedIndexInformer
|
||||
}
|
||||
|
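For context, a hedged sketch of the wiring a caller typically does around these constructors; restClient and the "pods" resource are assumptions, and the import paths should be checked against this vendor tree.

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

// newPodInformer builds the ListerWatcher and hands it to the constructor
// above. The 10-minute value becomes defaultEventHandlerResyncPeriod for
// handlers added via AddEventHandler.
func newPodInformer(restClient rest.Interface, namespace string) cache.SharedIndexInformer {
	lw := cache.NewListWatchFromClient(restClient, "pods", namespace, fields.Everything())
	return cache.NewSharedIndexInformer(
		lw,
		&v1.Pod{},
		10*time.Minute,
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)
}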
@@ -81,11 +93,16 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeri
// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool

// syncedPollPeriod controls how often you look at the status of your sync funcs
const syncedPollPeriod = 100 * time.Millisecond
const (
	// syncedPollPeriod controls how often you look at the status of your sync funcs
	syncedPollPeriod = 100 * time.Millisecond

	// initialBufferSize is the initial number of event notifications that can be buffered.
	initialBufferSize = 1024
)

// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the contoller should shutdown
// if the controller should shutdown
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	err := wait.PollUntil(syncedPollPeriod,
		func() (bool, error) {
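The usual call pattern for this helper, sketched under the assumption that informer is a SharedIndexInformer whose Run was started elsewhere:

package main

import (
	"k8s.io/client-go/tools/cache"
)

// waitThenWork blocks until the informer's cache has synced, then proceeds.
// HasSynced satisfies InformerSynced, so it can be passed directly.
func waitThenWork(informer cache.SharedIndexInformer, stopCh <-chan struct{}) bool {
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		// stopCh closed before the initial list completed; shut down.
		return false
	}
	// Safe to read from informer.GetStore() from here on.
	return true
}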
@@ -108,29 +125,35 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
type sharedIndexInformer struct {
	indexer    Indexer
	controller *Controller
	controller Controller

	processor             *sharedProcessor
	cacheMutationDetector CacheMutationDetector

	// This block is tracked to handle late initialization of the controller
	listerWatcher    ListerWatcher
	objectType       runtime.Object
	fullResyncPeriod time.Duration
	listerWatcher ListerWatcher
	objectType    runtime.Object

	started     bool
	startedLock sync.Mutex
	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
	// shouldResync to check if any of our listeners need a resync.
	resyncCheckPeriod time.Duration
	// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
	// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
	// value).
	defaultEventHandlerResyncPeriod time.Duration
	// clock allows for testability
	clock clock.Clock

	started, stopped bool
	startedLock      sync.Mutex

	// blockDeltas gives a way to stop all event distribution so that a late event handler
	// can safely join the shared informer.
	blockDeltas sync.Mutex
	// stopCh is the channel used to stop the main Run process. We have to track it so that
	// late joiners can have a proper stop
	stopCh <-chan struct{}
}
// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disonnected in this case, because higher
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
@@ -145,6 +168,10 @@ func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}

func (c *dummyController) LastSyncResourceVersion() string {
	return ""
}

type updateNotification struct {
	oldObj interface{}
	newObj interface{}
@@ -167,8 +194,9 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.fullResyncPeriod,
		FullResyncPeriod: s.resyncCheckPeriod,
		RetryOnError:     false,
		ShouldResync:     s.processor.shouldResync,

		Process: s.HandleDeltas,
	}
@@ -178,19 +206,24 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
		defer s.startedLock.Unlock()

		s.controller = New(cfg)
		s.controller.(*controller).clock = s.clock
		s.started = true
	}()

	s.stopCh = stopCh
	s.cacheMutationDetector.Run(stopCh)
	s.processor.run(stopCh)
	s.controller.Run(stopCh)
}
	// Separate stop channel because Processor should be stopped strictly after controller
	processorStopCh := make(chan struct{})
	var wg wait.Group
	defer wg.Wait()              // Wait for Processor to stop
	defer close(processorStopCh) // Tell Processor to stop
	wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
	wg.StartWithChannel(processorStopCh, s.processor.run)

func (s *sharedIndexInformer) isStarted() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	return s.started
	defer func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()
		s.stopped = true // Don't want any new listeners
	}()
	s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) HasSynced() bool {
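The stop-ordering trick in the new Run body generalizes: give the downstream consumer its own stop channel and close it only after the upstream producer has returned. A standalone sketch of the same pattern, with illustrative names:

package main

import (
	"k8s.io/apimachinery/pkg/util/wait"
)

// runPipeline stops consumer strictly after producer, mirroring the
// processorStopCh / wait.Group arrangement above.
func runPipeline(producer, consumer func(stopCh <-chan struct{}), stopCh <-chan struct{}) {
	consumerStopCh := make(chan struct{})
	var wg wait.Group
	defer wg.Wait()             // 3. wait for the consumer to drain and exit
	defer close(consumerStopCh) // 2. then tell the consumer to stop
	wg.StartWithChannel(consumerStopCh, consumer)

	producer(stopCh) // 1. blocks until stopCh closes
}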
@@ -207,10 +240,10 @@ func (s *sharedIndexInformer) LastSyncResourceVersion() string {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil || s.controller.reflector == nil {
	if s.controller == nil {
		return ""
	}
	return s.controller.reflector.LastSyncResourceVersion()
	return s.controller.LastSyncResourceVersion()
}

func (s *sharedIndexInformer) GetStore() Store {
@@ -232,18 +265,65 @@ func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
	return s.indexer.AddIndexers(indexers)
}

func (s *sharedIndexInformer) GetController() ControllerInterface {
func (s *sharedIndexInformer) GetController() Controller {
	return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
	s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}

func determineResyncPeriod(desired, check time.Duration) time.Duration {
	if desired == 0 {
		return desired
	}
	if check == 0 {
		glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
		return 0
	}
	if desired < check {
		glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
		return check
	}
	return desired
}

const minimumResyncPeriod = 1 * time.Second

func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.stopped {
		glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
		return
	}

	if resyncPeriod > 0 {
		if resyncPeriod < minimumResyncPeriod {
			glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
			resyncPeriod = minimumResyncPeriod
		}

		if resyncPeriod < s.resyncCheckPeriod {
			if s.started {
				glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
				resyncPeriod = s.resyncCheckPeriod
			} else {
				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
				// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
				// accordingly
				s.resyncCheckPeriod = resyncPeriod
				s.processor.resyncCheckPeriodChanged(resyncPeriod)
			}
		}
	}

	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)

	if !s.started {
		listener := newProcessListener(handler)
		s.processor.listeners = append(s.processor.listeners, listener)
		return nil
		s.processor.addListener(listener)
		return
	}

	// in order to safely join, we have to
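A worked example of these adjustment rules, as a hedged sketch (informer, handlerA, and handlerB are assumed, and the informer is taken to have a 30s default): a 10s request before Run shrinks the shared check period for everyone, while a 5s request after Run is raised to the current check period.

package main

import (
	"time"

	"k8s.io/client-go/tools/cache"
)

// demoResyncRules is illustrative only and glosses over the race between
// starting Run and the informer marking itself started.
func demoResyncRules(informer cache.SharedIndexInformer, handlerA, handlerB cache.ResourceEventHandler, stopCh <-chan struct{}) {
	// Before Run: resyncCheckPeriod drops from 30s to 10s, and
	// resyncCheckPeriodChanged re-derives every listener's period.
	informer.AddEventHandlerWithResyncPeriod(handlerA, 10*time.Second)

	go informer.Run(stopCh)

	// After Run has started: 5s cannot speed up the reflector's timer, so it
	// is raised to the current resyncCheckPeriod (10s) with a warning.
	informer.AddEventHandlerWithResyncPeriod(handlerB, 5*time.Second)
}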
@@ -254,18 +334,10 @@ func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) erro
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	listener := newProcessListener(handler)
	s.processor.listeners = append(s.processor.listeners, listener)

	go listener.run(s.stopCh)
	go listener.pop(s.stopCh)

	items := s.indexer.List()
	for i := range items {
		listener.add(addNotification{newObj: items[i]})
	s.processor.addAndStartListener(listener)
	for _, item := range s.indexer.List() {
		listener.add(addNotification{newObj: item})
	}

	return nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
@@ -276,133 +348,201 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	for _, d := range obj.(Deltas) {
		switch d.Type {
		case Sync, Added, Updated:
			isSync := d.Type == Sync
			s.cacheMutationDetector.AddObject(d.Object)
			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
				if err := s.indexer.Update(d.Object); err != nil {
					return err
				}
				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object})
				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
			} else {
				if err := s.indexer.Add(d.Object); err != nil {
					return err
				}
				s.processor.distribute(addNotification{newObj: d.Object})
				s.processor.distribute(addNotification{newObj: d.Object}, isSync)
			}
		case Deleted:
			if err := s.indexer.Delete(d.Object); err != nil {
				return err
			}
			s.processor.distribute(deleteNotification{oldObj: d.Object})
			s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
		}
	}
	return nil
}
type sharedProcessor struct {
	listeners []*processorListener
	listenersLock sync.RWMutex
	listeners        []*processorListener
	syncingListeners []*processorListener
	clock            clock.Clock
	wg               wait.Group
}

func (p *sharedProcessor) distribute(obj interface{}) {
	for _, listener := range p.listeners {
		listener.add(obj)
func (p *sharedProcessor) addAndStartListener(listener *processorListener) {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	p.addListenerLocked(listener)
	p.wg.Start(listener.run)
	p.wg.Start(listener.pop)
}

func (p *sharedProcessor) addListener(listener *processorListener) {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	p.addListenerLocked(listener)
}

func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
	p.listeners = append(p.listeners, listener)
	p.syncingListeners = append(p.syncingListeners, listener)
}

func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	if sync {
		for _, listener := range p.syncingListeners {
			listener.add(obj)
		}
	} else {
		for _, listener := range p.listeners {
			listener.add(obj)
		}
	}
}
func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	func() {
		p.listenersLock.RLock()
		defer p.listenersLock.RUnlock()
		for _, listener := range p.listeners {
			p.wg.Start(listener.run)
			p.wg.Start(listener.pop)
		}
	}()
	<-stopCh
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()
	for _, listener := range p.listeners {
		go listener.run(stopCh)
		go listener.pop(stopCh)
		close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
	}
	p.wg.Wait() // Wait for all .pop() and .run() to stop
}
// shouldResync queries every listener to determine if any of them need a resync, based on each
// listener's resyncPeriod.
func (p *sharedProcessor) shouldResync() bool {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	p.syncingListeners = []*processorListener{}

	resyncNeeded := false
	now := p.clock.Now()
	for _, listener := range p.listeners {
		// need to loop through all the listeners to see if they need to resync so we can prepare any
		// listeners that are going to be resyncing.
		if listener.shouldResync(now) {
			resyncNeeded = true
			p.syncingListeners = append(p.syncingListeners, listener)
			listener.determineNextResync(now)
		}
	}
	return resyncNeeded
}

func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	for _, listener := range p.listeners {
		resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
		listener.setResyncPeriod(resyncPeriod)
	}
}
type processorListener struct {
	// lock/cond protects access to 'pendingNotifications'.
	lock sync.RWMutex
	cond sync.Cond

	// pendingNotifications is an unbounded slice that holds all notifications not yet distributed
	// there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better
	pendingNotifications []interface{}

	nextCh chan interface{}
	addCh  chan interface{}

	handler ResourceEventHandler

	// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
	// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better.
	pendingNotifications buffer.RingGrowing

	// requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
	requestedResyncPeriod time.Duration
	// resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
	// value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
	// informer's overall resync check period.
	resyncPeriod time.Duration
	// nextResync is the earliest time the listener should get a full resync
	nextResync time.Time
	// resyncLock guards access to resyncPeriod and nextResync
	resyncLock sync.Mutex
}

func newProcessListener(handler ResourceEventHandler) *processorListener {
func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
	ret := &processorListener{
		pendingNotifications: []interface{}{},
		nextCh:               make(chan interface{}),
		handler:              handler,
		nextCh:                make(chan interface{}),
		addCh:                 make(chan interface{}),
		handler:               handler,
		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
		requestedResyncPeriod: requestedResyncPeriod,
		resyncPeriod:          resyncPeriod,
	}

	ret.cond.L = &ret.lock
	ret.determineNextResync(now)

	return ret
}
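The ring buffer behind the new pendingNotifications field is small enough to sketch in isolation; the import path is an assumption read off this vendor tree.

package main

import (
	"fmt"

	"k8s.io/client-go/util/buffer"
)

func main() {
	// RingGrowing grows past its initial capacity instead of blocking or
	// dropping, which is the trade-off the TODO above calls out.
	ring := buffer.NewRingGrowing(2)
	for i := 0; i < 5; i++ {
		ring.WriteOne(i)
	}
	for {
		v, ok := ring.ReadOne()
		if !ok { // buffer drained
			break
		}
		fmt.Println(v)
	}
}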
func (p *processorListener) add(notification interface{}) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pendingNotifications = append(p.pendingNotifications, notification)
	p.cond.Broadcast()
	p.addCh <- notification
}
func (p *processorListener) pop(stopCh <-chan struct{}) {
func (p *processorListener) pop() {
	defer utilruntime.HandleCrash()
	defer close(p.nextCh) // Tell .run() to stop

	var nextCh chan<- interface{}
	var notification interface{}
	for {
		blockingGet := func() (interface{}, bool) {
			p.lock.Lock()
			defer p.lock.Unlock()

			for len(p.pendingNotifications) == 0 {
				// check if we're shutdown
				select {
				case <-stopCh:
					return nil, true
				default:
				}
				p.cond.Wait()
			}

			nt := p.pendingNotifications[0]
			p.pendingNotifications = p.pendingNotifications[1:]
			return nt, false
		}

		notification, stopped := blockingGet()
		if stopped {
			return
		}

		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		case nextCh <- notification:
			// Notification dispatched
			var ok bool
			notification, ok = p.pendingNotifications.ReadOne()
			if !ok { // Nothing to pop
				nextCh = nil // Disable this select case
			}
		case notificationToAdd, ok := <-p.addCh:
			if !ok {
				return
			}
			if notification == nil { // No notification to pop (and pendingNotifications is empty)
				// Optimize the case - skip adding to pendingNotifications
				notification = notificationToAdd
				nextCh = p.nextCh
			} else { // There is already a notification waiting to be dispatched
				p.pendingNotifications.WriteOne(notificationToAdd)
			}
		}
	}
}
func (p *processorListener) run(stopCh <-chan struct{}) {
func (p *processorListener) run() {
	defer utilruntime.HandleCrash()

	for {
		var next interface{}
		select {
		case <-stopCh:
			func() {
				p.lock.Lock()
				defer p.lock.Unlock()
				p.cond.Broadcast()
			}()
			return
		case next = <-p.nextCh:
		}

	for next := range p.nextCh {
		switch notification := next.(type) {
		case updateNotification:
			p.handler.OnUpdate(notification.oldObj, notification.newObj)
@@ -415,3 +555,30 @@ func (p *processorListener) run(stopCh <-chan struct{}) {
		}
	}
}

// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
// this always returns false.
func (p *processorListener) shouldResync(now time.Time) bool {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	if p.resyncPeriod == 0 {
		return false
	}

	return now.After(p.nextResync) || now.Equal(p.nextResync)
}

func (p *processorListener) determineNextResync(now time.Time) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	p.nextResync = now.Add(p.resyncPeriod)
}

func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	p.resyncPeriod = resyncPeriod
}
6
vendor/k8s.io/client-go/tools/cache/store.go
generated
vendored
@@ -20,7 +20,7 @@ import (
	"fmt"
	"strings"

	"k8s.io/client-go/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/meta"
)

// Store is a generic object storage interface. Reflector knows how to watch a server
@@ -172,6 +172,10 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error)
	return c.cacheStorage.Index(indexName, obj)
}

func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
	return c.cacheStorage.IndexKeys(indexName, indexKey)
}

// ListIndexFuncValues returns the list of generated values of an Index func
func (c *cache) ListIndexFuncValues(indexName string) []string {
	return c.cacheStorage.ListIndexFuncValues(indexName)
30
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
generated
vendored
@@ -20,7 +20,7 @@ import (
	"fmt"
	"sync"

	"k8s.io/client-go/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/sets"
)

// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
@@ -43,6 +43,7 @@ type ThreadSafeStore interface {
	ListKeys() []string
	Replace(map[string]interface{}, string)
	Index(indexName string, obj interface{}) ([]interface{}, error)
	IndexKeys(indexName, indexKey string) ([]string, error)
	ListIndexFuncValues(name string) []string
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	GetIndexers() Indexers
@@ -184,6 +185,23 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro
	return list, nil
}

// IndexKeys returns a list of keys that match on the index function.
// IndexKeys is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	index := c.indices[indexName]

	set := index[indexKey]
	return set.List(), nil
}
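A hedged sketch of exercising IndexKeys through the cache package's public surface, assuming the Indexer interface exposes the method at this revision (the diff adds it to the cache type and to ThreadSafeStore):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// An indexer keyed by namespace; MetaNamespaceIndexFunc is the stock
	// helper from this package.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"namespace": cache.MetaNamespaceIndexFunc,
	})

	// IndexKeys(indexName, indexKey) returns the store keys whose index
	// values include indexKey: here, every key in namespace "default"
	// (empty until objects are Added).
	keys, err := indexer.IndexKeys("namespace", "default")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(keys)
}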
func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
	c.lock.RLock()
	defer c.lock.RUnlock()
@@ -223,7 +241,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
|
||||
// updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj
|
||||
// updateIndices must be called from a function that already has a lock on the cache
|
||||
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error {
|
||||
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
|
||||
// if we got an old object, we need to remove it before we add it again
|
||||
if oldObj != nil {
|
||||
c.deleteFromIndices(oldObj, key)
|
||||
|
@@ -231,7 +249,7 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
	for name, indexFunc := range c.indexers {
		indexValues, err := indexFunc(newObj)
		if err != nil {
			return err
			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
		}
		index := c.indices[name]
		if index == nil {
@@ -248,16 +266,15 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
			set.Insert(key)
		}
	}
	return nil
}

// deleteFromIndices removes the object from each of the managed indexes
// it is intended to be called from a function that already has a lock on the cache
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
	for name, indexFunc := range c.indexers {
		indexValues, err := indexFunc(obj)
		if err != nil {
			return err
			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
		}

		index := c.indices[name]
@@ -271,7 +288,6 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
		}
	}
}
	return nil
}

func (c *threadSafeMap) Resync() error {