chore: update docker and k8s
parent 2b5c7f9e91
commit c2d440a914
1283 changed files with 67741 additions and 27918 deletions
3
vendor/k8s.io/client-go/tools/auth/clientauth.go
generated
vendored
|
@ -105,7 +105,7 @@ func LoadFromFile(path string) (*Info, error) {
|
|||
// The fields of client.Config with a corresponding field in the Info are set
|
||||
// with the value from the Info.
|
||||
func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) {
|
||||
var config restclient.Config = c
|
||||
var config = c
|
||||
config.Username = info.User
|
||||
config.Password = info.Password
|
||||
config.CAFile = info.CAFile
|
||||
|
@ -118,6 +118,7 @@ func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error)
|
|||
return config, nil
|
||||
}
|
||||
|
||||
// Complete returns true if the Kubernetes API authorization info is complete.
|
||||
func (info Info) Complete() bool {
|
||||
return len(info.User) > 0 ||
|
||||
len(info.CertFile) > 0 ||
|
||||
|
|
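The hunks above only simplify a variable declaration inside MergeWithConfig and show Complete() in context. For orientation, a minimal sketch of how this package is typically consumed follows; the auth-file path and API server host are made-up placeholders, not values from this commit.

package main

import (
	"fmt"

	restclient "k8s.io/client-go/rest"
	clientauth "k8s.io/client-go/tools/auth"
)

func main() {
	// Load stored credentials from a JSON auth file (placeholder path).
	info, err := clientauth.LoadFromFile("/var/run/kube-auth/info.json")
	if err != nil {
		panic(err)
	}
	// Complete reports whether enough of user/cert/token is present.
	if !info.Complete() {
		panic("auth info is incomplete")
	}
	// Overlay the stored credentials onto a base rest.Config.
	base := restclient.Config{Host: "https://example.invalid:6443"}
	cfg, err := info.MergeWithConfig(base)
	if err != nil {
		panic(err)
	}
	fmt.Println("merged config for", cfg.Host)
}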
72
vendor/k8s.io/client-go/tools/cache/controller.go
generated
vendored
|
@ -28,9 +28,9 @@ import (
|
|||
|
||||
// Config contains all the settings for a Controller.
|
||||
type Config struct {
|
||||
// The queue for your objects; either a FIFO or
|
||||
// a DeltaFIFO. Your Process() function should accept
|
||||
// the output of this Queue's Pop() method.
|
||||
// The queue for your objects - has to be a DeltaFIFO due to
|
||||
// assumptions in the implementation. Your Process() function
|
||||
// should accept the output of this Queue's Pop() method.
|
||||
Queue
|
||||
|
||||
// Something that can list and watch your objects.
|
||||
|
@ -285,45 +285,7 @@ func NewInformer(
|
|||
// This will hold the client state, as we know it.
|
||||
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
|
||||
|
||||
// This will hold incoming changes. Note how we pass clientState in as a
|
||||
// KeyLister, that way resync operations will result in the correct set
|
||||
// of update/delete deltas.
|
||||
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
|
||||
|
||||
cfg := &Config{
|
||||
Queue: fifo,
|
||||
ListerWatcher: lw,
|
||||
ObjectType: objType,
|
||||
FullResyncPeriod: resyncPeriod,
|
||||
RetryOnError: false,
|
||||
|
||||
Process: func(obj interface{}) error {
|
||||
// from oldest to newest
|
||||
for _, d := range obj.(Deltas) {
|
||||
switch d.Type {
|
||||
case Sync, Added, Updated:
|
||||
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
|
||||
if err := clientState.Update(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnUpdate(old, d.Object)
|
||||
} else {
|
||||
if err := clientState.Add(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnAdd(d.Object)
|
||||
}
|
||||
case Deleted:
|
||||
if err := clientState.Delete(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnDelete(d.Object)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return clientState, New(cfg)
|
||||
return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
|
||||
}
|
||||
|
||||
// NewIndexerInformer returns a Indexer and a controller for populating the index
|
||||
|
@ -352,6 +314,30 @@ func NewIndexerInformer(
|
|||
// This will hold the client state, as we know it.
|
||||
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
|
||||
|
||||
return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
|
||||
}
|
||||
|
||||
// newInformer returns a controller for populating the store while also
|
||||
// providing event notifications.
|
||||
//
|
||||
// Parameters
|
||||
// * lw is list and watch functions for the source of the resource you want to
|
||||
// be informed of.
|
||||
// * objType is an object of the type that you expect to receive.
|
||||
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
|
||||
// calls, even if nothing changed). Otherwise, re-list will be delayed as
|
||||
// long as possible (until the upstream source closes the watch or times out,
|
||||
// or you stop the controller).
|
||||
// * h is the object you want notifications sent to.
|
||||
// * clientState is the store you want to populate
|
||||
//
|
||||
func newInformer(
|
||||
lw ListerWatcher,
|
||||
objType runtime.Object,
|
||||
resyncPeriod time.Duration,
|
||||
h ResourceEventHandler,
|
||||
clientState Store,
|
||||
) Controller {
|
||||
// This will hold incoming changes. Note how we pass clientState in as a
|
||||
// KeyLister, that way resync operations will result in the correct set
|
||||
// of update/delete deltas.
|
||||
|
@ -390,5 +376,5 @@ func NewIndexerInformer(
|
|||
return nil
|
||||
},
|
||||
}
|
||||
return clientState, New(cfg)
|
||||
return New(cfg)
|
||||
}
|
||||
|
|
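The visible change in this file is that NewInformer's inline Process loop moves into a shared newInformer helper, which NewIndexerInformer now reuses. Below is a minimal, hedged sketch of consuming NewInformer from application code; the kubeconfig path, namespace, and resync period are assumptions for illustration only.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig; the path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// List/watch pods in the default namespace.
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceDefault, fields.Everything())

	// NewInformer returns the local Store plus a Controller that keeps it
	// filled through the DeltaFIFO Process loop shown in the diff above.
	store, controller := cache.NewInformer(lw, &v1.Pod{}, 30*time.Second, cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Printf("added %T\n", obj) },
		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Printf("updated %T\n", newObj) },
		DeleteFunc: func(obj interface{}) { fmt.Printf("deleted %T\n", obj) },
	})

	stop := make(chan struct{})
	go controller.Run(stop)
	_ = store // store.List() / store.GetByKey() read the cached state
	select {} // block for the demo; close(stop) would end the controller
}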
36
vendor/k8s.io/client-go/tools/cache/delta_fifo.go
generated
vendored
|
@ -23,7 +23,7 @@ import (
|
|||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// NewDeltaFIFO returns a Store which can be used process changes to items.
|
||||
|
@ -320,17 +320,15 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
|
|||
newDeltas := append(f.items[id], Delta{actionType, obj})
|
||||
newDeltas = dedupDeltas(newDeltas)
|
||||
|
||||
_, exists := f.items[id]
|
||||
if len(newDeltas) > 0 {
|
||||
if !exists {
|
||||
if _, exists := f.items[id]; !exists {
|
||||
f.queue = append(f.queue, id)
|
||||
}
|
||||
f.items[id] = newDeltas
|
||||
f.cond.Broadcast()
|
||||
} else if exists {
|
||||
// We need to remove this from our map (extra items
|
||||
// in the queue are ignored if they are not in the
|
||||
// map).
|
||||
} else {
|
||||
// We need to remove this from our map (extra items in the queue are
|
||||
// ignored if they are not in the map).
|
||||
delete(f.items, id)
|
||||
}
|
||||
return nil
|
||||
|
@ -348,9 +346,6 @@ func (f *DeltaFIFO) List() []interface{} {
|
|||
func (f *DeltaFIFO) listLocked() []interface{} {
|
||||
list := make([]interface{}, 0, len(f.items))
|
||||
for _, item := range f.items {
|
||||
// Copy item's slice so operations on this slice
|
||||
// won't interfere with the object we return.
|
||||
item = copyDeltas(item)
|
||||
list = append(list, item.Newest().Object)
|
||||
}
|
||||
return list
|
||||
|
@ -398,10 +393,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
|
|||
func (f *DeltaFIFO) IsClosed() bool {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
if f.closed {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return f.closed
|
||||
}
|
||||
|
||||
// Pop blocks until an item is added to the queue, and then returns it. If
|
||||
|
@ -432,10 +424,10 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
|||
}
|
||||
id := f.queue[0]
|
||||
f.queue = f.queue[1:]
|
||||
item, ok := f.items[id]
|
||||
if f.initialPopulationCount > 0 {
|
||||
f.initialPopulationCount--
|
||||
}
|
||||
item, ok := f.items[id]
|
||||
if !ok {
|
||||
// Item may have been deleted subsequently.
|
||||
continue
|
||||
|
@ -474,6 +466,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
|||
|
||||
if f.knownObjects == nil {
|
||||
// Do deletion detection against our own list.
|
||||
queuedDeletions := 0
|
||||
for k, oldItem := range f.items {
|
||||
if keys.Has(k) {
|
||||
continue
|
||||
|
@ -482,6 +475,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
|||
if n := oldItem.Newest(); n != nil {
|
||||
deletedObj = n.Object
|
||||
}
|
||||
queuedDeletions++
|
||||
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -489,7 +483,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
|||
|
||||
if !f.populated {
|
||||
f.populated = true
|
||||
f.initialPopulationCount = len(list)
|
||||
// While there shouldn't be any queued deletions in the initial
|
||||
// population of the queue, it's better to be on the safe side.
|
||||
f.initialPopulationCount = len(list) + queuedDeletions
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -506,10 +502,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
|||
deletedObj, exists, err := f.knownObjects.GetByKey(k)
|
||||
if err != nil {
|
||||
deletedObj = nil
|
||||
glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
|
||||
klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
|
||||
} else if !exists {
|
||||
deletedObj = nil
|
||||
glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
|
||||
klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
|
||||
}
|
||||
queuedDeletions++
|
||||
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
|
||||
|
@ -553,10 +549,10 @@ func (f *DeltaFIFO) syncKey(key string) error {
|
|||
func (f *DeltaFIFO) syncKeyLocked(key string) error {
|
||||
obj, exists, err := f.knownObjects.GetByKey(key)
|
||||
if err != nil {
|
||||
glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
|
||||
klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
|
||||
return nil
|
||||
} else if !exists {
|
||||
glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
|
||||
klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
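Beyond the glog-to-klog switch, these hunks tighten queueActionLocked, collapse IsClosed to a single return, and make Replace count queued deletions into initialPopulationCount. A small sketch of driving a DeltaFIFO by hand follows, mirroring the Deltas-per-key shape an informer's Process function consumes; the Pod object is a stand-in.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// clientState plays the informer's local store / key-lister role.
	clientState := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, clientState)

	// Adding an object records an Added delta under its namespace/name key.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	if err := fifo.Add(pod); err != nil {
		panic(err)
	}

	// Pop hands back the accumulated Deltas for one key, oldest first.
	if _, err := fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Printf("delta %s for %T\n", d.Type, d.Object)
		}
		return nil
	}); err != nil {
		panic(err)
	}
}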
60
vendor/k8s.io/client-go/tools/cache/expiration_cache.go
generated
vendored
|
@ -20,8 +20,8 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// ExpirationCache implements the store interface
|
||||
|
@ -48,7 +48,7 @@ type ExpirationCache struct {
|
|||
// ExpirationPolicy dictates when an object expires. Currently only abstracted out
|
||||
// so unittests don't rely on the system clock.
|
||||
type ExpirationPolicy interface {
|
||||
IsExpired(obj *timestampedEntry) bool
|
||||
IsExpired(obj *TimestampedEntry) bool
|
||||
}
|
||||
|
||||
// TTLPolicy implements a ttl based ExpirationPolicy.
|
||||
|
@ -63,26 +63,29 @@ type TTLPolicy struct {
|
|||
|
||||
// IsExpired returns true if the given object is older than the ttl, or it can't
|
||||
// determine its age.
|
||||
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
|
||||
return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
|
||||
func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
|
||||
return p.Ttl > 0 && p.Clock.Since(obj.Timestamp) > p.Ttl
|
||||
}
|
||||
|
||||
// timestampedEntry is the only type allowed in a ExpirationCache.
|
||||
type timestampedEntry struct {
|
||||
obj interface{}
|
||||
timestamp time.Time
|
||||
// TimestampedEntry is the only type allowed in a ExpirationCache.
|
||||
// Keep in mind that it is not safe to share timestamps between computers.
|
||||
// Behavior may be inconsistent if you get a timestamp from the API Server and
|
||||
// use it on the client machine as part of your ExpirationCache.
|
||||
type TimestampedEntry struct {
|
||||
Obj interface{}
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
// getTimestampedEntry returns the timestampedEntry stored under the given key.
|
||||
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
|
||||
// getTimestampedEntry returns the TimestampedEntry stored under the given key.
|
||||
func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
|
||||
item, _ := c.cacheStorage.Get(key)
|
||||
if tsEntry, ok := item.(*timestampedEntry); ok {
|
||||
if tsEntry, ok := item.(*TimestampedEntry); ok {
|
||||
return tsEntry, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
|
||||
// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
|
||||
// already expired. It holds a write lock across deletion.
|
||||
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
|
||||
// Prevent all inserts from the time we deem an item as "expired" to when we
|
||||
|
@ -95,11 +98,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
|
|||
return nil, false
|
||||
}
|
||||
if c.expirationPolicy.IsExpired(timestampedItem) {
|
||||
glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
|
||||
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
|
||||
c.cacheStorage.Delete(key)
|
||||
return nil, false
|
||||
}
|
||||
return timestampedItem.obj, true
|
||||
return timestampedItem.Obj, true
|
||||
}
|
||||
|
||||
// GetByKey returns the item stored under the key, or sets exists=false.
|
||||
|
@ -126,7 +129,7 @@ func (c *ExpirationCache) List() []interface{} {
|
|||
|
||||
list := make([]interface{}, 0, len(items))
|
||||
for _, item := range items {
|
||||
obj := item.(*timestampedEntry).obj
|
||||
obj := item.(*TimestampedEntry).Obj
|
||||
if key, err := c.keyFunc(obj); err != nil {
|
||||
list = append(list, obj)
|
||||
} else if obj, exists := c.getOrExpire(key); exists {
|
||||
|
@ -144,14 +147,14 @@ func (c *ExpirationCache) ListKeys() []string {
|
|||
// Add timestamps an item and inserts it into the cache, overwriting entries
|
||||
// that might exist under the same key.
|
||||
func (c *ExpirationCache) Add(obj interface{}) error {
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
|
||||
key, err := c.keyFunc(obj)
|
||||
if err != nil {
|
||||
return KeyError{obj, err}
|
||||
}
|
||||
c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
|
||||
c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now()})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -163,12 +166,12 @@ func (c *ExpirationCache) Update(obj interface{}) error {
|
|||
|
||||
// Delete removes an item from the cache.
|
||||
func (c *ExpirationCache) Delete(obj interface{}) error {
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
key, err := c.keyFunc(obj)
|
||||
if err != nil {
|
||||
return KeyError{obj, err}
|
||||
}
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
c.cacheStorage.Delete(key)
|
||||
return nil
|
||||
}
|
||||
|
@ -177,17 +180,17 @@ func (c *ExpirationCache) Delete(obj interface{}) error {
|
|||
// before attempting the replace operation. The replace operation will
|
||||
// delete the contents of the ExpirationCache `c`.
|
||||
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
items := map[string]interface{}{}
|
||||
items := make(map[string]interface{}, len(list))
|
||||
ts := c.clock.Now()
|
||||
for _, item := range list {
|
||||
key, err := c.keyFunc(item)
|
||||
if err != nil {
|
||||
return KeyError{item, err}
|
||||
}
|
||||
items[key] = &timestampedEntry{item, ts}
|
||||
items[key] = &TimestampedEntry{item, ts}
|
||||
}
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
c.cacheStorage.Replace(items, resourceVersion)
|
||||
return nil
|
||||
}
|
||||
|
@ -199,10 +202,15 @@ func (c *ExpirationCache) Resync() error {
|
|||
|
||||
// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
|
||||
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
|
||||
return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
|
||||
}
|
||||
|
||||
// NewExpirationStore creates and returns a ExpirationCache for a given policy
|
||||
func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
|
||||
return &ExpirationCache{
|
||||
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
|
||||
keyFunc: keyFunc,
|
||||
clock: clock.RealClock{},
|
||||
expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
|
||||
expirationPolicy: expirationPolicy,
|
||||
}
|
||||
}
|
||||
|
|
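The interesting part of this file's diff is that timestampedEntry becomes the exported TimestampedEntry (with exported Obj and Timestamp fields), so ExpirationPolicy implementations can live outside the package; lock acquisition in Add/Delete/Replace also moves after key computation. A hedged sketch of a custom policy follows; the one-hour cutoff is an arbitrary illustration, not part of the commit.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

// cutoffPolicy expires every entry inserted before a fixed point in time.
// Writing this outside the cache package is only possible because
// TimestampedEntry and its Timestamp field are now exported.
type cutoffPolicy struct {
	cutoff time.Time
}

func (p *cutoffPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
	return entry.Timestamp.Before(p.cutoff)
}

func main() {
	custom := cache.NewExpirationStore(cache.MetaNamespaceKeyFunc, &cutoffPolicy{cutoff: time.Now().Add(-time.Hour)})
	// The plain TTL store remains the common case.
	ttl := cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 10*time.Minute)
	fmt.Println(len(custom.List()), len(ttl.List()))
}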
2
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
generated
vendored
|
@ -38,7 +38,7 @@ type FakeExpirationPolicy struct {
|
|||
RetrieveKeyFunc KeyFunc
|
||||
}
|
||||
|
||||
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
|
||||
func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
|
||||
key, _ := p.RetrieveKeyFunc(obj)
|
||||
return !p.NeverExpire.Has(key)
|
||||
}
|
||||
|
|
2
vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
generated
vendored
|
@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error {
|
|||
// Update calls the custom Update function if defined
|
||||
func (f *FakeCustomStore) Update(obj interface{}) error {
|
||||
if f.UpdateFunc != nil {
|
||||
return f.Update(obj)
|
||||
return f.UpdateFunc(obj)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
2
vendor/k8s.io/client-go/tools/cache/fifo.go
generated
vendored
|
@ -297,7 +297,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
|||
// after calling this function. f's queue is reset, too; upon return, it
|
||||
// will contain the items in the map, in no particular order.
|
||||
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
items := map[string]interface{}{}
|
||||
items := make(map[string]interface{}, len(list))
|
||||
for _, item := range list {
|
||||
key, err := f.keyFunc(item)
|
||||
if err != nil {
|
||||
|
|
2
vendor/k8s.io/client-go/tools/cache/heap.go
generated
vendored
|
@ -204,7 +204,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// addIfNotPresentLocked assumes the lock is already held and adds the the provided
|
||||
// addIfNotPresentLocked assumes the lock is already held and adds the provided
|
||||
// item to the queue if it does not already exist.
|
||||
func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
|
||||
if _, exists := h.data.items[key]; exists {
|
||||
|
|
24
vendor/k8s.io/client-go/tools/cache/listers.go
generated
vendored
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package cache
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
|
@ -31,7 +31,14 @@ import (
|
|||
type AppendFunc func(interface{})
|
||||
|
||||
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
|
||||
selectAll := selector.Empty()
|
||||
for _, m := range store.List() {
|
||||
if selectAll {
|
||||
// Avoid computing labels of the objects to speed up common flows
|
||||
// of listing all objects.
|
||||
appendFn(m)
|
||||
continue
|
||||
}
|
||||
metadata, err := meta.Accessor(m)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -44,8 +51,15 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
|
|||
}
|
||||
|
||||
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
|
||||
selectAll := selector.Empty()
|
||||
if namespace == metav1.NamespaceAll {
|
||||
for _, m := range indexer.List() {
|
||||
if selectAll {
|
||||
// Avoid computing labels of the objects to speed up common flows
|
||||
// of listing all objects.
|
||||
appendFn(m)
|
||||
continue
|
||||
}
|
||||
metadata, err := meta.Accessor(m)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -60,7 +74,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
|
|||
items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
|
||||
if err != nil {
|
||||
// Ignore error; do slow search without index.
|
||||
glog.Warningf("can not retrieve list of objects using index : %v", err)
|
||||
klog.Warningf("can not retrieve list of objects using index : %v", err)
|
||||
for _, m := range indexer.List() {
|
||||
metadata, err := meta.Accessor(m)
|
||||
if err != nil {
|
||||
|
@ -74,6 +88,12 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
|
|||
return nil
|
||||
}
|
||||
for _, m := range items {
|
||||
if selectAll {
|
||||
// Avoid computing labels of the objects to speed up common flows
|
||||
// of listing all objects.
|
||||
appendFn(m)
|
||||
continue
|
||||
}
|
||||
metadata, err := meta.Accessor(m)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
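These hunks add a selectAll fast path that skips meta.Accessor and label matching whenever the selector is empty. The sketch below shows the two paths from the caller's side; the Pod object and its label are invented for the example.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	_ = store.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "demo", Namespace: "default", Labels: map[string]string{"app": "web"},
	}})

	// Empty selector: the new fast path appends every object without
	// touching its labels.
	var all []interface{}
	_ = cache.ListAll(store, labels.Everything(), func(obj interface{}) {
		all = append(all, obj)
	})

	// Non-empty selector: objects still go through meta.Accessor and
	// label matching, as before.
	var web []interface{}
	_ = cache.ListAll(store, labels.SelectorFromSet(labels.Set{"app": "web"}), func(obj interface{}) {
		web = append(web, obj)
	})
	fmt.Println(len(all), len(web))
}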
97
vendor/k8s.io/client-go/tools/cache/listwatch.go
generated
vendored
|
@ -18,27 +18,34 @@ package cache
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/pager"
|
||||
)
|
||||
|
||||
// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
|
||||
type ListerWatcher interface {
|
||||
// Lister is any object that knows how to perform an initial list.
|
||||
type Lister interface {
|
||||
// List should return a list type object; the Items field will be extracted, and the
|
||||
// ResourceVersion field will be used to start the watch in the right place.
|
||||
List(options metav1.ListOptions) (runtime.Object, error)
|
||||
}
|
||||
|
||||
// Watcher is any object that knows how to start a watch on a resource.
|
||||
type Watcher interface {
|
||||
// Watch should begin a watch at the specified version.
|
||||
Watch(options metav1.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
|
||||
type ListerWatcher interface {
|
||||
Lister
|
||||
Watcher
|
||||
}
|
||||
|
||||
// ListFunc knows how to list resources
|
||||
type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
|
||||
|
||||
|
@ -93,13 +100,6 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string,
|
|||
return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
|
||||
}
|
||||
|
||||
func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
|
||||
if options.TimeoutSeconds != nil {
|
||||
return time.Duration(*options.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// List a set of apiserver resources
|
||||
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if !lw.DisableChunking {
|
||||
|
@ -112,76 +112,3 @@ func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
|
|||
func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
|
||||
return lw.WatchFunc(options)
|
||||
}
|
||||
|
||||
// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
|
||||
// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
|
||||
// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
|
||||
func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
|
||||
if len(conditions) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
list, err := lw.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
initialItems, err := meta.ExtractList(list)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// use the initial items as simulated "adds"
|
||||
var lastEvent *watch.Event
|
||||
currIndex := 0
|
||||
passedConditions := 0
|
||||
for _, condition := range conditions {
|
||||
// check the next condition against the previous event and short circuit waiting for the next watch
|
||||
if lastEvent != nil {
|
||||
done, err := condition(*lastEvent)
|
||||
if err != nil {
|
||||
return lastEvent, err
|
||||
}
|
||||
if done {
|
||||
passedConditions = passedConditions + 1
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
ConditionSucceeded:
|
||||
for currIndex < len(initialItems) {
|
||||
lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
|
||||
currIndex++
|
||||
|
||||
done, err := condition(*lastEvent)
|
||||
if err != nil {
|
||||
return lastEvent, err
|
||||
}
|
||||
if done {
|
||||
passedConditions = passedConditions + 1
|
||||
break ConditionSucceeded
|
||||
}
|
||||
}
|
||||
}
|
||||
if passedConditions == len(conditions) {
|
||||
return lastEvent, nil
|
||||
}
|
||||
remainingConditions := conditions[passedConditions:]
|
||||
|
||||
metaObj, err := meta.ListAccessor(list)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currResourceVersion := metaObj.GetResourceVersion()
|
||||
|
||||
watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
evt, err := watch.Until(timeout, watchInterface, remainingConditions...)
|
||||
if err == watch.ErrWatchClosed {
|
||||
// present a consistent error interface to callers
|
||||
err = wait.ErrWaitTimeout
|
||||
}
|
||||
return evt, err
|
||||
}
|
||||
|
|
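ListerWatcher is split into embedded Lister and Watcher interfaces, timeoutFromListOptions is dropped, and the deprecated ListWatchUntil helper is removed from this file. A sketch of satisfying the interface with a hand-built ListWatch follows; it assumes the clientset signatures of this era (List/Watch without a context argument), and running in-cluster is an assumption of the example.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

// newPodListWatch builds a cache.ListWatch from explicit List/Watch funcs.
// Having both funcs, it satisfies the new Lister and Watcher interfaces and
// therefore ListerWatcher.
func newPodListWatch(client kubernetes.Interface, namespace string) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}
}

func main() {
	cfg, err := rest.InClusterConfig() // assumes the program runs inside a pod
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	var lw cache.ListerWatcher = newPodListWatch(client, metav1.NamespaceDefault)
	_ = lw // hand lw to an informer or reflector
}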
4
vendor/k8s.io/client-go/tools/cache/mutation_cache.go
generated
vendored
|
@ -22,7 +22,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er
|
|||
}
|
||||
elements, err := fn(updated)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
|
||||
klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
|
||||
continue
|
||||
}
|
||||
for _, inIndex := range elements {
|
||||
|
|
4
vendor/k8s.io/client-go/tools/cache/mutation_detector.go
generated
vendored
|
@ -24,7 +24,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
|
@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector {
|
|||
if !mutationDetectionEnabled {
|
||||
return dummyMutationDetector{}
|
||||
}
|
||||
glog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
|
||||
klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
|
||||
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
|
||||
}
|
||||
|
||||
|
|
214
vendor/k8s.io/client-go/tools/cache/reflector.go
generated
vendored
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -24,25 +25,22 @@ import (
|
|||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
goruntime "runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/naming"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/tools/pager"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/utils/trace"
|
||||
)
|
||||
|
||||
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
|
||||
|
@ -71,20 +69,21 @@ type Reflector struct {
|
|||
lastSyncResourceVersion string
|
||||
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
|
||||
lastSyncResourceVersionMutex sync.RWMutex
|
||||
// WatchListPageSize is the requested chunk size of initial and resync watch lists.
|
||||
// Defaults to pager.PageSize.
|
||||
WatchListPageSize int64
|
||||
}
|
||||
|
||||
var (
|
||||
// We try to spread the load on apiserver by setting timeouts for
|
||||
// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
|
||||
// However, it can be modified to avoid periodic resync to break the
|
||||
// TCP connection.
|
||||
minWatchTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
|
||||
// The indexer is configured to key on namespace
|
||||
func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
|
||||
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
|
||||
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
|
||||
reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
|
||||
return indexer, reflector
|
||||
}
|
||||
|
@ -96,20 +95,13 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
|
|||
// resyncPeriod, so that you can use reflectors to periodically process everything as
|
||||
// well as incrementally processing the things that change.
|
||||
func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod)
|
||||
return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
|
||||
}
|
||||
|
||||
// reflectorDisambiguator is used to disambiguate started reflectors.
|
||||
// initialized to an unstable value to ensure meaning isn't attributed to the suffix.
|
||||
var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
|
||||
|
||||
// NewNamedReflector same as NewReflector, but with a specified name for logging
|
||||
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
|
||||
r := &Reflector{
|
||||
name: name,
|
||||
// we need this to be unique per process (some names are still the same) but obvious who it belongs to
|
||||
metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
|
||||
name: name,
|
||||
listerWatcher: lw,
|
||||
store: store,
|
||||
expectedType: reflect.TypeOf(expectedType),
|
||||
|
@ -120,86 +112,14 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
|
|||
return r
|
||||
}
|
||||
|
||||
func makeValidPrometheusMetricLabel(in string) string {
|
||||
// this isn't perfect, but it removes our common characters
|
||||
return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
|
||||
}
|
||||
|
||||
// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
|
||||
// call chains to NewReflector, so they'd be low entropy names for reflectors
|
||||
var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"}
|
||||
|
||||
// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages
|
||||
// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging
|
||||
func getDefaultReflectorName(ignoredPackages ...string) string {
|
||||
name := "????"
|
||||
const maxStack = 10
|
||||
for i := 1; i < maxStack; i++ {
|
||||
_, file, line, ok := goruntime.Caller(i)
|
||||
if !ok {
|
||||
file, line, ok = extractStackCreator()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
i += maxStack
|
||||
}
|
||||
if hasPackage(file, ignoredPackages) {
|
||||
continue
|
||||
}
|
||||
|
||||
file = trimPackagePrefix(file)
|
||||
name = fmt.Sprintf("%s:%d", file, line)
|
||||
break
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// hasPackage returns true if the file is in one of the ignored packages.
|
||||
func hasPackage(file string, ignoredPackages []string) bool {
|
||||
for _, ignoredPackage := range ignoredPackages {
|
||||
if strings.Contains(file, ignoredPackage) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// trimPackagePrefix reduces duplicate values off the front of a package name.
|
||||
func trimPackagePrefix(file string) string {
|
||||
if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 {
|
||||
return file[l+len("k8s.io/client-go/"):]
|
||||
}
|
||||
if l := strings.LastIndex(file, "/src/"); l >= 0 {
|
||||
return file[l+5:]
|
||||
}
|
||||
if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
|
||||
return file[l+1:]
|
||||
}
|
||||
return file
|
||||
}
|
||||
|
||||
var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
|
||||
|
||||
// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
|
||||
// if the creator cannot be located.
|
||||
// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
|
||||
func extractStackCreator() (string, int, bool) {
|
||||
stack := debug.Stack()
|
||||
matches := stackCreator.FindStringSubmatch(string(stack))
|
||||
if matches == nil || len(matches) != 4 {
|
||||
return "", 0, false
|
||||
}
|
||||
line, err := strconv.Atoi(matches[3])
|
||||
if err != nil {
|
||||
return "", 0, false
|
||||
}
|
||||
return matches[2], line, true
|
||||
}
|
||||
var internalPackages = []string{"client-go/tools/cache/"}
|
||||
|
||||
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
|
||||
// Run will exit when stopCh is closed.
|
||||
func (r *Reflector) Run(stopCh <-chan struct{}) {
|
||||
glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
|
||||
klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
|
||||
wait.Until(func() {
|
||||
if err := r.ListAndWatch(stopCh); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
|
@ -237,34 +157,71 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
|
|||
// and then use the resource version to watch.
|
||||
// It returns error if ListAndWatch didn't even try to initialize watch.
|
||||
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
||||
glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
|
||||
klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
|
||||
var resourceVersion string
|
||||
|
||||
// Explicitly set "0" as resource version - it's fine for the List()
|
||||
// to be served from cache and potentially be delayed relative to
|
||||
// etcd contents. Reflector framework will catch up via Watch() eventually.
|
||||
options := metav1.ListOptions{ResourceVersion: "0"}
|
||||
r.metrics.numberOfLists.Inc()
|
||||
start := r.clock.Now()
|
||||
list, err := r.listerWatcher.List(options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
|
||||
|
||||
if err := func() error {
|
||||
initTrace := trace.New("Reflector " + r.name + " ListAndWatch")
|
||||
defer initTrace.LogIfLong(10 * time.Second)
|
||||
var list runtime.Object
|
||||
var err error
|
||||
listCh := make(chan struct{}, 1)
|
||||
panicCh := make(chan interface{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
panicCh <- r
|
||||
}
|
||||
}()
|
||||
// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
|
||||
// list request will return the full response.
|
||||
pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
|
||||
return r.listerWatcher.List(opts)
|
||||
}))
|
||||
if r.WatchListPageSize != 0 {
|
||||
pager.PageSize = r.WatchListPageSize
|
||||
}
|
||||
// Pager falls back to full list if paginated list calls fail due to an "Expired" error.
|
||||
list, err = pager.List(context.Background(), options)
|
||||
close(listCh)
|
||||
}()
|
||||
select {
|
||||
case <-stopCh:
|
||||
return nil
|
||||
case r := <-panicCh:
|
||||
panic(r)
|
||||
case <-listCh:
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
|
||||
}
|
||||
initTrace.Step("Objects listed")
|
||||
listMetaInterface, err := meta.ListAccessor(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
|
||||
}
|
||||
resourceVersion = listMetaInterface.GetResourceVersion()
|
||||
initTrace.Step("Resource version extracted")
|
||||
items, err := meta.ExtractList(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
|
||||
}
|
||||
initTrace.Step("Objects extracted")
|
||||
if err := r.syncWith(items, resourceVersion); err != nil {
|
||||
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
|
||||
}
|
||||
initTrace.Step("SyncWith done")
|
||||
r.setLastSyncResourceVersion(resourceVersion)
|
||||
initTrace.Step("Resource version updated")
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return err
|
||||
}
|
||||
r.metrics.listDuration.Observe(time.Since(start).Seconds())
|
||||
listMetaInterface, err := meta.ListAccessor(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
|
||||
}
|
||||
resourceVersion = listMetaInterface.GetResourceVersion()
|
||||
items, err := meta.ExtractList(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
|
||||
}
|
||||
r.metrics.numberOfItemsInList.Observe(float64(len(items)))
|
||||
if err := r.syncWith(items, resourceVersion); err != nil {
|
||||
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
|
||||
}
|
||||
r.setLastSyncResourceVersion(resourceVersion)
|
||||
|
||||
resyncerrc := make(chan error, 1)
|
||||
cancelCh := make(chan struct{})
|
||||
|
@ -283,7 +240,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
return
|
||||
}
|
||||
if r.ShouldResync == nil || r.ShouldResync() {
|
||||
glog.V(4).Infof("%s: forcing resync", r.name)
|
||||
klog.V(4).Infof("%s: forcing resync", r.name)
|
||||
if err := r.store.Resync(); err != nil {
|
||||
resyncerrc <- err
|
||||
return
|
||||
|
@ -308,16 +265,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
// We want to avoid situations of hanging watchers. Stop any wachers that do not
|
||||
// receive any events within the timeout window.
|
||||
TimeoutSeconds: &timeoutSeconds,
|
||||
// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
|
||||
// Reflector doesn't assume bookmarks are returned at all (if the server do not support
|
||||
// watch bookmarks, it will ignore this field).
|
||||
// Disabled in Alpha release of watch bookmarks feature.
|
||||
AllowWatchBookmarks: false,
|
||||
}
|
||||
|
||||
r.metrics.numberOfWatches.Inc()
|
||||
w, err := r.listerWatcher.Watch(options)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case io.EOF:
|
||||
// watch closed normally
|
||||
case io.ErrUnexpectedEOF:
|
||||
glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
|
||||
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
|
||||
}
|
||||
|
@ -338,7 +299,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
|||
|
||||
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
|
||||
if err != errorStopRequested {
|
||||
glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
|
||||
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -362,11 +323,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err
|
|||
// Stopping the watcher should be idempotent and if we return from this function there's no way
|
||||
// we're coming back in with the same watch interface.
|
||||
defer w.Stop()
|
||||
// update metrics
|
||||
defer func() {
|
||||
r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
|
||||
r.metrics.watchDuration.Observe(time.Since(start).Seconds())
|
||||
}()
|
||||
|
||||
loop:
|
||||
for {
|
||||
|
@ -411,6 +367,8 @@ loop:
|
|||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
|
||||
}
|
||||
case watch.Bookmark:
|
||||
// A `Bookmark` means watch has synced here, just update the resourceVersion
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
|
||||
}
|
||||
|
@ -420,12 +378,11 @@ loop:
|
|||
}
|
||||
}
|
||||
|
||||
watchDuration := r.clock.Now().Sub(start)
|
||||
watchDuration := r.clock.Since(start)
|
||||
if watchDuration < 1*time.Second && eventCount == 0 {
|
||||
r.metrics.numberOfShortWatches.Inc()
|
||||
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
|
||||
}
|
||||
glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
|
||||
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -441,9 +398,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
|
|||
r.lastSyncResourceVersionMutex.Lock()
|
||||
defer r.lastSyncResourceVersionMutex.Unlock()
|
||||
r.lastSyncResourceVersion = v
|
||||
|
||||
rv, err := strconv.Atoi(v)
|
||||
if err == nil {
|
||||
r.metrics.lastResourceVersion.Set(float64(rv))
|
||||
}
|
||||
}
|
||||
|
|
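This file sees the largest rewrite: per-reflector metrics plumbing goes away, names come from naming.GetNameFromCallsite, the initial list runs through the pager (honoring the new exported WatchListPageSize field) inside a trace, and watch.Bookmark events are tolerated. A self-contained sketch of running a Reflector into a Store follows; the stub ListerWatcher, pod name, page size, and one-second sleep are all inventions of the example.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A stub ListerWatcher serving a fixed pod list and an idle watch; real
	// callers would use NewListWatchFromClient or a hand-built ListWatch.
	fakeWatch := watch.NewFake()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return &v1.PodList{
				ListMeta: metav1.ListMeta{ResourceVersion: "1"},
				Items:    []v1.Pod{{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}},
			}, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return fakeWatch, nil
		},
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// The reflector lists (through the pager), syncs the store, then watches.
	r := cache.NewReflector(lw, &v1.Pod{}, store, time.Minute)
	r.WatchListPageSize = 500 // new field: chunk size for paginated relists

	stopCh := make(chan struct{})
	go r.Run(stopCh)
	time.Sleep(time.Second) // crude for the demo: let the initial list land
	fmt.Println("cached keys:", store.ListKeys())
	close(stopCh)
}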
17
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
generated
vendored
|
@ -94,23 +94,6 @@ var metricsFactory = struct {
|
|||
metricsProvider: noopMetricsProvider{},
|
||||
}
|
||||
|
||||
func newReflectorMetrics(name string) *reflectorMetrics {
|
||||
var ret *reflectorMetrics
|
||||
if len(name) == 0 {
|
||||
return ret
|
||||
}
|
||||
return &reflectorMetrics{
|
||||
numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name),
|
||||
listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name),
|
||||
numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name),
|
||||
numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name),
|
||||
numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
|
||||
watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name),
|
||||
numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
|
||||
lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
|
||||
}
|
||||
}
|
||||
|
||||
// SetReflectorMetricsProvider sets the metrics provider
|
||||
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
|
||||
metricsFactory.setProviders.Do(func() {
|
||||
|
|
105
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored
|
@ -25,37 +25,90 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/buffer"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/buffer"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
|
||||
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
|
||||
// one behavior change compared to a standard Informer. When you receive a notification, the cache
|
||||
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
|
||||
// on the contents of the cache exactly matching the notification you've received in handler
|
||||
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
|
||||
// has advantages over the broadcaster since it allows us to share a common cache across many
|
||||
// controllers. Extending the broadcaster would have required us keep duplicate caches for each
|
||||
// watch.
|
||||
// SharedInformer provides eventually consistent linkage of its
|
||||
// clients to the authoritative state of a given collection of
|
||||
// objects. An object is identified by its API group, kind/resource,
|
||||
// namespace, and name. One SharedInfomer provides linkage to objects
|
||||
// of a particular API group and kind/resource. The linked object
|
||||
// collection of a SharedInformer may be further restricted to one
|
||||
// namespace and/or by label selector and/or field selector.
|
||||
//
|
||||
// The authoritative state of an object is what apiservers provide
|
||||
// access to, and an object goes through a strict sequence of states.
|
||||
// A state is either "absent" or present with a ResourceVersion and
|
||||
// other appropriate content.
|
||||
//
|
||||
// A SharedInformer maintains a local cache, exposed by Store(), of
|
||||
// the state of each relevant object. This cache is eventually
|
||||
// consistent with the authoritative state. This means that, unless
|
||||
// prevented by persistent communication problems, if ever a
|
||||
// particular object ID X is authoritatively associated with a state S
|
||||
// then for every SharedInformer I whose collection includes (X, S)
|
||||
// eventually either (1) I's cache associates X with S or a later
|
||||
// state of X, (2) I is stopped, or (3) the authoritative state
|
||||
// service for X terminates. To be formally complete, we say that the
|
||||
// absent state meets any restriction by label selector or field
|
||||
// selector.
|
||||
//
|
||||
// As a simple example, if a collection of objects is henceforeth
|
||||
// unchanging and a SharedInformer is created that links to that
|
||||
// collection then that SharedInformer's cache eventually holds an
|
||||
// exact copy of that collection (unless it is stopped too soon, the
|
||||
// authoritative state service ends, or communication problems between
|
||||
// the two persistently thwart achievement).
|
||||
//
|
||||
// As another simple example, if the local cache ever holds a
|
||||
// non-absent state for some object ID and the object is eventually
|
||||
// removed from the authoritative state then eventually the object is
|
||||
// removed from the local cache (unless the SharedInformer is stopped
|
||||
// too soon, the authoritative state service emnds, or communication
|
||||
// problems persistently thwart the desired result).
|
||||
//
|
||||
// The keys in Store() are of the form namespace/name for namespaced
|
||||
// objects, and are simply the name for non-namespaced objects.
|
||||
//
|
||||
// A client is identified here by a ResourceEventHandler. For every
|
||||
// update to the SharedInformer's local cache and for every client,
|
||||
// eventually either the SharedInformer is stopped or the client is
|
||||
// notified of the update. These notifications happen after the
|
||||
// corresponding cache update and, in the case of a
|
||||
// SharedIndexInformer, after the corresponding index updates. It is
|
||||
// possible that additional cache and index updates happen before such
|
||||
// a prescribed notification. For a given SharedInformer and client,
|
||||
// all notifications are delivered sequentially. For a given
|
||||
// SharedInformer, client, and object ID, the notifications are
|
||||
// delivered in order.
|
||||
//
|
||||
// A delete notification exposes the last locally known non-absent
|
||||
// state, except that its ResourceVersion is replaced with a
|
||||
// ResourceVersion in which the object is actually absent.
|
||||
type SharedInformer interface {
|
||||
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
|
||||
// period. Events to a single handler are delivered sequentially, but there is no coordination
|
||||
// between different handlers.
|
||||
AddEventHandler(handler ResourceEventHandler)
|
||||
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
|
||||
// specified resync period. Events to a single handler are delivered sequentially, but there is
|
||||
// no coordination between different handlers.
|
||||
// AddEventHandlerWithResyncPeriod adds an event handler to the
|
||||
// shared informer using the specified resync period. The resync
|
||||
// operation consists of delivering to the handler a create
|
||||
// notification for every object in the informer's local cache; it
|
||||
// does not add any interactions with the authoritative storage.
|
||||
AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
|
||||
// GetStore returns the Store.
|
||||
// GetStore returns the informer's local cache as a Store.
|
||||
GetStore() Store
|
||||
// GetController gives back a synthetic interface that "votes" to start the informer
|
||||
GetController() Controller
|
||||
// Run starts the shared informer, which will be stopped when stopCh is closed.
|
||||
// Run starts and runs the shared informer, returning after it stops.
|
||||
// The informer will be stopped when stopCh is closed.
|
||||
Run(stopCh <-chan struct{})
|
||||
// HasSynced returns true if the shared informer's store has synced.
|
||||
// HasSynced returns true if the shared informer's store has been
|
||||
// informed by at least one full LIST of the authoritative state
|
||||
// of the informer's object collection. This is unrelated to "resync".
|
||||
HasSynced() bool
|
||||
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
|
||||
// store. The value returned is not synchronized with access to the underlying store and is not
|
||||
|
@ -86,7 +139,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve
|
|||
resyncCheckPeriod: defaultEventHandlerResyncPeriod,
|
||||
defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
|
||||
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
|
||||
clock: realClock,
|
||||
clock: realClock,
|
||||
}
|
||||
return sharedIndexInformer
|
||||
}
|
||||
|
@ -116,11 +169,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
|
|||
},
|
||||
stopCh)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("stop requested")
|
||||
klog.V(2).Infof("stop requested")
|
||||
return false
|
||||
}
|
||||
|
||||
glog.V(4).Infof("caches populated")
|
||||
klog.V(4).Infof("caches populated")
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -279,11 +332,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration {
|
|||
return desired
|
||||
}
|
||||
if check == 0 {
|
||||
glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
|
||||
klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
|
||||
return 0
|
||||
}
|
||||
if desired < check {
|
||||
glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
|
||||
klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
|
||||
return check
|
||||
}
|
||||
return desired
|
||||
|
@ -296,19 +349,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
|
|||
defer s.startedLock.Unlock()
|
||||
|
||||
if s.stopped {
|
||||
glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
|
||||
klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
|
||||
return
|
||||
}
|
||||
|
||||
if resyncPeriod > 0 {
|
||||
if resyncPeriod < minimumResyncPeriod {
|
||||
glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
|
||||
klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
|
||||
resyncPeriod = minimumResyncPeriod
|
||||
}
|
||||
|
||||
if resyncPeriod < s.resyncCheckPeriod {
|
||||
if s.started {
|
||||
glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
|
||||
klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
|
||||
resyncPeriod = s.resyncCheckPeriod
|
||||
} else {
|
||||
// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
|
||||
|
@ -555,7 +608,7 @@ func (p *processorListener) run() {
|
|||
case deleteNotification:
|
||||
p.handler.OnDelete(notification.oldObj)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
|
||||
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
|
||||
}
|
||||
}
|
||||
// the only way to get here is if the p.nextCh is empty and closed
|
||||
|
|
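Besides the much longer doc comment and the klog and k8s.io/utils/buffer import moves, behavior here is unchanged, so the sketch below just walks the lifecycle that comment describes: add handlers, run, wait for the first full LIST via WaitForCacheSync, then read the local cache. The kubeconfig path and resync period are placeholders.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config") // placeholder path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceDefault, fields.Everything())
	informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	// Handlers get sequential notifications that trail the shared cache,
	// as the rewritten doc comment above spells out.
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Printf("add %T\n", obj) },
		DeleteFunc: func(obj interface{}) { fmt.Printf("delete %T\n", obj) },
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go informer.Run(stopCh)

	// HasSynced flips to true after the first full LIST has been delivered.
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		panic("cache never synced")
	}
	fmt.Println("cached pods:", len(informer.GetStore().ListKeys()))
}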
2
vendor/k8s.io/client-go/tools/cache/store.go
generated
vendored
|
@ -210,7 +210,7 @@ func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error)
|
|||
// 'c' takes ownership of the list, you should not reference the list again
|
||||
// after calling this function.
|
||||
func (c *cache) Replace(list []interface{}, resourceVersion string) error {
|
||||
items := map[string]interface{}{}
|
||||
items := make(map[string]interface{}, len(list))
|
||||
for _, item := range list {
|
||||
key, err := c.keyFunc(item)
|
||||
if err != nil {
|
||||
|
|
21
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
generated
vendored
|
@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
|
|||
}
|
||||
index := c.indices[indexName]
|
||||
|
||||
// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
|
||||
returnKeySet := sets.String{}
|
||||
for _, indexKey := range indexKeys {
|
||||
set := index[indexKey]
|
||||
for _, key := range set.UnsortedList() {
|
||||
returnKeySet.Insert(key)
|
||||
var returnKeySet sets.String
|
||||
if len(indexKeys) == 1 {
|
||||
// In majority of cases, there is exactly one value matching.
|
||||
// Optimize the most common path - deduping is not needed here.
|
||||
returnKeySet = index[indexKeys[0]]
|
||||
} else {
|
||||
// Need to de-dupe the return list.
|
||||
// Since multiple keys are allowed, this can happen.
|
||||
returnKeySet = sets.String{}
|
||||
for _, indexKey := range indexKeys {
|
||||
for key := range index[indexKey] {
|
||||
returnKeySet.Insert(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -178,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro
|
|||
|
||||
set := index[indexKey]
|
||||
list := make([]interface{}, 0, set.Len())
|
||||
for _, key := range set.List() {
|
||||
for key := range set {
|
||||
list = append(list, c.items[key])
|
||||
}
|
||||
|
||||
|
|
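A minimal sketch of the Index/ByIndex lookups optimized above, using the standard namespace index; the pod object and index contents are illustrative assumptions.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	if err := indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"}}); err != nil {
		panic(err)
	}

	// ByIndex walks the stored key set directly; the single-key fast path in
	// Index above likewise avoids copying the set when only one index key matches.
	objs, err := indexer.ByIndex(cache.NamespaceIndex, "default")
	if err != nil {
		panic(err)
	}
	fmt.Println("objects indexed under namespace default:", len(objs))
}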
1
vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
generated
vendored
1
vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
generated
vendored
|
@@ -15,4 +15,5 @@ limitations under the License.
|
|||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package api
|
||||
|
|
9
vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
generated
vendored
9
vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
generated
vendored
|
@@ -29,6 +29,8 @@ import (
|
|||
func init() {
|
||||
sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
|
||||
redactedBytes = []byte(string(sDec))
|
||||
sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED")
|
||||
dataOmittedBytes = []byte(string(sDec))
|
||||
}
|
||||
|
||||
// IsConfigEmpty returns true if the config is empty.
|
||||
|
@@ -79,7 +81,10 @@ func MinifyConfig(config *Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var redactedBytes []byte
|
||||
var (
|
||||
redactedBytes []byte
|
||||
dataOmittedBytes []byte
|
||||
)
|
||||
|
||||
// ShortenConfig redacts raw data entries from the config object for a human-readable view.
|
||||
func ShortenConfig(config *Config) {
|
||||
|
@@ -97,7 +102,7 @@ func ShortenConfig(config *Config) {
|
|||
}
|
||||
for key, cluster := range config.Clusters {
|
||||
if len(cluster.CertificateAuthorityData) > 0 {
|
||||
cluster.CertificateAuthorityData = redactedBytes
|
||||
cluster.CertificateAuthorityData = dataOmittedBytes
|
||||
}
|
||||
config.Clusters[key] = cluster
|
||||
}
|
||||
|
|
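A minimal sketch of the redaction behavior touched above: after ShortenConfig, serialized certificate-authority data reads DATA+OMITTED while key and secret fields keep the REDACTED placeholder. The config contents here are illustrative assumptions.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.NewConfig()
	cfg.Clusters["demo"] = &clientcmdapi.Cluster{
		Server:                   "https://demo.example.invalid:6443",
		CertificateAuthorityData: []byte("-----BEGIN CERTIFICATE-----..."),
	}
	cfg.AuthInfos["demo"] = &clientcmdapi.AuthInfo{
		ClientKeyData: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
	}

	clientcmdapi.ShortenConfig(cfg)

	out, err := clientcmd.Write(*cfg)
	if err != nil {
		panic(err)
	}
	// certificate-authority-data serializes as DATA+OMITTED,
	// client-key-data as REDACTED.
	fmt.Println(string(out))
}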
11
vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
generated
vendored
11
vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
generated
vendored
|
@@ -21,6 +21,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer/json"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
)
|
||||
|
@@ -47,14 +48,8 @@ var (
|
|||
|
||||
func init() {
|
||||
Scheme = runtime.NewScheme()
|
||||
if err := api.AddToScheme(Scheme); err != nil {
|
||||
// Programmer error, detect immediately
|
||||
panic(err)
|
||||
}
|
||||
if err := v1.AddToScheme(Scheme); err != nil {
|
||||
// Programmer error, detect immediately
|
||||
panic(err)
|
||||
}
|
||||
utilruntime.Must(api.AddToScheme(Scheme))
|
||||
utilruntime.Must(v1.AddToScheme(Scheme))
|
||||
yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme)
|
||||
Codec = versioning.NewDefaultingCodecForScheme(
|
||||
Scheme,
|
||||
|
|
44
vendor/k8s.io/client-go/tools/clientcmd/api/types.go
generated
vendored
44
vendor/k8s.io/client-go/tools/clientcmd/api/types.go
generated
vendored
|
@@ -17,6 +17,8 @@ limitations under the License.
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@@ -150,6 +152,25 @@ type AuthProviderConfig struct {
|
|||
Config map[string]string `json:"config,omitempty"`
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(AuthProviderConfig)
|
||||
var _ fmt.GoStringer = new(AuthProviderConfig)
|
||||
|
||||
// GoString implements fmt.GoStringer and sanitizes sensitive fields of
|
||||
// AuthProviderConfig to prevent accidental leaking via logs.
|
||||
func (c AuthProviderConfig) GoString() string {
|
||||
return c.String()
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer and sanitizes sensitive fields of
|
||||
// AuthProviderConfig to prevent accidental leaking via logs.
|
||||
func (c AuthProviderConfig) String() string {
|
||||
cfg := "<nil>"
|
||||
if c.Config != nil {
|
||||
cfg = "--- REDACTED ---"
|
||||
}
|
||||
return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg)
|
||||
}
|
||||
|
||||
// ExecConfig specifies a command to provide client credentials. The command is exec'd
|
||||
// and outputs structured stdout holding credentials.
|
||||
//
|
||||
|
@@ -172,6 +193,29 @@ type ExecConfig struct {
|
|||
APIVersion string `json:"apiVersion,omitempty"`
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(ExecConfig)
|
||||
var _ fmt.GoStringer = new(ExecConfig)
|
||||
|
||||
// GoString implements fmt.GoStringer and sanitizes sensitive fields of
|
||||
// ExecConfig to prevent accidental leaking via logs.
|
||||
func (c ExecConfig) GoString() string {
|
||||
return c.String()
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig
|
||||
// to prevent accidental leaking via logs.
|
||||
func (c ExecConfig) String() string {
|
||||
var args []string
|
||||
if len(c.Args) > 0 {
|
||||
args = []string{"--- REDACTED ---"}
|
||||
}
|
||||
env := "[]ExecEnvVar(nil)"
|
||||
if len(c.Env) > 0 {
|
||||
env = "[]ExecEnvVar{--- REDACTED ---}"
|
||||
}
|
||||
return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion)
|
||||
}
|
||||
|
||||
// ExecEnvVar is used for setting environment variables when executing an exec-based
|
||||
// credential plugin.
|
||||
type ExecEnvVar struct {
|
||||
|
|
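A minimal sketch of the sanitized formatting added above: printing these structs with %v or %#v goes through the new String and GoString methods, so argument, environment, and provider config values are not leaked. The credential values shown are illustrative assumptions.

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	exec := clientcmdapi.ExecConfig{
		Command:    "aws-iam-authenticator",
		Args:       []string{"token", "-i", "my-cluster"},
		Env:        []clientcmdapi.ExecEnvVar{{Name: "AWS_PROFILE", Value: "prod"}},
		APIVersion: "client.authentication.k8s.io/v1alpha1",
	}
	// Args and Env are replaced by "--- REDACTED ---" placeholders.
	fmt.Printf("%v\n%#v\n", exec, exec)

	auth := clientcmdapi.AuthProviderConfig{
		Name:   "oidc",
		Config: map[string]string{"id-token": "secret"},
	}
	// The Config map prints as --- REDACTED ---.
	fmt.Printf("%v\n", auth)
}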
1
vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
generated
vendored
1
vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
generated
vendored
|
@@ -15,4 +15,5 @@ limitations under the License.
|
|||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package v1
|
||||
|
|
23
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
generated
vendored
23
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
generated
vendored
|
@@ -46,31 +46,26 @@ func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
|
|||
in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
|
||||
*out = make(map[string][]string, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = make([]string, len(val))
|
||||
copy((*out)[key], val)
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.AuthProvider != nil {
|
||||
in, out := &in.AuthProvider, &out.AuthProvider
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(AuthProviderConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(AuthProviderConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Exec != nil {
|
||||
in, out := &in.Exec, &out.Exec
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ExecConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(ExecConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Extensions != nil {
|
||||
in, out := &in.Extensions, &out.Extensions
|
||||
|
|
44
vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
generated
vendored
44
vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
generated
vendored
|
@@ -46,31 +46,26 @@ func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
|
|||
in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
|
||||
*out = make(map[string][]string, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = make([]string, len(val))
|
||||
copy((*out)[key], val)
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.AuthProvider != nil {
|
||||
in, out := &in.AuthProvider, &out.AuthProvider
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(AuthProviderConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(AuthProviderConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Exec != nil {
|
||||
in, out := &in.Exec, &out.Exec
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ExecConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(ExecConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Extensions != nil {
|
||||
in, out := &in.Extensions, &out.Extensions
|
||||
|
@@ -159,36 +154,45 @@ func (in *Config) DeepCopyInto(out *Config) {
|
|||
in, out := &in.Clusters, &out.Clusters
|
||||
*out = make(map[string]*Cluster, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *Cluster
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(Cluster)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(Cluster)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.AuthInfos != nil {
|
||||
in, out := &in.AuthInfos, &out.AuthInfos
|
||||
*out = make(map[string]*AuthInfo, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *AuthInfo
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(AuthInfo)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(AuthInfo)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.Contexts != nil {
|
||||
in, out := &in.Contexts, &out.Contexts
|
||||
*out = make(map[string]*Context, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *Context
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(Context)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(Context)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.Extensions != nil {
|
||||
|
|
51
vendor/k8s.io/client-go/tools/clientcmd/client_config.go
generated
vendored
51
vendor/k8s.io/client-go/tools/clientcmd/client_config.go
generated
vendored
|
@@ -24,8 +24,8 @@ import (
|
|||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/imdario/mergo"
|
||||
"k8s.io/klog"
|
||||
|
||||
restclient "k8s.io/client-go/rest"
|
||||
clientauth "k8s.io/client-go/tools/auth"
|
||||
|
@@ -175,10 +175,6 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
|
|||
// only try to read the auth information if we are secure
|
||||
if restclient.IsConfigTransportTLS(*clientConfig) {
|
||||
var err error
|
||||
|
||||
// mergo is a first write wins for map value and a last writing wins for interface values
|
||||
// NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
|
||||
// Our mergo.Merge version is older than this change.
|
||||
var persister restclient.AuthProviderConfigPersister
|
||||
if config.configAccess != nil {
|
||||
authInfoName, _ := config.getAuthInfoName()
|
||||
|
@@ -188,13 +184,13 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mergo.Merge(clientConfig, userAuthPartialConfig)
|
||||
mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
|
||||
|
||||
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mergo.Merge(clientConfig, serverAuthPartialConfig)
|
||||
mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
|
||||
}
|
||||
|
||||
return clientConfig, nil
|
||||
|
@@ -214,7 +210,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
|
|||
configClientConfig.CAFile = configClusterInfo.CertificateAuthority
|
||||
configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
|
||||
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
|
||||
mergo.Merge(mergedConfig, configClientConfig)
|
||||
mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
|
||||
|
||||
return mergedConfig, nil
|
||||
}
|
||||
|
@@ -232,12 +228,14 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
|
|||
// blindly overwrite existing values based on precedence
|
||||
if len(configAuthInfo.Token) > 0 {
|
||||
mergedConfig.BearerToken = configAuthInfo.Token
|
||||
mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
|
||||
} else if len(configAuthInfo.TokenFile) > 0 {
|
||||
tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mergedConfig.BearerToken = string(tokenBytes)
|
||||
mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
|
||||
}
|
||||
if len(configAuthInfo.Impersonate) > 0 {
|
||||
mergedConfig.Impersonate = restclient.ImpersonationConfig{
|
||||
|
@@ -279,8 +277,8 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
|
|||
promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
|
||||
previouslyMergedConfig := mergedConfig
|
||||
mergedConfig = &restclient.Config{}
|
||||
mergo.Merge(mergedConfig, promptedConfig)
|
||||
mergo.Merge(mergedConfig, previouslyMergedConfig)
|
||||
mergo.MergeWithOverwrite(mergedConfig, promptedConfig)
|
||||
mergo.MergeWithOverwrite(mergedConfig, previouslyMergedConfig)
|
||||
config.promptedCredentials.username = mergedConfig.Username
|
||||
config.promptedCredentials.password = mergedConfig.Password
|
||||
}
|
||||
|
@@ -299,16 +297,6 @@ func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
|
|||
return config
|
||||
}
|
||||
|
||||
// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information
|
||||
func makeServerIdentificationConfig(info clientauth.Info) restclient.Config {
|
||||
config := restclient.Config{}
|
||||
config.CAFile = info.CAFile
|
||||
if info.Insecure != nil {
|
||||
config.Insecure = *info.Insecure
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
func canIdentifyUser(config restclient.Config) bool {
|
||||
return len(config.Username) > 0 ||
|
||||
(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
|
||||
|
@@ -423,11 +411,11 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
|
|||
|
||||
mergedContext := clientcmdapi.NewContext()
|
||||
if configContext, exists := contexts[contextName]; exists {
|
||||
mergo.Merge(mergedContext, configContext)
|
||||
mergo.MergeWithOverwrite(mergedContext, configContext)
|
||||
} else if required {
|
||||
return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
|
||||
}
|
||||
mergo.Merge(mergedContext, config.overrides.Context)
|
||||
mergo.MergeWithOverwrite(mergedContext, config.overrides.Context)
|
||||
|
||||
return *mergedContext, nil
|
||||
}
|
||||
|
@@ -439,11 +427,11 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
|
|||
|
||||
mergedAuthInfo := clientcmdapi.NewAuthInfo()
|
||||
if configAuthInfo, exists := authInfos[authInfoName]; exists {
|
||||
mergo.Merge(mergedAuthInfo, configAuthInfo)
|
||||
mergo.MergeWithOverwrite(mergedAuthInfo, configAuthInfo)
|
||||
} else if required {
|
||||
return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
|
||||
}
|
||||
mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo)
|
||||
mergo.MergeWithOverwrite(mergedAuthInfo, config.overrides.AuthInfo)
|
||||
|
||||
return *mergedAuthInfo, nil
|
||||
}
|
||||
|
@@ -454,13 +442,13 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
|
|||
clusterInfoName, required := config.getClusterName()
|
||||
|
||||
mergedClusterInfo := clientcmdapi.NewCluster()
|
||||
mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults)
|
||||
mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterDefaults)
|
||||
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
|
||||
mergo.Merge(mergedClusterInfo, configClusterInfo)
|
||||
mergo.MergeWithOverwrite(mergedClusterInfo, configClusterInfo)
|
||||
} else if required {
|
||||
return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
|
||||
}
|
||||
mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo)
|
||||
mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo)
|
||||
// An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
|
||||
// otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set"
|
||||
caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
|
||||
|
@@ -502,8 +490,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error)
|
|||
if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
|
||||
icc.Host = server
|
||||
}
|
||||
if token := config.overrides.AuthInfo.Token; len(token) > 0 {
|
||||
icc.BearerToken = token
|
||||
if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 {
|
||||
icc.BearerToken = config.overrides.AuthInfo.Token
|
||||
icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile
|
||||
}
|
||||
if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
|
||||
icc.TLSClientConfig.CAFile = certificateAuthorityFile
|
||||
|
@@ -549,12 +538,12 @@ func (config *inClusterClientConfig) Possible() bool {
|
|||
// to the default config.
|
||||
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
|
||||
if kubeconfigPath == "" && masterUrl == "" {
|
||||
glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
|
||||
klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
|
||||
kubeconfig, err := restclient.InClusterConfig()
|
||||
if err == nil {
|
||||
return kubeconfig, nil
|
||||
}
|
||||
glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
|
||||
klog.Warning("error creating inClusterConfig, falling back to default config: ", err)
|
||||
}
|
||||
return NewNonInteractiveDeferredLoadingClientConfig(
|
||||
&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
|
||||
|
|
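A minimal sketch of the kubeconfig-or-in-cluster fallback implemented by BuildConfigFromFlags, whose warnings now go through klog; the flag name and clientset wiring are illustrative assumptions.

package main

import (
	"flag"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file; empty selects in-cluster config")
	flag.Parse()

	// With an empty master URL and kubeconfig path, BuildConfigFromFlags
	// warns via klog and falls back to rest.InClusterConfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	_ = kubernetes.NewForConfigOrDie(cfg)
}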
34
vendor/k8s.io/client-go/tools/clientcmd/config.go
generated
vendored
34
vendor/k8s.io/client-go/tools/clientcmd/config.go
generated
vendored
|
@@ -24,7 +24,7 @@ import (
|
|||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
restclient "k8s.io/client-go/rest"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
@@ -220,6 +220,9 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, rela
|
|||
}
|
||||
}
|
||||
|
||||
// seenConfigs stores a map of config source filenames to computed config objects
|
||||
seenConfigs := map[string]*clientcmdapi.Config{}
|
||||
|
||||
for key, context := range newConfig.Contexts {
|
||||
startingContext, exists := startingConfig.Contexts[key]
|
||||
if !reflect.DeepEqual(context, startingContext) || !exists {
|
||||
|
@@ -228,15 +231,28 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, rela
|
|||
destinationFile = configAccess.GetDefaultFilename()
|
||||
}
|
||||
|
||||
configToWrite, err := getConfigFromFile(destinationFile)
|
||||
if err != nil {
|
||||
return err
|
||||
// we only obtain a fresh config object from its source file
|
||||
// if we have not seen it already - this prevents us from
|
||||
// reading and writing to the same number of files repeatedly
|
||||
// when multiple / all contexts share the same destination file.
|
||||
configToWrite, seen := seenConfigs[destinationFile]
|
||||
if !seen {
|
||||
var err error
|
||||
configToWrite, err = getConfigFromFile(destinationFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
seenConfigs[destinationFile] = configToWrite
|
||||
}
|
||||
configToWrite.Contexts[key] = context
|
||||
|
||||
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
|
||||
return err
|
||||
}
|
||||
configToWrite.Contexts[key] = context
|
||||
}
|
||||
}
|
||||
|
||||
// actually persist config object changes
|
||||
for destinationFile, configToWrite := range seenConfigs {
|
||||
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -467,7 +483,7 @@ func getConfigFromFile(filename string) (*clientcmdapi.Config, error) {
|
|||
func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config {
|
||||
config, err := getConfigFromFile(filename)
|
||||
if err != nil {
|
||||
glog.FatalDepth(1, err)
|
||||
klog.FatalDepth(1, err)
|
||||
}
|
||||
|
||||
return config
|
||||
|
|
12
vendor/k8s.io/client-go/tools/clientcmd/loader.go
generated
vendored
12
vendor/k8s.io/client-go/tools/clientcmd/loader.go
generated
vendored
|
@@ -27,8 +27,8 @@ import (
|
|||
goruntime "runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/imdario/mergo"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
@@ -211,7 +211,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
|
|||
mapConfig := clientcmdapi.NewConfig()
|
||||
|
||||
for _, kubeconfig := range kubeconfigs {
|
||||
mergo.Merge(mapConfig, kubeconfig)
|
||||
mergo.MergeWithOverwrite(mapConfig, kubeconfig)
|
||||
}
|
||||
|
||||
// merge all of the struct values in the reverse order so that priority is given correctly
|
||||
|
@@ -219,14 +219,14 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
|
|||
nonMapConfig := clientcmdapi.NewConfig()
|
||||
for i := len(kubeconfigs) - 1; i >= 0; i-- {
|
||||
kubeconfig := kubeconfigs[i]
|
||||
mergo.Merge(nonMapConfig, kubeconfig)
|
||||
mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
|
||||
}
|
||||
|
||||
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
|
||||
// get the values we expect.
|
||||
config := clientcmdapi.NewConfig()
|
||||
mergo.Merge(config, mapConfig)
|
||||
mergo.Merge(config, nonMapConfig)
|
||||
mergo.MergeWithOverwrite(config, mapConfig)
|
||||
mergo.MergeWithOverwrite(config, nonMapConfig)
|
||||
|
||||
if rules.ResolvePaths() {
|
||||
if err := ResolveLocalPaths(config); err != nil {
|
||||
|
@@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(6).Infoln("Config loaded from file", filename)
|
||||
klog.V(6).Infoln("Config loaded from file: ", filename)
|
||||
|
||||
// set LocationOfOrigin on every Cluster, User, and Context
|
||||
for key, obj := range config.AuthInfos {
|
||||
|
|
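An illustrative sketch (plain structs, not client-go types) of the mergo semantics relied on above: MergeWithOverwrite lets non-empty fields of the source replace fields already set in the destination, while empty source fields leave the destination untouched.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type settings struct {
	Server    string
	Namespace string
}

func main() {
	dst := settings{Server: "https://old.example.invalid", Namespace: "default"}
	src := settings{Server: "https://new.example.invalid"}

	if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
		panic(err)
	}
	// Server is overwritten by the source value; Namespace keeps its
	// destination value because the source field is empty.
	fmt.Printf("%+v\n", dst)
}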
13
vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
generated
vendored
13
vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
generated
vendored
|
@@ -20,7 +20,7 @@ import (
|
|||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
restclient "k8s.io/client-go/rest"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
@@ -119,7 +119,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e
|
|||
|
||||
// check for in-cluster configuration and use it
|
||||
if config.icc.Possible() {
|
||||
glog.V(4).Infof("Using in-cluster configuration")
|
||||
klog.V(4).Infof("Using in-cluster configuration")
|
||||
return config.icc.ClientConfig()
|
||||
}
|
||||
|
||||
|
@@ -150,13 +150,18 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
|
|||
|
||||
// if we got a default namespace, determine whether it was explicit or implicit
|
||||
if raw, err := mergedKubeConfig.RawConfig(); err == nil {
|
||||
if context := raw.Contexts[raw.CurrentContext]; context != nil && len(context.Namespace) > 0 {
|
||||
// determine the current context
|
||||
currentContext := raw.CurrentContext
|
||||
if config.overrides != nil && len(config.overrides.CurrentContext) > 0 {
|
||||
currentContext = config.overrides.CurrentContext
|
||||
}
|
||||
if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 {
|
||||
return ns, false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Using in-cluster namespace")
|
||||
klog.V(4).Infof("Using in-cluster namespace")
|
||||
|
||||
// allow the namespace from the service account token directory to be used.
|
||||
return config.icc.Namespace()
|
||||
|
|
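A minimal sketch of resolving the namespace when the current context is overridden, which is the case the change above now honors; the "staging" context name is an illustrative assumption.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{CurrentContext: "staging"}

	cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)

	// The namespace now comes from the overridden "staging" context rather
	// than the kubeconfig's current-context.
	ns, overridden, err := cc.Namespace()
	if err != nil {
		panic(err)
	}
	fmt.Println(ns, overridden)
}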
114
vendor/k8s.io/client-go/tools/pager/pager.go
generated
vendored
114
vendor/k8s.io/client-go/tools/pager/pager.go
generated
vendored
|
@@ -25,9 +25,11 @@ import (
|
|||
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
)
|
||||
|
||||
const defaultPageSize = 500
|
||||
const defaultPageBufferSize = 10
|
||||
|
||||
// ListPageFunc returns a list object for the given list options.
|
||||
type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)
|
||||
|
@@ -48,6 +50,9 @@ type ListPager struct {
|
|||
PageFn ListPageFunc
|
||||
|
||||
FullListIfExpired bool
|
||||
|
||||
// Number of pages to buffer
|
||||
PageBufferSize int32
|
||||
}
|
||||
|
||||
// New creates a new pager from the provided pager function using the default
|
||||
|
@@ -58,6 +63,7 @@ func New(fn ListPageFunc) *ListPager {
|
|||
PageSize: defaultPageSize,
|
||||
PageFn: fn,
|
||||
FullListIfExpired: true,
|
||||
PageBufferSize: defaultPageBufferSize,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -73,6 +79,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
|
|||
}
|
||||
var list *metainternalversion.List
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
obj, err := p.PageFn(ctx, options)
|
||||
if err != nil {
|
||||
if !errors.IsResourceExpired(err) || !p.FullListIfExpired {
|
||||
|
@@ -115,3 +127,105 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
|
|||
options.Continue = m.GetContinue()
|
||||
}
|
||||
}
|
||||
|
||||
// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If
|
||||
// fn returns an error, processing stops and that error is returned. If fn does not return an error,
|
||||
// any error encountered while retrieving the list from the server is returned. If the context
|
||||
// cancels or times out, the context error is returned. Since the list is retrieved in paginated
|
||||
// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list
|
||||
// requests exceed the expiration limit of the apiserver being called.
|
||||
//
|
||||
// Items are retrieved in chunks from the server to reduce the impact on the server with up to
|
||||
// ListPager.PageBufferSize chunks buffered concurrently in the background.
|
||||
func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
|
||||
return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
|
||||
return meta.EachListItem(obj, fn)
|
||||
})
|
||||
}
|
||||
|
||||
// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on
|
||||
// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does
|
||||
// not return an error, any error encountered while retrieving the list from the server is
|
||||
// returned. If the context cancels or times out, the context error is returned. Since the list is
|
||||
// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if
|
||||
// the pagination list requests exceed the expiration limit of the apiserver being called.
|
||||
//
|
||||
// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background.
|
||||
func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
|
||||
if p.PageBufferSize < 0 {
|
||||
return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize)
|
||||
}
|
||||
|
||||
// Ensure background goroutine is stopped if this call exits before all list items are
|
||||
// processed. Cancelation error from this deferred cancel call is never returned to caller;
|
||||
// either the list result has already been sent to bgResultC or the fn error is returned and
|
||||
// the cancelation error is discarded.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
chunkC := make(chan runtime.Object, p.PageBufferSize)
|
||||
bgResultC := make(chan error, 1)
|
||||
go func() {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
var err error
|
||||
defer func() {
|
||||
close(chunkC)
|
||||
bgResultC <- err
|
||||
}()
|
||||
err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error {
|
||||
select {
|
||||
case chunkC <- chunk: // buffer the chunk, this can block
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
|
||||
for o := range chunkC {
|
||||
err := fn(o)
|
||||
if err != nil {
|
||||
return err // any fn error should be returned immediately
|
||||
}
|
||||
}
|
||||
// promote the results of our background goroutine to the foreground
|
||||
return <-bgResultC
|
||||
}
|
||||
|
||||
// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list
|
||||
// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return
|
||||
// an error, any error encountered while retrieving the list from the server is returned. If the
|
||||
// context cancels or times out, the context error is returned. Since the list is retrieved in
|
||||
// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the
|
||||
// pagination list requests exceed the expiration limit of the apiserver being called.
|
||||
func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
|
||||
if options.Limit == 0 {
|
||||
options.Limit = p.PageSize
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
obj, err := p.PageFn(ctx, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m, err := meta.ListAccessor(obj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("returned object must be a list: %v", err)
|
||||
}
|
||||
if err := fn(obj); err != nil {
|
||||
return err
|
||||
}
|
||||
// if we have no more items, return.
|
||||
if len(m.GetContinue()) == 0 {
|
||||
return nil
|
||||
}
|
||||
// set the next loop up
|
||||
options.Continue = m.GetContinue()
|
||||
}
|
||||
}
|
||||
|
|
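A minimal sketch of driving the new buffered pagination helper; the clientset wiring is an illustrative assumption, and the List call is written for a clientset of this vintage, which does not yet take a context argument. By default up to defaultPageBufferSize (10) pages are prefetched in the background while fn processes each item.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods(metav1.NamespaceAll).List(opts)
	}))
	p.PageSize = 100 // each request fetches 100 items; up to PageBufferSize pages are prefetched

	err = p.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
		pod := obj.(*v1.Pod)
		fmt.Println(pod.Namespace + "/" + pod.Name)
		return nil
	})
	if err != nil {
		panic(err)
	}
}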