Update kubernetes/client-go to v7.0.0 (#1808)

This change updates Kubernetes' client-go to v7.0.0, which matches Kubernetes 1.10.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang
2018-05-16 23:10:28 -07:00
committed by Miek Gieben
parent 9a82fa0374
commit b109a79cb5
730 changed files with 17431 additions and 43251 deletions

View File

@@ -16,7 +16,6 @@ go_library(
go_test(
name = "go_default_xtest",
srcs = ["clientauth_test.go"],
importpath = "k8s.io/client-go/tools/auth_test",
deps = ["//vendor/k8s.io/client-go/tools/auth:go_default_library"],
)

View File

@@ -23,7 +23,7 @@ location within a Container's file tree for Containers that
need access to the Kubernetes API.
Having a defined format allows:
- clients to be implmented in multiple languages
- clients to be implemented in multiple languages
- applications which link clients to be portable across
clusters with different authentication styles (e.g.
some may use SSL Client certs, others may not, etc)

View File

@@ -22,9 +22,8 @@ go_test(
"store_test.go",
"undelta_store_test.go",
],
features = ["-race"],
importpath = "k8s.io/client-go/tools/cache",
library = ":go_default_library",
embed = [":go_default_library"],
race = "off",
deps = [
"//vendor/github.com/google/gofuzz:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
@@ -83,6 +82,7 @@ go_library(
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/pager:go_default_library",
"//vendor/k8s.io/client-go/util/buffer:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)

View File

@@ -288,7 +288,7 @@ func NewInformer(
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
cfg := &Config{
Queue: fifo,
@@ -355,7 +355,7 @@ func NewIndexerInformer(
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
cfg := &Config{
Queue: fifo,

View File

@@ -44,7 +44,7 @@ func Example() {
// This will hold incoming changes. Note how we pass downstream in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, downstream)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, downstream)
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)

View File

@@ -31,11 +31,6 @@ import (
// keyFunc is used to figure out what key an object should have. (It's
// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
//
// 'compressor' may compress as many or as few items as it wants
// (including returning an empty slice), but it should do what it
// does quickly since it is called while the queue is locked.
// 'compressor' may be nil if you don't want any delta compression.
//
// 'keyLister' is expected to return a list of keys that the consumer of
// this queue "knows about". It is used to decide which items are missing
// when Replace() is called; 'Deleted' deltas are produced for these items.
@@ -43,18 +38,30 @@ import (
// TODO: consider merging keyLister with this object, tracking a list of
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
// TODO(lavalamp): I believe there is a possible race only when using an
// external known object source that the above TODO would
// fix.
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyListerGetter) *DeltaFIFO {
func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
f := &DeltaFIFO{
items: map[string]Deltas{},
queue: []string{},
keyFunc: keyFunc,
deltaCompressor: compressor,
knownObjects: knownObjects,
items: map[string]Deltas{},
queue: []string{},
keyFunc: keyFunc,
knownObjects: knownObjects,
}
f.cond.L = &f.lock
return f
@@ -86,9 +93,6 @@ func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyL
// items have been deleted when Replace() or Delete() are called. The deleted
// object will be included in the DeleteFinalStateUnknown markers. These objects
// could be stale.
//
// You may provide a function to compress deltas (e.g., represent a
// series of Updates as a single Update).
type DeltaFIFO struct {
// lock/cond protects access to 'items' and 'queue'.
lock sync.RWMutex
@@ -110,10 +114,6 @@ type DeltaFIFO struct {
// insertion and retrieval, and should be deterministic.
keyFunc KeyFunc
// deltaCompressor tells us how to combine two or more
// deltas. It may be nil.
deltaCompressor DeltaCompressor
// knownObjects list keys that are "known", for the
// purpose of figuring out which items have been deleted
// when Replace() or Delete() is called.
@@ -133,7 +133,6 @@ var (
var (
// ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
// object with zero length is encountered (should be impossible,
// even if such an object is accidentally produced by a DeltaCompressor--
// but included for completeness).
ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
)
@@ -213,8 +212,6 @@ func (f *DeltaFIFO) Delete(obj interface{}) error {
if err == nil && !exists && !itemsExist {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
// TODO(lavalamp): This may be racy-- we aren't properly locked
// with knownObjects.
return nil
}
}
@@ -305,8 +302,8 @@ func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}
// queueActionLocked appends to the delta list for the object, calling
// f.deltaCompressor if needed. Caller must lock first.
// queueActionLocked appends to the delta list for the object.
// Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
id, err := f.KeyOf(obj)
if err != nil {
@@ -322,9 +319,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
newDeltas := append(f.items[id], Delta{actionType, obj})
newDeltas = dedupDeltas(newDeltas)
if f.deltaCompressor != nil {
newDeltas = f.deltaCompressor.Compress(newDeltas)
}
_, exists := f.items[id]
if len(newDeltas) > 0 {
@@ -334,8 +328,7 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
f.items[id] = newDeltas
f.cond.Broadcast()
} else if exists {
// The compression step removed all deltas, so
// we need to remove this from our map (extra items
// We need to remove this from our map (extra items
// in the queue are ignored if they are not in the
// map).
delete(f.items, id)
@@ -355,8 +348,8 @@ func (f *DeltaFIFO) List() []interface{} {
func (f *DeltaFIFO) listLocked() []interface{} {
list := make([]interface{}, 0, len(f.items))
for _, item := range f.items {
// Copy item's slice so operations on this slice (delta
// compression) won't interfere with the object we return.
// Copy item's slice so operations on this slice
// won't interfere with the object we return.
item = copyDeltas(item)
list = append(list, item.Newest().Object)
}
@@ -394,8 +387,8 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
defer f.lock.RUnlock()
d, exists := f.items[key]
if exists {
// Copy item's slice so operations on this slice (delta
// compression) won't interfere with the object we return.
// Copy item's slice so operations on this slice
// won't interfere with the object we return.
d = copyDeltas(d)
}
return d, exists, nil
@@ -503,8 +496,6 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
}
// Detect deletions not already in the queue.
// TODO(lavalamp): This may be racy-- we aren't properly locked
// with knownObjects. Unproven.
knownKeys := f.knownObjects.ListKeys()
queuedDeletions := 0
for _, k := range knownKeys {
@@ -603,23 +594,6 @@ type KeyGetter interface {
GetByKey(key string) (interface{}, bool, error)
}
// DeltaCompressor is an algorithm that removes redundant changes.
type DeltaCompressor interface {
Compress(Deltas) Deltas
}
// DeltaCompressorFunc should remove redundant changes; but changes that
// are redundant depend on one's desired semantics, so this is an
// injectable function.
//
// DeltaCompressorFunc adapts a raw function to be a DeltaCompressor.
type DeltaCompressorFunc func(Deltas) Deltas
// Compress just calls dc.
func (dc DeltaCompressorFunc) Compress(d Deltas) Deltas {
return dc(d)
}
// DeltaType is the type of a change (addition, deletion, etc)
type DeltaType string
@@ -668,7 +642,7 @@ func (d Deltas) Newest() *Delta {
// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
// the objects in the slice. This allows Get/List to return an object that we
// know won't be clobbered by a subsequent call to a delta compressor.
// know won't be clobbered by a subsequent modifications.
func copyDeltas(d Deltas) Deltas {
d2 := make(Deltas, len(d))
copy(d2, d)
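
With the DeltaCompressor hook removed, callers construct the FIFO from just a key function and an optional known-objects lister, and any delta reduction has to happen in the consumer's Pop callback instead. A minimal migration sketch, assuming a plain cache.Store as the known-objects source and a core/v1 Pod as the queued object (both illustrative, not taken from this commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Pre-v7.0.0: cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, knownObjects)
	// v7.0.0: the DeltaCompressor argument is gone.
	knownObjects := cache.NewStore(cache.MetaNamespaceKeyFunc)
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, knownObjects)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}
	if err := fifo.Add(pod); err != nil {
		panic(err)
	}

	// Compression-style reduction now lives in the Pop callback: Newest()
	// yields the latest state for the key, however many deltas were queued.
	if _, err := fifo.Pop(func(obj interface{}) error {
		deltas := obj.(cache.Deltas)
		fmt.Println("newest delta type:", deltas.Newest().Type)
		return nil
	}); err != nil {
		panic(err)
	}
}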

View File

@@ -51,7 +51,7 @@ func (kl keyLookupFunc) GetByKey(key string) (interface{}, bool, error) {
}
func TestDeltaFIFO_basic(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
const amount = 500
go func() {
for i := 0; i < amount; i++ {
@@ -86,7 +86,7 @@ func TestDeltaFIFO_basic(t *testing.T) {
}
func TestDeltaFIFO_requeueOnPop(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
_, err := f.Pop(func(obj interface{}) error {
@@ -129,43 +129,8 @@ func TestDeltaFIFO_requeueOnPop(t *testing.T) {
}
}
func TestDeltaFIFO_compressorWorks(t *testing.T) {
oldestTypes := []DeltaType{}
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
// This function just keeps the most recent delta
// and puts deleted ones in the list.
DeltaCompressorFunc(func(d Deltas) Deltas {
if n := len(d); n > 1 {
oldestTypes = append(oldestTypes, d[0].Type)
d = d[1:]
}
return d
}),
nil,
)
if f.HasSynced() {
t.Errorf("Expected HasSynced to be false before completion of initial population")
}
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("foo", 12))
f.Replace([]interface{}{mkFifoObj("foo", 20)}, "0")
f.Delete(mkFifoObj("foo", 22))
f.Add(mkFifoObj("foo", 25)) // flush the last one out
expect := []DeltaType{Added, Updated, Sync, Deleted}
if e, a := expect, oldestTypes; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
if e, a := (Deltas{{Added, mkFifoObj("foo", 25)}}), Pop(f).(Deltas); !reflect.DeepEqual(e, a) {
t.Fatalf("Expected %#v, got %#v", e, a)
}
if !f.HasSynced() {
t.Errorf("Expected HasSynced to be true after completion of initial population")
}
}
func TestDeltaFIFO_addUpdate(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("foo", 12))
f.Delete(mkFifoObj("foo", 15))
@@ -203,7 +168,7 @@ func TestDeltaFIFO_addUpdate(t *testing.T) {
}
func TestDeltaFIFO_enqueueingNoLister(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("bar", 15))
f.Add(mkFifoObj("qux", 17))
@@ -226,7 +191,6 @@ func TestDeltaFIFO_enqueueingNoLister(t *testing.T) {
func TestDeltaFIFO_enqueueingWithLister(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
nil,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
@@ -249,7 +213,7 @@ func TestDeltaFIFO_enqueueingWithLister(t *testing.T) {
}
func TestDeltaFIFO_addReplace(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Replace([]interface{}{mkFifoObj("foo", 15)}, "0")
got := make(chan testFifoObject, 2)
@@ -277,7 +241,6 @@ func TestDeltaFIFO_addReplace(t *testing.T) {
func TestDeltaFIFO_ResyncNonExisting(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
nil,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5)}
}),
@@ -297,7 +260,6 @@ func TestDeltaFIFO_ResyncNonExisting(t *testing.T) {
func TestDeltaFIFO_DeleteExistingNonPropagated(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
nil,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{}
}),
@@ -317,7 +279,6 @@ func TestDeltaFIFO_DeleteExistingNonPropagated(t *testing.T) {
func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
nil,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
@@ -344,7 +305,6 @@ func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) {
func TestDeltaFIFO_UpdateResyncRace(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
nil,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5)}
}),
@@ -367,7 +327,6 @@ func TestDeltaFIFO_UpdateResyncRace(t *testing.T) {
func TestDeltaFIFO_HasSyncedCorrectOnDeletion(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
nil,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
@@ -396,7 +355,7 @@ func TestDeltaFIFO_HasSyncedCorrectOnDeletion(t *testing.T) {
}
func TestDeltaFIFO_detectLineJumpers(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Add(mkFifoObj("bar", 1))
@@ -424,7 +383,7 @@ func TestDeltaFIFO_detectLineJumpers(t *testing.T) {
}
func TestDeltaFIFO_addIfNotPresent(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("b", 3))
b3 := Pop(f)
@@ -521,7 +480,7 @@ func TestDeltaFIFO_HasSynced(t *testing.T) {
}
for i, test := range tests {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil)
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
for _, action := range test.actions {
action(f)

View File

@@ -59,7 +59,7 @@ type Queue interface {
// has since been added.
AddIfNotPresent(interface{}) error
// Return true if the first batch of items has been popped
// HasSynced returns true if the first batch of items has been popped
HasSynced() bool
// Close queue

View File

@@ -63,8 +63,18 @@ type Getter interface {
// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
optionsModifier := func(options *metav1.ListOptions) {
options.FieldSelector = fieldSelector.String()
}
return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier)
}
// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier.
// Option modifier is a function that takes a ListOptions and modifies the consumed ListOptions. Provide a customized modifier function
// to apply modifications to ListOptions with a field selector, a label selector, or any other desired options.
func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch {
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
optionsModifier(&options)
return c.Get().
Namespace(namespace).
Resource(resource).
@@ -74,7 +84,7 @@ func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSe
}
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
options.Watch = true
options.FieldSelector = fieldSelector.String()
optionsModifier(&options)
return c.Get().
Namespace(namespace).
Resource(resource).
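
The new constructor generalizes the old field-selector-only helper: every ListOptions tweak now flows through one modifier function. A hedged usage sketch, assuming a typed clientset built from a hypothetical kubeconfig path and an illustrative label/field selector (none of which come from this commit):

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path, used only to obtain a Getter.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config).CoreV1().RESTClient()

	// Previously only a field selector could be injected:
	//   cache.NewListWatchFromClient(client, "pods", metav1.NamespaceDefault,
	//       fields.OneTermEqualSelector("spec.nodeName", "node-1"))
	// Now any ListOptions tweak goes through a single modifier:
	lw := cache.NewFilteredListWatchFromClient(
		client, "pods", metav1.NamespaceDefault,
		func(options *metav1.ListOptions) {
			options.LabelSelector = "app=nginx" // illustrative selector
			options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", "node-1").String()
		})
	_ = lw // feed into a Reflector or informer as usual
}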

View File

@@ -108,8 +108,8 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
r := &Reflector{
name: name,
// we need this to be unique per process (some names are still the same)but obvious who it belongs to
metrics: newReflectorMetrics(makeValidPromethusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
// we need this to be unique per process (some names are still the same) but obvious who it belongs to
metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
listerWatcher: lw,
store: store,
expectedType: reflect.TypeOf(expectedType),
@@ -120,7 +120,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
return r
}
func makeValidPromethusMetricLabel(in string) string {
func makeValidPrometheusMetricLabel(in string) string {
// this isn't perfect, but it removes our common characters
return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
}
@@ -302,12 +302,12 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
default:
}
timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
options = metav1.ListOptions{
ResourceVersion: resourceVersion,
// We want to avoid situations of hanging watchers. Stop any watchers that do not
// receive any events within the timeout window.
TimeoutSeconds: &timemoutseconds,
TimeoutSeconds: &timeoutSeconds,
}
r.metrics.numberOfWatches.Inc()

View File

@@ -26,6 +26,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/buffer"
"k8s.io/client-go/util/retry"
"github.com/golang/glog"
)
@@ -188,7 +189,7 @@ type deleteNotification struct {
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)
cfg := &Config{
Queue: fifo,
@@ -334,7 +335,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
s.blockDeltas.Lock()
defer s.blockDeltas.Unlock()
s.processor.addAndStartListener(listener)
s.processor.addListener(listener)
for _, item := range s.indexer.List() {
listener.add(addNotification{newObj: item})
}
@@ -372,6 +373,7 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
}
type sharedProcessor struct {
listenersStarted bool
listenersLock sync.RWMutex
listeners []*processorListener
syncingListeners []*processorListener
@@ -379,20 +381,15 @@ type sharedProcessor struct {
wg wait.Group
}
func (p *sharedProcessor) addAndStartListener(listener *processorListener) {
p.listenersLock.Lock()
defer p.listenersLock.Unlock()
p.addListenerLocked(listener)
p.wg.Start(listener.run)
p.wg.Start(listener.pop)
}
func (p *sharedProcessor) addListener(listener *processorListener) {
p.listenersLock.Lock()
defer p.listenersLock.Unlock()
p.addListenerLocked(listener)
if p.listenersStarted {
p.wg.Start(listener.run)
p.wg.Start(listener.pop)
}
}
func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
@@ -423,6 +420,7 @@ func (p *sharedProcessor) run(stopCh <-chan struct{}) {
p.wg.Start(listener.run)
p.wg.Start(listener.pop)
}
p.listenersStarted = true
}()
<-stopCh
p.listenersLock.RLock()
@@ -540,20 +538,35 @@ func (p *processorListener) pop() {
}
func (p *processorListener) run() {
defer utilruntime.HandleCrash()
// this call blocks until the channel is closed. When a panic happens during the notification
// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
// the next notification will be attempted. This is usually better than the alternative of never
// delivering again.
stopCh := make(chan struct{})
wait.Until(func() {
// this gives us a few quick retries before a long pause and then a few more quick retries
err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
for next := range p.nextCh {
switch notification := next.(type) {
case updateNotification:
p.handler.OnUpdate(notification.oldObj, notification.newObj)
case addNotification:
p.handler.OnAdd(notification.newObj)
case deleteNotification:
p.handler.OnDelete(notification.oldObj)
default:
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
}
}
// the only way to get here is if the p.nextCh is empty and closed
return true, nil
})
for next := range p.nextCh {
switch notification := next.(type) {
case updateNotification:
p.handler.OnUpdate(notification.oldObj, notification.newObj)
case addNotification:
p.handler.OnAdd(notification.newObj)
case deleteNotification:
p.handler.OnDelete(notification.oldObj)
default:
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
// the only way to get here is if the p.nextCh is empty and closed
if err == nil {
close(stopCh)
}
}
}, 1*time.Minute, stopCh)
}
// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
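
Because the processor now records listenersStarted under its lock and addAndStartListener is folded into addListener, registering a handler no longer races with Run over whether the listener gets started (the new TestSharedInformerInitializationRace below exercises exactly that). A small sketch, using the fake controller source from client-go's cache/testing package purely for illustration:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	fcache "k8s.io/client-go/tools/cache/testing"
)

func main() {
	source := fcache.NewFakeControllerSource()
	informer := cache.NewSharedInformer(source, &v1.Pod{}, time.Second)

	// A handler registered before Run is queued and started by run().
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("early handler:", obj.(*v1.Pod).Name) },
	})

	stop := make(chan struct{})
	defer close(stop)
	go informer.Run(stop)

	// A handler registered after Run is started immediately; the
	// listenersStarted flag, guarded by the processor lock, closes the
	// start-up race fixed by this change.
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("late handler:", obj.(*v1.Pod).Name) },
	})

	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}})
	time.Sleep(time.Second) // give the listeners a moment to drain
}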

View File

@@ -251,3 +251,15 @@ func TestResyncCheckPeriod(t *testing.T) {
t.Errorf("expected %d, got %d", e, a)
}
}
// verify that https://github.com/kubernetes/kubernetes/issues/59822 is fixed
func TestSharedInformerInitializationRace(t *testing.T) {
source := fcache.NewFakeControllerSource()
informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
listener := newTestListener("raceListener", 0)
stop := make(chan struct{})
go informer.AddEventHandlerWithResyncPeriod(listener, listener.resyncPeriod)
go informer.Run(stop)
close(stop)
}

View File

@@ -15,8 +15,7 @@ go_test(
"overrides_test.go",
"validation_test.go",
],
importpath = "k8s.io/client-go/tools/clientcmd",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/imdario/mergo:go_default_library",

View File

@@ -12,8 +12,7 @@ go_test(
"helpers_test.go",
"types_test.go",
],
importpath = "k8s.io/client-go/tools/clientcmd/api",
library = ":go_default_library",
embed = [":go_default_library"],
deps = ["//vendor/github.com/ghodss/yaml:go_default_library"],
)

View File

@@ -119,6 +119,9 @@ type AuthInfo struct {
// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
// +optional
AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
// +optional
Exec *ExecConfig `json:"exec,omitempty"`
// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
// +optional
Extensions map[string]runtime.Object `json:"extensions,omitempty"`
@@ -147,6 +150,35 @@ type AuthProviderConfig struct {
Config map[string]string `json:"config,omitempty"`
}
// ExecConfig specifies a command to provide client credentials. The command is exec'd
// and outputs structured stdout holding credentials.
//
// See the client.authentication.k8s.io API group for specifications of the exact input
// and output format
type ExecConfig struct {
// Command to execute.
Command string `json:"command"`
// Arguments to pass to the command when executing it.
// +optional
Args []string `json:"args"`
// Env defines additional environment variables to expose to the process. These
// are unioned with the host's environment, as well as variables client-go uses
// to pass arguments to the plugin.
// +optional
Env []ExecEnvVar `json:"env"`
// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
// the same encoding version as the input.
APIVersion string `json:"apiVersion,omitempty"`
}
// ExecEnvVar is used for setting environment variables when executing an exec-based
// credential plugin.
type ExecEnvVar struct {
Name string `json:"name"`
Value string `json:"value"`
}
// NewConfig is a convenience function that returns a new Config object with non-nil maps
func NewConfig() *Config {
return &Config{

View File

@@ -113,6 +113,9 @@ type AuthInfo struct {
// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
// +optional
AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
// +optional
Exec *ExecConfig `json:"exec,omitempty"`
// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
// +optional
Extensions []NamedExtension `json:"extensions,omitempty"`
@@ -169,3 +172,32 @@ type AuthProviderConfig struct {
Name string `json:"name"`
Config map[string]string `json:"config"`
}
// ExecConfig specifies a command to provide client credentials. The command is exec'd
// and outputs structured stdout holding credentials.
//
// See the client.authentication.k8s.io API group for specifications of the exact input
// and output format
type ExecConfig struct {
// Command to execute.
Command string `json:"command"`
// Arguments to pass to the command when executing it.
// +optional
Args []string `json:"args"`
// Env defines additional environment variables to expose to the process. These
// are unioned with the host's environment, as well as variables client-go uses
// to pass arguments to the plugin.
// +optional
Env []ExecEnvVar `json:"env"`
// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
// the same encoding version as the input.
APIVersion string `json:"apiVersion,omitempty"`
}
// ExecEnvVar is used for setting environment variables when executing an exec-based
// credential plugin.
type ExecEnvVar struct {
Name string `json:"name"`
Value string `json:"value"`
}
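
The ExecConfig/ExecEnvVar types added above can be populated programmatically, mirroring a kubeconfig users[].user.exec stanza. A minimal sketch; the plugin path, args, and env values are placeholders, and the API group string is the upstream client.authentication.k8s.io group rather than anything defined in this commit:

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// Equivalent to declaring an exec stanza for a user in a kubeconfig file.
	cfg := clientcmdapi.NewConfig()
	cfg.AuthInfos["exec-user"] = &clientcmdapi.AuthInfo{
		Exec: &clientcmdapi.ExecConfig{
			// Placeholder plugin binary; anything that prints an ExecCredential works.
			Command:    "/usr/local/bin/my-credential-plugin",
			APIVersion: "client.authentication.k8s.io/v1alpha1",
			Args:       []string{"get-token"},
			Env:        []clientcmdapi.ExecEnvVar{{Name: "CLUSTER_NAME", Value: "example"}},
		},
	}
	fmt.Printf("%+v\n", cfg.AuthInfos["exec-user"].Exec)
}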

View File

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@@ -63,6 +63,15 @@ func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
(*in).DeepCopyInto(*out)
}
}
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
if *in == nil {
*out = nil
} else {
*out = new(ExecConfig)
(*in).DeepCopyInto(*out)
}
}
if in.Extensions != nil {
in, out := &in.Extensions, &out.Extensions
*out = make([]NamedExtension, len(*in))
@@ -183,9 +192,8 @@ func (in *Config) DeepCopy() *Config {
func (in *Config) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -211,6 +219,48 @@ func (in *Context) DeepCopy() *Context {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
*out = *in
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]ExecEnvVar, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
func (in *ExecConfig) DeepCopy() *ExecConfig {
if in == nil {
return nil
}
out := new(ExecConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
if in == nil {
return nil
}
out := new(ExecEnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) {
*out = *in

View File

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package api
@@ -63,6 +63,15 @@ func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
(*in).DeepCopyInto(*out)
}
}
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
if *in == nil {
*out = nil
} else {
*out = new(ExecConfig)
(*in).DeepCopyInto(*out)
}
}
if in.Extensions != nil {
in, out := &in.Extensions, &out.Extensions
*out = make(map[string]runtime.Object, len(*in))
@@ -210,9 +219,8 @@ func (in *Config) DeepCopy() *Config {
func (in *Config) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -242,6 +250,48 @@ func (in *Context) DeepCopy() *Context {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
*out = *in
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]ExecEnvVar, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
func (in *ExecConfig) DeepCopy() *ExecConfig {
if in == nil {
return nil
}
out := new(ExecConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
if in == nil {
return nil
}
out := new(ExecEnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Preferences) DeepCopyInto(out *Preferences) {
*out = *in

View File

@@ -107,7 +107,7 @@ func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
// ClientConfig implements ClientConfig
func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
// check that getAuthInfo, getContext, and getCluster do not return an error.
// Do this before checking if the curent config is usable in the event that an
// Do this before checking if the current config is usable in the event that an
// AuthInfo, Context, or Cluster config with user-defined names are not found.
// This provides a user with the immediate cause for error if one is found
configAuthInfo, err := config.getAuthInfo()
@@ -202,7 +202,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately
// we want this order of precedence for user identifcation
// we want this order of precedence for user identification
// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
@@ -241,6 +241,9 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
mergedConfig.AuthProvider = configAuthInfo.AuthProvider
mergedConfig.AuthConfigPersister = persistAuthConfig
}
if configAuthInfo.Exec != nil {
mergedConfig.ExecProvider = configAuthInfo.Exec
}
// if there still isn't enough information to authenticate the user, try prompting
if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
@@ -291,7 +294,8 @@ func canIdentifyUser(config restclient.Config) bool {
return len(config.Username) > 0 ||
(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
len(config.BearerToken) > 0 ||
config.AuthProvider != nil
config.AuthProvider != nil ||
config.ExecProvider != nil
}
// Namespace implements ClientConfig
@@ -474,7 +478,7 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error)
}
// in-cluster configs only takes a host, token, or CA file
// if any of them were individually provided, ovewrite anything else
// if any of them were individually provided, overwrite anything else
if config.overrides != nil {
if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
icc.Host = server
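
With Exec plumbed into getUserIdentificationPartialConfig and canIdentifyUser, a kubeconfig whose user defines only an exec plugin now yields a usable rest config. A hedged sketch, assuming a hypothetical kubeconfig path:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig whose current user only defines an exec plugin.
	loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: "/path/to/kubeconfig"}
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})

	restConfig, err := clientConfig.ClientConfig()
	if err != nil {
		panic(err)
	}
	// Exec-only users now pass canIdentifyUser, and the plugin configuration is
	// carried through to the rest client as ExecProvider.
	fmt.Println("exec provider configured:", restConfig.ExecProvider != nil)
}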

View File

@@ -94,7 +94,7 @@ func TestInsecureOverridesCA(t *testing.T) {
}
func TestMergeContext(t *testing.T) {
const namespace = "overriden-namespace"
const namespace = "overridden-namespace"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)

View File

@@ -111,7 +111,7 @@ func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool {
// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
// Callers can put the chain together however they want, but we'd recommend:
// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if thie file is not present
// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if this file is not present
type ClientConfigLoadingRules struct {
ExplicitPath string
Precedence []string
@@ -420,7 +420,7 @@ func WriteToFile(config clientcmdapi.Config, filename string) error {
func lockFile(filename string) error {
// TODO: find a way to do this with actual file locks. Will
// probably need seperate solution for windows and linux.
// probably need separate solution for windows and Linux.
// Make sure the dir exists before we try to create a lock file.
dir := filepath.Dir(filename)
@@ -557,7 +557,12 @@ func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string {
}
func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string {
return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile}
s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile}
// Only resolve exec command if it isn't PATH based.
if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) {
s = append(s, &authInfo.Exec.Command)
}
return s
}
// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory

View File

@@ -206,6 +206,9 @@ func TestResolveRelativePaths(t *testing.T) {
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"},
"absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"},
"relative-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "../relative/client/cmd"}},
"absolute-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "/absolute/client/cmd"}},
"PATH-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "cmd"}},
},
Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-1": {CertificateAuthority: "../relative/ca"},
@@ -291,9 +294,21 @@ func TestResolveRelativePaths(t *testing.T) {
matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientCertificate, authInfo.ClientCertificate, t)
matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientKey, authInfo.ClientKey, t)
}
if key == "relative-cmd-1" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos[key].Exec.Command), authInfo.Exec.Command, t)
}
if key == "absolute-cmd-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos[key].Exec.Command, authInfo.Exec.Command, t)
}
if key == "PATH-cmd-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos[key].Exec.Command, authInfo.Exec.Command, t)
}
}
if foundAuthInfoCount != 4 {
t.Errorf("Expected 4 users, found %v: %v", foundAuthInfoCount, mergedConfig.AuthInfos)
if foundAuthInfoCount != 7 {
t.Errorf("Expected 7 users, found %v: %v", foundAuthInfoCount, mergedConfig.AuthInfos)
}
}

View File

@@ -237,6 +237,25 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err
}
}
if authInfo.Exec != nil {
if authInfo.AuthProvider != nil {
validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName))
}
if len(authInfo.Exec.Command) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName))
}
if len(authInfo.Exec.APIVersion) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName))
}
for _, v := range authInfo.Exec.Env {
if len(v.Name) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName))
} else if len(v.Value) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("env variable %s value must be specified for %v to use exec authentication plugin", v.Name, authInfoName))
}
}
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) {
validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
@@ -253,6 +272,10 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err
func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error {
validationErrors := make([]error, 0)
if len(contextName) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context))
}
if len(context.AuthInfo) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName))
} else if _, exists := config.AuthInfos[context.AuthInfo]; !exists {

View File

@@ -62,6 +62,7 @@ func TestConfirmUsableBadInfoButOkConfig(t *testing.T) {
okTest.testConfirmUsable("clean", t)
badValidation.testConfig(t)
}
func TestConfirmUsableBadInfoConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
@@ -83,6 +84,7 @@ func TestConfirmUsableBadInfoConfig(t *testing.T) {
test.testConfirmUsable("first", t)
}
func TestConfirmUsableEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
@@ -92,6 +94,7 @@ func TestConfirmUsableEmptyConfig(t *testing.T) {
test.testConfirmUsable("", t)
}
func TestConfirmUsableMissingConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
@@ -101,6 +104,7 @@ func TestConfirmUsableMissingConfig(t *testing.T) {
test.testConfirmUsable("not-here", t)
}
func TestValidateEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
@@ -110,6 +114,7 @@ func TestValidateEmptyConfig(t *testing.T) {
test.testConfig(t)
}
func TestValidateMissingCurrentContextConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
@@ -120,6 +125,7 @@ func TestValidateMissingCurrentContextConfig(t *testing.T) {
test.testConfig(t)
}
func TestIsContextNotFound(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
@@ -172,6 +178,7 @@ func TestValidateMissingReferencesConfig(t *testing.T) {
test.testContext("anything", t)
test.testConfig(t)
}
func TestValidateEmptyContext(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
@@ -185,6 +192,19 @@ func TestValidateEmptyContext(t *testing.T) {
test.testConfig(t)
}
func TestValidateEmptyContextName(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts[""] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"empty context name", "is not allowed"},
}
test.testContext("", t)
test.testConfig(t)
}
func TestValidateEmptyClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["empty"] = clientcmdapi.NewCluster()
@@ -223,6 +243,7 @@ func TestValidateMissingCAFileClusterInfo(t *testing.T) {
test.testCluster("missing ca", t)
test.testConfig(t)
}
func TestValidateCleanClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
@@ -235,6 +256,7 @@ func TestValidateCleanClusterInfo(t *testing.T) {
test.testCluster("clean", t)
test.testConfig(t)
}
func TestValidateCleanWithCAClusterInfo(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
@@ -262,6 +284,7 @@ func TestValidateEmptyAuthInfo(t *testing.T) {
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
@@ -276,6 +299,7 @@ func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) {
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateCertDataOverridesFiles(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
@@ -295,6 +319,7 @@ func TestValidateCertDataOverridesFiles(t *testing.T) {
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateCleanCertFilesAuthInfo(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
@@ -311,6 +336,7 @@ func TestValidateCleanCertFilesAuthInfo(t *testing.T) {
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateCleanTokenAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
@@ -339,6 +365,106 @@ func TestValidateMultipleMethodsAuthInfo(t *testing.T) {
test.testConfig(t)
}
func TestValidateAuthInfoExec(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
Args: []string{"hello", "world"},
Env: []clientcmdapi.ExecEnvVar{
{Name: "foo", Value: "bar"},
},
},
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecNoVersion(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"apiVersion must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecNoCommand(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
APIVersion: "clientauthentication.k8s.io/v1alpha1",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"command must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecWithAuthProvider(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
AuthProvider: &clientcmdapi.AuthProviderConfig{
Name: "oidc",
},
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"authProvider cannot be provided in combination with an exec plugin for user",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecInvalidEnv(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
Env: []clientcmdapi.ExecEnvVar{
{Name: "foo"}, // No value
},
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"env variable foo value must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
type configValidationTest struct {
config *clientcmdapi.Config
expectedErrorSubstring []string
@@ -363,6 +489,7 @@ func (c configValidationTest) testContext(contextName string, t *testing.T) {
}
}
}
func (c configValidationTest) testConfirmUsable(contextName string, t *testing.T) {
err := ConfirmUsable(*c.config, contextName)
@@ -382,6 +509,7 @@ func (c configValidationTest) testConfirmUsable(contextName string, t *testing.T
}
}
}
func (c configValidationTest) testConfig(t *testing.T) {
err := Validate(*c.config)
@@ -404,6 +532,7 @@ func (c configValidationTest) testConfig(t *testing.T) {
}
}
}
func (c configValidationTest) testCluster(clusterName string, t *testing.T) {
errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName])

View File

@@ -37,14 +37,13 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["pager_test.go"],
importpath = "k8s.io/client-go/tools/pager",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

View File

@@ -25,14 +25,14 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func list(count int, rv string) *metainternalversion.List {
var list metainternalversion.List
for i := 0; i < count; i++ {
list.Items = append(list.Items, &metav1alpha1.PartialObjectMetadata{
list.Items = append(list.Items, &metav1beta1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", i),
},
@@ -80,7 +80,7 @@ func (p *testPager) PagedList(ctx context.Context, options metav1.ListOptions) (
if p.remaining <= 0 {
break
}
list.Items = append(list.Items, &metav1alpha1.PartialObjectMetadata{
list.Items = append(list.Items, &metav1beta1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", p.index),
},