author    Matthew Heon <matthew.heon@gmail.com>  2017-11-01 11:24:59 -0400
committer Matthew Heon <matthew.heon@gmail.com>  2017-11-01 11:24:59 -0400
commit    a031b83a09a8628435317a03f199cdc18b78262f (patch)
tree      bc017a96769ce6de33745b8b0b1304ccf38e9df0 /vendor/k8s.io/apimachinery/pkg/util
parent    2b74391cd5281f6fdf391ff8ad50fd1490f6bf89 (diff)
download  podman-a031b83a09a8628435317a03f199cdc18b78262f.tar.gz
          podman-a031b83a09a8628435317a03f199cdc18b78262f.tar.bz2
          podman-a031b83a09a8628435317a03f199cdc18b78262f.zip
Initial checkin from CRI-O repo
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Diffstat (limited to 'vendor/k8s.io/apimachinery/pkg/util')
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/cache/cache.go                    83
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go          102
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/clock/clock.go                   327
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/diff/diff.go                     280
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/errors/doc.go                     18
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/errors/errors.go                 201
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/framer/framer.go                 167
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go                 19
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go         149
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go    145
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go  326
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go       107
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go           381
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto            43
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go                 177
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/json/json.go                     107
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go             101
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go               133
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/http.go                      401
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/interface.go                 278
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/port_range.go                113
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/port_split.go                 77
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/util.go                       46
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/rand/rand.go                      85
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go        53
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go               161
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/sets/byte.go                     203
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/sets/doc.go                       20
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/sets/empty.go                     23
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/sets/int.go                      203
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/sets/int64.go                    203
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/sets/string.go                   203
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go         2115
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go       254
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go          91
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/validation/validation.go         343
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/wait/doc.go                       19
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go                     349
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go                  346
39 files changed, 8452 insertions, 0 deletions
diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go
new file mode 100644
index 000000000..9a09fe54d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "sync"
+)
+
+const (
+ shardsCount int = 32
+)
+
+type Cache []*cacheShard
+
+func NewCache(maxSize int) Cache {
+ if maxSize < shardsCount {
+ maxSize = shardsCount
+ }
+ cache := make(Cache, shardsCount)
+ for i := 0; i < shardsCount; i++ {
+ cache[i] = &cacheShard{
+ items: make(map[uint64]interface{}),
+ maxSize: maxSize / shardsCount,
+ }
+ }
+ return cache
+}
+
+func (c Cache) getShard(index uint64) *cacheShard {
+ return c[index%uint64(shardsCount)]
+}
+
+// Returns true if object already existed, false otherwise.
+func (c *Cache) Add(index uint64, obj interface{}) bool {
+ return c.getShard(index).add(index, obj)
+}
+
+func (c *Cache) Get(index uint64) (obj interface{}, found bool) {
+ return c.getShard(index).get(index)
+}
+
+type cacheShard struct {
+ items map[uint64]interface{}
+ sync.RWMutex
+ maxSize int
+}
+
+// Returns true if object already existed, false otherwise.
+func (s *cacheShard) add(index uint64, obj interface{}) bool {
+ s.Lock()
+ defer s.Unlock()
+ _, isOverwrite := s.items[index]
+ if !isOverwrite && len(s.items) >= s.maxSize {
+ var randomKey uint64
+ for randomKey = range s.items {
+ break
+ }
+ delete(s.items, randomKey)
+ }
+ s.items[index] = obj
+ return isOverwrite
+}
+
+func (s *cacheShard) get(index uint64) (obj interface{}, found bool) {
+ s.RLock()
+ defer s.RUnlock()
+ obj, found = s.items[index]
+ return
+}
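
A minimal usage sketch of the sharded cache above (not part of the commit; the vendored import path is assumed):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/cache"
)

func main() {
    // maxSize is split evenly across 32 shards; values below 32 are rounded up.
    c := cache.NewCache(64)
    overwrote := c.Add(42, "value-a") // the uint64 key also selects the shard
    fmt.Println(overwrote)            // false: key 42 was not present before
    if v, ok := c.Get(42); ok {
        fmt.Println(v) // value-a
    }
}

Note that a full shard evicts an arbitrary key (Go map iteration order), so this is a best-effort cache rather than a strict LRU.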
diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
new file mode 100644
index 000000000..f6b307aa6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "sync"
+ "time"
+
+ "github.com/hashicorp/golang-lru"
+)
+
+// Clock defines an interface for obtaining the current time
+type Clock interface {
+ Now() time.Time
+}
+
+// realClock implements the Clock interface by calling time.Now()
+type realClock struct{}
+
+func (realClock) Now() time.Time { return time.Now() }
+
+// LRUExpireCache is a cache that keeps the most recently accessed keys, each
+// with a TTL beyond which entries are forcibly expired.
+type LRUExpireCache struct {
+ // clock is used to obtain the current time
+ clock Clock
+
+ cache *lru.Cache
+ lock sync.Mutex
+}
+
+// NewLRUExpireCache creates an expiring cache with the given size
+func NewLRUExpireCache(maxSize int) *LRUExpireCache {
+ return NewLRUExpireCacheWithClock(maxSize, realClock{})
+}
+
+// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
+func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
+ cache, err := lru.New(maxSize)
+ if err != nil {
+ // if called with an invalid size
+ panic(err)
+ }
+ return &LRUExpireCache{clock: clock, cache: cache}
+}
+
+type cacheEntry struct {
+ value interface{}
+ expireTime time.Time
+}
+
+// Add adds the value to the cache at key with the specified maximum duration.
+func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
+}
+
+// Get returns the value at the specified key from the cache if it exists and is not
+// expired, or returns false.
+func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ e, ok := c.cache.Get(key)
+ if !ok {
+ return nil, false
+ }
+ if c.clock.Now().After(e.(*cacheEntry).expireTime) {
+ c.cache.Remove(key)
+ return nil, false
+ }
+ return e.(*cacheEntry).value, true
+}
+
+// Remove removes the specified key from the cache if it exists
+func (c *LRUExpireCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.cache.Remove(key)
+}
+
+// Keys returns all the keys in the cache, even if they are expired; a
+// subsequent Get on such a key may return not found. Keys are returned
+// ordered from oldest to newest.
+func (c *LRUExpireCache) Keys() []interface{} {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.cache.Keys()
+}
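
A short sketch of the TTL behavior (vendored import path assumed; the timings are illustrative only):

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/cache"
)

func main() {
    c := cache.NewLRUExpireCache(128)
    c.Add("token", "abc123", 50*time.Millisecond)
    if v, ok := c.Get("token"); ok {
        fmt.Println(v) // abc123: still within the TTL
    }
    time.Sleep(100 * time.Millisecond)
    _, ok := c.Get("token")
    fmt.Println(ok) // false: the entry expired and was removed on lookup
}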
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
new file mode 100644
index 000000000..c303a212a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
@@ -0,0 +1,327 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import (
+ "sync"
+ "time"
+)
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+ After(d time.Duration) <-chan time.Time
+ NewTimer(d time.Duration) Timer
+ Sleep(d time.Duration)
+ Tick(d time.Duration) <-chan time.Time
+}
+
+var (
+ _ = Clock(RealClock{})
+ _ = Clock(&FakeClock{})
+ _ = Clock(&IntervalClock{})
+)
+
+// RealClock really calls time.Now()
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+ return time.Since(ts)
+}
+
+// Same as time.After(d).
+func (RealClock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+func (RealClock) NewTimer(d time.Duration) Timer {
+ return &realTimer{
+ timer: time.NewTimer(d),
+ }
+}
+
+func (RealClock) Tick(d time.Duration) <-chan time.Time {
+ return time.Tick(d)
+}
+
+func (RealClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// FakeClock implements Clock, but returns an arbitrary time.
+type FakeClock struct {
+ lock sync.RWMutex
+ time time.Time
+
+ // waiters are waiting for the fake time to pass their specified time
+ waiters []fakeClockWaiter
+}
+
+type fakeClockWaiter struct {
+ targetTime time.Time
+ stepInterval time.Duration
+ skipIfBlocked bool
+ destChan chan time.Time
+ fired bool
+}
+
+func NewFakeClock(t time.Time) *FakeClock {
+ return &FakeClock{
+ time: t,
+ }
+}
+
+// Now returns f's time.
+func (f *FakeClock) Now() time.Time {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return f.time
+}
+
+// Since returns time since the time in f.
+func (f *FakeClock) Since(ts time.Time) time.Duration {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return f.time.Sub(ts)
+}
+
+// Fake version of time.After(d).
+func (f *FakeClock) After(d time.Duration) <-chan time.Time {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ stopTime := f.time.Add(d)
+ ch := make(chan time.Time, 1) // Don't block!
+ f.waiters = append(f.waiters, fakeClockWaiter{
+ targetTime: stopTime,
+ destChan: ch,
+ })
+ return ch
+}
+
+// Fake version of time.NewTimer(d).
+func (f *FakeClock) NewTimer(d time.Duration) Timer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ stopTime := f.time.Add(d)
+ ch := make(chan time.Time, 1) // Don't block!
+ timer := &fakeTimer{
+ fakeClock: f,
+ waiter: fakeClockWaiter{
+ targetTime: stopTime,
+ destChan: ch,
+ },
+ }
+ f.waiters = append(f.waiters, timer.waiter)
+ return timer
+}
+
+func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ tickTime := f.time.Add(d)
+ ch := make(chan time.Time, 1) // hold one tick
+ f.waiters = append(f.waiters, fakeClockWaiter{
+ targetTime: tickTime,
+ stepInterval: d,
+ skipIfBlocked: true,
+ destChan: ch,
+ })
+
+ return ch
+}
+
+// Step moves the clock forward by d, notifying anyone waiting via After, Tick, or NewTimer.
+func (f *FakeClock) Step(d time.Duration) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.setTimeLocked(f.time.Add(d))
+}
+
+// SetTime sets the time.
+func (f *FakeClock) SetTime(t time.Time) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.setTimeLocked(t)
+}
+
+// Actually changes the time and checks any waiters. f must be write-locked.
+func (f *FakeClock) setTimeLocked(t time.Time) {
+ f.time = t
+ newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
+ for i := range f.waiters {
+ w := &f.waiters[i]
+ if !w.targetTime.After(t) {
+
+ if w.skipIfBlocked {
+ select {
+ case w.destChan <- t:
+ w.fired = true
+ default:
+ }
+ } else {
+ w.destChan <- t
+ w.fired = true
+ }
+
+ if w.stepInterval > 0 {
+ for !w.targetTime.After(t) {
+ w.targetTime = w.targetTime.Add(w.stepInterval)
+ }
+ newWaiters = append(newWaiters, *w)
+ }
+
+ } else {
+ newWaiters = append(newWaiters, f.waiters[i])
+ }
+ }
+ f.waiters = newWaiters
+}
+
+// HasWaiters returns true if After has been called on f but not yet satisfied
+// (so you can write race-free tests).
+func (f *FakeClock) HasWaiters() bool {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return len(f.waiters) > 0
+}
+
+func (f *FakeClock) Sleep(d time.Duration) {
+ f.Step(d)
+}
+
+// IntervalClock implements Clock, but each invocation of Now steps the clock forward by the specified duration.
+type IntervalClock struct {
+ Time time.Time
+ Duration time.Duration
+}
+
+// Now returns i's time.
+func (i *IntervalClock) Now() time.Time {
+ i.Time = i.Time.Add(i.Duration)
+ return i.Time
+}
+
+// Since returns time since the time in i.
+func (i *IntervalClock) Since(ts time.Time) time.Duration {
+ return i.Time.Sub(ts)
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) After(d time.Duration) <-chan time.Time {
+ panic("IntervalClock doesn't implement After")
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) NewTimer(d time.Duration) Timer {
+ panic("IntervalClock doesn't implement NewTimer")
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
+ panic("IntervalClock doesn't implement Tick")
+}
+
+func (*IntervalClock) Sleep(d time.Duration) {
+ panic("IntervalClock doesn't implement Sleep")
+}
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+ C() <-chan time.Time
+ Stop() bool
+ Reset(d time.Duration) bool
+}
+
+var (
+ _ = Timer(&realTimer{})
+ _ = Timer(&fakeTimer{})
+)
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+ timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+ return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+ return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+ return r.timer.Reset(d)
+}
+
+// fakeTimer implements Timer based on a FakeClock.
+type fakeTimer struct {
+ fakeClock *FakeClock
+ waiter fakeClockWaiter
+}
+
+// C returns the channel that notifies when this timer has fired.
+func (f *fakeTimer) C() <-chan time.Time {
+ return f.waiter.destChan
+}
+
+// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
+func (f *fakeTimer) Stop() bool {
+ f.fakeClock.lock.Lock()
+ defer f.fakeClock.lock.Unlock()
+
+ newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters))
+ for i := range f.fakeClock.waiters {
+ w := &f.fakeClock.waiters[i]
+ if w != &f.waiter {
+ newWaiters = append(newWaiters, *w)
+ }
+ }
+
+ f.fakeClock.waiters = newWaiters
+
+ return !f.waiter.fired
+}
+
+// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
+// fired, or false otherwise.
+func (f *fakeTimer) Reset(d time.Duration) bool {
+ f.fakeClock.lock.Lock()
+ defer f.fakeClock.lock.Unlock()
+
+ active := !f.waiter.fired
+
+ f.waiter.fired = false
+ f.waiter.targetTime = f.fakeClock.time.Add(d)
+
+ return active
+}
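
A sketch of how FakeClock supports race-free time-based tests (import path assumed from the vendored tree):

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/clock"
)

func main() {
    start := time.Date(2017, 11, 1, 0, 0, 0, 0, time.UTC)
    c := clock.NewFakeClock(start)

    ch := c.After(time.Minute)  // registers a waiter; nothing is sent yet
    fmt.Println(c.HasWaiters()) // true

    c.Step(2 * time.Minute)        // advance fake time past the waiter's target
    fmt.Println((<-ch).Sub(start)) // 2m0s: the stepped-to time is delivered
}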
diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
new file mode 100644
index 000000000..0f730875e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
@@ -0,0 +1,280 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package diff
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/davecgh/go-spew/spew"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// StringDiff diffs a and b and returns a human readable diff.
+func StringDiff(a, b string) string {
+ ba := []byte(a)
+ bb := []byte(b)
+ out := []byte{}
+ i := 0
+ for ; i < len(ba) && i < len(bb); i++ {
+ if ba[i] != bb[i] {
+ break
+ }
+ out = append(out, ba[i])
+ }
+ out = append(out, []byte("\n\nA: ")...)
+ out = append(out, ba[i:]...)
+ out = append(out, []byte("\n\nB: ")...)
+ out = append(out, bb[i:]...)
+ out = append(out, []byte("\n\n")...)
+ return string(out)
+}
+
+// ObjectDiff writes the two objects out as JSON and prints out the identical part of
+// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'.
+// For debugging tests.
+func ObjectDiff(a, b interface{}) string {
+ ab, err := json.Marshal(a)
+ if err != nil {
+ panic(fmt.Sprintf("a: %v", err))
+ }
+ bb, err := json.Marshal(b)
+ if err != nil {
+ panic(fmt.Sprintf("b: %v", err))
+ }
+ return StringDiff(string(ab), string(bb))
+}
+
+// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects,
+// which shows absolutely everything by recursing into every single pointer
+// (go's %#v formatters OTOH stop at a certain point). This is needed when you
+// can't figure out why reflect.DeepEqual is returning false and nothing is
+// showing you differences. This will.
+func ObjectGoPrintDiff(a, b interface{}) string {
+ s := spew.ConfigState{DisableMethods: true}
+ return StringDiff(
+ s.Sprintf("%#v", a),
+ s.Sprintf("%#v", b),
+ )
+}
+
+func ObjectReflectDiff(a, b interface{}) string {
+ vA, vB := reflect.ValueOf(a), reflect.ValueOf(b)
+ if vA.Type() != vB.Type() {
+ return fmt.Sprintf("type A %T and type B %T do not match", a, b)
+ }
+ diffs := objectReflectDiff(field.NewPath("object"), vA, vB)
+ if len(diffs) == 0 {
+ return "<no diffs>"
+ }
+ out := []string{""}
+ for _, d := range diffs {
+ out = append(out,
+ fmt.Sprintf("%s:", d.path),
+ limit(fmt.Sprintf(" a: %#v", d.a), 80),
+ limit(fmt.Sprintf(" b: %#v", d.b), 80),
+ )
+ }
+ return strings.Join(out, "\n")
+}
+
+func limit(s string, max int) string {
+ if len(s) > max {
+ return s[:max]
+ }
+ return s
+}
+
+func public(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ return s[:1] == strings.ToUpper(s[:1])
+}
+
+type diff struct {
+ path *field.Path
+ a, b interface{}
+}
+
+type orderedDiffs []diff
+
+func (d orderedDiffs) Len() int { return len(d) }
+func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
+func (d orderedDiffs) Less(i, j int) bool {
+ return d[i].path.String() < d[j].path.String()
+}
+
+func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff {
+ switch a.Type().Kind() {
+ case reflect.Struct:
+ var changes []diff
+ for i := 0; i < a.Type().NumField(); i++ {
+ if !public(a.Type().Field(i).Name) {
+ if reflect.DeepEqual(a.Interface(), b.Interface()) {
+ continue
+ }
+ return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
+ }
+ if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 {
+ changes = append(changes, sub...)
+ } else {
+ if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) {
+ changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()})
+ }
+ }
+ }
+ return changes
+ case reflect.Ptr, reflect.Interface:
+ if a.IsNil() || b.IsNil() {
+ switch {
+ case a.IsNil() && b.IsNil():
+ return nil
+ case a.IsNil():
+ return []diff{{path: path, a: nil, b: b.Interface()}}
+ default:
+ return []diff{{path: path, a: a.Interface(), b: nil}}
+ }
+ }
+ return objectReflectDiff(path, a.Elem(), b.Elem())
+ case reflect.Chan:
+ if !reflect.DeepEqual(a.Interface(), b.Interface()) {
+ return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
+ }
+ return nil
+ case reflect.Slice:
+ lA, lB := a.Len(), b.Len()
+ l := lA
+ if lB < lA {
+ l = lB
+ }
+ if lA == lB && lA == 0 {
+ if a.IsNil() != b.IsNil() {
+ return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
+ }
+ return nil
+ }
+ for i := 0; i < l; i++ {
+ if !reflect.DeepEqual(a.Index(i), b.Index(i)) {
+ return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))
+ }
+ }
+ var diffs []diff
+ for i := l; i < lA; i++ {
+ diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil})
+ }
+ for i := l; i < lB; i++ {
+ diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)})
+ }
+ if len(diffs) == 0 {
+ diffs = append(diffs, diff{path: path, a: a, b: b})
+ }
+ return diffs
+ case reflect.Map:
+ if reflect.DeepEqual(a.Interface(), b.Interface()) {
+ return nil
+ }
+ aKeys := make(map[interface{}]interface{})
+ for _, key := range a.MapKeys() {
+ aKeys[key.Interface()] = a.MapIndex(key).Interface()
+ }
+ var missing []diff
+ for _, key := range b.MapKeys() {
+ if _, ok := aKeys[key.Interface()]; ok {
+ delete(aKeys, key.Interface())
+ if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) {
+ continue
+ }
+ missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...)
+ continue
+ }
+ missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()})
+ }
+ for key, value := range aKeys {
+ missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil})
+ }
+ if len(missing) == 0 {
+ missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()})
+ }
+ sort.Sort(orderedDiffs(missing))
+ return missing
+ default:
+ if reflect.DeepEqual(a.Interface(), b.Interface()) {
+ return nil
+ }
+ if !a.CanInterface() {
+ return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
+ }
+ return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
+ }
+}
+
+// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
+// enabling easy visual scanning for mismatches.
+func ObjectGoPrintSideBySide(a, b interface{}) string {
+ s := spew.ConfigState{
+ Indent: " ",
+ // Extra deep spew.
+ DisableMethods: true,
+ }
+ sA := s.Sdump(a)
+ sB := s.Sdump(b)
+
+ linesA := strings.Split(sA, "\n")
+ linesB := strings.Split(sB, "\n")
+ width := 0
+ for _, s := range linesA {
+ l := len(s)
+ if l > width {
+ width = l
+ }
+ }
+ for _, s := range linesB {
+ l := len(s)
+ if l > width {
+ width = l
+ }
+ }
+ buf := &bytes.Buffer{}
+ w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
+ max := len(linesA)
+ if len(linesB) > max {
+ max = len(linesB)
+ }
+ for i := 0; i < max; i++ {
+ var a, b string
+ if i < len(linesA) {
+ a = linesA[i]
+ }
+ if i < len(linesB) {
+ b = linesB[i]
+ }
+ fmt.Fprintf(w, "%s\t%s\n", a, b)
+ }
+ w.Flush()
+ return buf.String()
+}
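
For illustration, a sketch of ObjectReflectDiff on two hypothetical structs (the podSpec type is invented here, not from the commit):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/diff"
)

type podSpec struct {
    Name   string
    Labels map[string]string
}

func main() {
    a := podSpec{Name: "web", Labels: map[string]string{"tier": "frontend"}}
    b := podSpec{Name: "web", Labels: map[string]string{"tier": "backend"}}
    // Prints each differing field path with both values, e.g.:
    //   object.Labels[tier]:
    //     a: "frontend"
    //     b: "backend"
    fmt.Println(diff.ObjectReflectDiff(a, b))
}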
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
new file mode 100644
index 000000000..5d4d6250a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors implements various utility functions and types around errors.
+package errors // import "k8s.io/apimachinery/pkg/util/errors"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
new file mode 100644
index 000000000..bdea0e16c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+ "errors"
+ "fmt"
+)
+
+// MessageCountMap contains the number of occurrences of each error message.
+type MessageCountMap map[string]int
+
+// Aggregate represents an object that contains multiple errors, but does not
+// necessarily have singular semantic meaning.
+type Aggregate interface {
+ error
+ Errors() []error
+}
+
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid a
+// nil pointer panic when Error() is called on the result.
+func NewAggregate(errlist []error) Aggregate {
+ if len(errlist) == 0 {
+ return nil
+ }
+ // Filter out any nil entries in the input list.
+ var errs []error
+ for _, e := range errlist {
+ if e != nil {
+ errs = append(errs, e)
+ }
+ }
+ if len(errs) == 0 {
+ return nil
+ }
+ return aggregate(errs)
+}
+
+// aggregate implements both the error and Aggregate interfaces. Keeping it
+// private prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregate []error
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+ if len(agg) == 0 {
+ // This should never happen, really.
+ return ""
+ }
+ if len(agg) == 1 {
+ return agg[0].Error()
+ }
+ result := fmt.Sprintf("[%s", agg[0].Error())
+ for i := 1; i < len(agg); i++ {
+ result += fmt.Sprintf(", %s", agg[i].Error())
+ }
+ result += "]"
+ return result
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error {
+ return []error(agg)
+}
+
+// Matcher is used to match errors. Returns true if the error matches.
+type Matcher func(error) bool
+
+// FilterOut removes all errors that match any of the matchers from the input
+// error. If the input is a singular error, only that error is tested. If the
+// input implements the Aggregate interface, the list of errors will be
+// processed recursively.
+//
+// This can be used, for example, to remove known-OK errors (such as io.EOF or
+// os.PathNotFound) from a list of errors.
+func FilterOut(err error, fns ...Matcher) error {
+ if err == nil {
+ return nil
+ }
+ if agg, ok := err.(Aggregate); ok {
+ return NewAggregate(filterErrors(agg.Errors(), fns...))
+ }
+ if !matchesError(err, fns...) {
+ return err
+ }
+ return nil
+}
+
+// matchesError returns true if any Matcher returns true
+func matchesError(err error, fns ...Matcher) bool {
+ for _, fn := range fns {
+ if fn(err) {
+ return true
+ }
+ }
+ return false
+}
+
+// filterErrors returns any errors (or nested errors, if the list contains
+// nested Errors) for which all fns return false. If no errors
+// remain, a nil list is returned. The resulting slice will have all
+// nested slices flattened as a side effect.
+func filterErrors(list []error, fns ...Matcher) []error {
+ result := []error{}
+ for _, err := range list {
+ r := FilterOut(err, fns...)
+ if r != nil {
+ result = append(result, r)
+ }
+ }
+ return result
+}
+
+// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary
+// nesting, and flattens them all into a single Aggregate, recursively.
+func Flatten(agg Aggregate) Aggregate {
+ result := []error{}
+ if agg == nil {
+ return nil
+ }
+ for _, err := range agg.Errors() {
+ if a, ok := err.(Aggregate); ok {
+ r := Flatten(a)
+ if r != nil {
+ result = append(result, r.Errors()...)
+ }
+ } else {
+ if err != nil {
+ result = append(result, err)
+ }
+ }
+ }
+ return NewAggregate(result)
+}
+
+// CreateAggregateFromMessageCountMap converts a MessageCountMap into an Aggregate.
+func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
+ if m == nil {
+ return nil
+ }
+ result := make([]error, 0, len(m))
+ for errStr, count := range m {
+ var countStr string
+ if count > 1 {
+ countStr = fmt.Sprintf(" (repeated %v times)", count)
+ }
+ result = append(result, fmt.Errorf("%v%v", errStr, countStr))
+ }
+ return NewAggregate(result)
+}
+
+// Reduce will return err or, if err is an Aggregate and only has one item,
+// the first item in the aggregate.
+func Reduce(err error) error {
+ if agg, ok := err.(Aggregate); ok && err != nil {
+ switch len(agg.Errors()) {
+ case 1:
+ return agg.Errors()[0]
+ case 0:
+ return nil
+ }
+ }
+ return err
+}
+
+// AggregateGoroutines runs the provided functions in parallel, stuffing all
+// non-nil errors into the returned Aggregate.
+// Returns nil if all the functions complete successfully.
+func AggregateGoroutines(funcs ...func() error) Aggregate {
+ errChan := make(chan error, len(funcs))
+ for _, f := range funcs {
+ go func(f func() error) { errChan <- f() }(f)
+ }
+ errs := make([]error, 0)
+ for i := 0; i < cap(errChan); i++ {
+ if err := <-errChan; err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return NewAggregate(errs)
+}
+
+// ErrPreconditionViolated is returned when the precondition is violated
+var ErrPreconditionViolated = errors.New("precondition is violated")
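
A brief sketch of the aggregate-error helpers above (the error values are invented for illustration):

package main

import (
    "errors"
    "fmt"
    "io"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    agg := utilerrors.NewAggregate([]error{
        io.EOF,
        errors.New("disk full"),
        nil, // nil entries are filtered out
    })
    fmt.Println(agg) // [EOF, disk full]

    // FilterOut drops known-OK errors; Reduce unwraps a one-element aggregate.
    rest := utilerrors.FilterOut(agg, func(err error) bool { return err == io.EOF })
    fmt.Println(utilerrors.Reduce(rest)) // disk full
}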
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
new file mode 100644
index 000000000..066680f44
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package framer implements simple frame decoding techniques for an io.ReadCloser
+package framer
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "io"
+)
+
+type lengthDelimitedFrameWriter struct {
+ w io.Writer
+ h [4]byte
+}
+
+func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
+ return &lengthDelimitedFrameWriter{w: w}
+}
+
+// Write writes a single frame to the nested writer, prepending it with the
+// length in bytes of data (as a 4 byte, big-endian uint32).
+func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
+ binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
+ n, err := w.w.Write(w.h[:])
+ if err != nil {
+ return 0, err
+ }
+ if n != len(w.h) {
+ return 0, io.ErrShortWrite
+ }
+ return w.w.Write(data)
+}
+
+type lengthDelimitedFrameReader struct {
+ r io.ReadCloser
+ remaining int
+}
+
+// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
+// frames off of a stream.
+//
+// The protocol is:
+//
+// stream: message ...
+// message: prefix body
+// prefix: 4 byte uint32 in BigEndian order, denotes length of body
+// body: bytes (0..prefix)
+//
+// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
+// will be returned along with the number of bytes read.
+func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
+ return &lengthDelimitedFrameReader{r: r}
+}
+
+// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
+// is returned and subsequent calls will read the remainder of the frame. A frame is complete when
+// err is nil.
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
+ if r.remaining <= 0 {
+ header := [4]byte{}
+ n, err := io.ReadAtLeast(r.r, header[:4], 4)
+ if err != nil {
+ return 0, err
+ }
+ if n != 4 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ frameLength := int(binary.BigEndian.Uint32(header[:]))
+ r.remaining = frameLength
+ }
+
+ expect := r.remaining
+ max := expect
+ if max > len(data) {
+ max = len(data)
+ }
+ n, err := io.ReadAtLeast(r.r, data[:max], max)
+ r.remaining -= n
+ if err == io.ErrShortBuffer || r.remaining > 0 {
+ return n, io.ErrShortBuffer
+ }
+ if err != nil {
+ return n, err
+ }
+ if n != expect {
+ return n, io.ErrUnexpectedEOF
+ }
+
+ return n, nil
+}
+
+func (r *lengthDelimitedFrameReader) Close() error {
+ return r.r.Close()
+}
+
+type jsonFrameReader struct {
+ r io.ReadCloser
+ decoder *json.Decoder
+ remaining []byte
+}
+
+// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
+// of a wire.
+//
+// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
+// the read.
+func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
+ return &jsonFrameReader{
+ r: r,
+ decoder: json.NewDecoder(r),
+ }
+}
+
+// Read decodes the next JSON object in the stream into data, or returns an error. If the
+// object is larger than data, io.ErrShortBuffer is returned and the remainder is served
+// by subsequent calls.
+func (r *jsonFrameReader) Read(data []byte) (int, error) {
+ // Return whatever remaining data exists from an in progress frame
+ if n := len(r.remaining); n > 0 {
+ if n <= len(data) {
+ data = append(data[0:0], r.remaining...)
+ r.remaining = nil
+ return n, nil
+ }
+
+ n = len(data)
+ data = append(data[0:0], r.remaining[:n]...)
+ r.remaining = r.remaining[n:]
+ return n, io.ErrShortBuffer
+ }
+
+ // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
+ // data written to data, or be larger than data and a different array.
+ n := len(data)
+ m := json.RawMessage(data[:0])
+ if err := r.decoder.Decode(&m); err != nil {
+ return 0, err
+ }
+
+ // If capacity of data is less than length of the message, decoder will allocate a new slice
+ // and set m to it, which means we need to copy the partial result back into data and preserve
+ // the remaining result for subsequent reads.
+ if len(m) > n {
+ data = append(data[0:0], m[:n]...)
+ r.remaining = m[n:]
+ return n, io.ErrShortBuffer
+ }
+ return len(m), nil
+}
+
+func (r *jsonFrameReader) Close() error {
+ return r.r.Close()
+}
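
A round-trip sketch of the length-delimited framer (buffer sizes are illustrative):

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"

    "k8s.io/apimachinery/pkg/util/framer"
)

func main() {
    var buf bytes.Buffer
    w := framer.NewLengthDelimitedFrameWriter(&buf)
    w.Write([]byte("hello")) // emits 00 00 00 05 followed by "hello"

    r := framer.NewLengthDelimitedFrameReader(ioutil.NopCloser(&buf))
    frame := make([]byte, 16) // large enough for the whole frame
    n, err := r.Read(frame)
    fmt.Println(string(frame[:n]), err) // hello <nil>
}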
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
new file mode 100644
index 000000000..5893df5bd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package httpstream adds multiplexed streaming support to HTTP requests and
+// responses via connection upgrades.
+package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
new file mode 100644
index 000000000..7c9b791d4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package httpstream
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+const (
+ HeaderConnection = "Connection"
+ HeaderUpgrade = "Upgrade"
+ HeaderProtocolVersion = "X-Stream-Protocol-Version"
+ HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
+)
+
+// NewStreamHandler defines a function that is called when a new Stream is
+// received. If no error is returned, the Stream is accepted; otherwise,
+// the stream is rejected. After the reply frame has been sent, replySent is closed.
+type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
+
+// NoOpNewStreamHandler is a stream handler that accepts a new stream and
+// performs no other logic.
+func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
+
+// Dialer knows how to open a streaming connection to a server.
+type Dialer interface {
+ // Dial opens a streaming connection to a server using one of the protocols
+ // specified (in order of most preferred to least preferred).
+ Dial(protocols ...string) (Connection, string, error)
+}
+
+// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
+// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
+// is invoked, if the upgrade is successful, clients may retrieve the upgraded
+// connection by calling UpgradeRoundTripper.NewConnection().
+type UpgradeRoundTripper interface {
+ http.RoundTripper
+ // NewConnection validates the response and creates a new Connection.
+ NewConnection(resp *http.Response) (Connection, error)
+}
+
+// ResponseUpgrader knows how to upgrade HTTP requests and responses to
+// add streaming support to them.
+type ResponseUpgrader interface {
+ // UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+ // streams. newStreamHandler will be called asynchronously whenever the
+ // other end of the upgraded connection creates a new stream.
+ UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
+}
+
+// Connection represents an upgraded HTTP connection.
+type Connection interface {
+ // CreateStream creates a new Stream with the supplied headers.
+ CreateStream(headers http.Header) (Stream, error)
+ // Close resets all streams and closes the connection.
+ Close() error
+ // CloseChan returns a channel that is closed when the underlying connection is closed.
+ CloseChan() <-chan bool
+ // SetIdleTimeout sets the amount of time the connection may remain idle before
+ // it is automatically closed.
+ SetIdleTimeout(timeout time.Duration)
+}
+
+// Stream represents a bidirectional communications channel that is part of an
+// upgraded connection.
+type Stream interface {
+ io.ReadWriteCloser
+ // Reset closes both directions of the stream, indicating that neither client
+ // nor server can use it any more.
+ Reset() error
+ // Headers returns the headers used to create the stream.
+ Headers() http.Header
+ // Identifier returns the stream's ID.
+ Identifier() uint32
+}
+
+// IsUpgradeRequest returns true if the given request is a connection upgrade request
+func IsUpgradeRequest(req *http.Request) bool {
+ for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
+ if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
+ return true
+ }
+ }
+ return false
+}
+
+func negotiateProtocol(clientProtocols, serverProtocols []string) string {
+ for i := range clientProtocols {
+ for j := range serverProtocols {
+ if clientProtocols[i] == serverProtocols[j] {
+ return clientProtocols[i]
+ }
+ }
+ }
+ return ""
+}
+
+// Handshake performs a subprotocol negotiation. If the client requests a
+// subprotocol, Handshake selects the first common value found in
+// serverProtocols. If a match is found, Handshake adds a response header
+// indicating the chosen subprotocol. If no match is found, HTTP 403 Forbidden
+// is returned, along with a response header containing the list of protocols
+// the server can accept.
+func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
+ clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+ if len(clientProtocols) == 0 {
+ // Kube 1.0 clients didn't support subprotocol negotiation.
+ // TODO require clientProtocols once Kube 1.0 is no longer supported
+ return "", nil
+ }
+
+ if len(serverProtocols) == 0 {
+ // Kube 1.0 servers didn't support subprotocol negotiation. This is mainly for testing.
+ // TODO require serverProtocols once Kube 1.0 is no longer supported
+ return "", nil
+ }
+
+ negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
+ if len(negotiatedProtocol) == 0 {
+ w.WriteHeader(http.StatusForbidden)
+ for i := range serverProtocols {
+ w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
+ }
+ fmt.Fprintf(w, "unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
+ return "", fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server supports %v", clientProtocols, serverProtocols)
+ }
+
+ w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
+ return negotiatedProtocol, nil
+}
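
A sketch of Handshake inside an HTTP handler; the protocol name is a typical Kubernetes stream protocol, used here only as an example:

package main

import (
    "fmt"
    "net/http"

    "k8s.io/apimachinery/pkg/util/httpstream"
)

func handleExec(w http.ResponseWriter, req *http.Request) {
    if !httpstream.IsUpgradeRequest(req) {
        http.Error(w, "expected an upgrade request", http.StatusBadRequest)
        return
    }
    protocol, err := httpstream.Handshake(req, w, []string{"v4.channel.k8s.io"})
    if err != nil {
        return // Handshake already wrote the 403 and the accepted-versions header
    }
    fmt.Println("negotiated:", protocol)
    // ...upgrade the response via an httpstream.ResponseUpgrader and serve streams...
}

func main() {
    http.HandleFunc("/exec", handleExec)
    _ = http.ListenAndServe("127.0.0.1:8080", nil)
}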
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
new file mode 100644
index 000000000..3dc8e23ae
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream"
+ "github.com/golang/glog"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+)
+
+// connection maintains state about a spdystream.Connection and its associated
+// streams.
+type connection struct {
+ conn *spdystream.Connection
+ streams []httpstream.Stream
+ streamLock sync.Mutex
+ newStreamHandler httpstream.NewStreamHandler
+}
+
+// NewClientConnection creates a new SPDY client connection.
+func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
+ spdyConn, err := spdystream.NewConnection(conn, false)
+ if err != nil {
+ defer conn.Close()
+ return nil, err
+ }
+
+ return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil
+}
+
+// NewServerConnection creates a new SPDY server connection. newStreamHandler
+// will be invoked when the server receives a newly created stream from the
+// client.
+func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
+ spdyConn, err := spdystream.NewConnection(conn, true)
+ if err != nil {
+ defer conn.Close()
+ return nil, err
+ }
+
+ return newConnection(spdyConn, newStreamHandler), nil
+}
+
+// newConnection returns a new connection wrapping conn. newStreamHandler
+// will be invoked when the server receives a newly created stream from the
+// client.
+func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+ c := &connection{conn: conn, newStreamHandler: newStreamHandler}
+ go conn.Serve(c.newSpdyStream)
+ return c
+}
+
+// createStreamResponseTimeout indicates how long to wait for the other side to
+// acknowledge the new stream before timing out.
+const createStreamResponseTimeout = 30 * time.Second
+
+// Close first sends a reset for all of the connection's streams, and then
+// closes the underlying spdystream.Connection.
+func (c *connection) Close() error {
+ c.streamLock.Lock()
+ for _, s := range c.streams {
+ // calling Reset instead of Close ensures that all streams are fully torn down
+ s.Reset()
+ }
+ c.streams = make([]httpstream.Stream, 0)
+ c.streamLock.Unlock()
+
+ // now that all streams are fully torn down, it's safe to call close on the underlying connection,
+ // which should be able to terminate immediately at this point, instead of waiting for any
+ // remaining graceful stream termination.
+ return c.conn.Close()
+}
+
+// CreateStream creates a new stream with the specified headers and registers
+// it with the connection.
+func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
+ stream, err := c.conn.CreateStream(headers, nil, false)
+ if err != nil {
+ return nil, err
+ }
+ if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
+ return nil, err
+ }
+
+ c.registerStream(stream)
+ return stream, nil
+}
+
+// registerStream adds the stream s to the connection's list of streams that
+// it owns.
+func (c *connection) registerStream(s httpstream.Stream) {
+ c.streamLock.Lock()
+ c.streams = append(c.streams, s)
+ c.streamLock.Unlock()
+}
+
+// CloseChan returns a channel that, when closed, indicates that the underlying
+// spdystream.Connection has been closed.
+func (c *connection) CloseChan() <-chan bool {
+ return c.conn.CloseChan()
+}
+
+// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
+// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
+// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
+// stream is accepted and registered with the connection.
+func (c *connection) newSpdyStream(stream *spdystream.Stream) {
+ replySent := make(chan struct{})
+ err := c.newStreamHandler(stream, replySent)
+ rejectStream := (err != nil)
+ if rejectStream {
+ glog.Warningf("Stream rejected: %v", err)
+ stream.Reset()
+ return
+ }
+
+ c.registerStream(stream)
+ stream.SendReply(http.Header{}, rejectStream)
+ close(replySent)
+}
+
+// SetIdleTimeout sets the amount of time the connection may remain idle before
+// it is automatically closed.
+func (c *connection) SetIdleTimeout(timeout time.Duration) {
+ c.conn.SetIdleTimeout(timeout)
+}
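
A client-side sketch wiring a raw TCP connection into an SPDY connection (the address is hypothetical; in practice the connection would already have completed the HTTP upgrade handshake):

package main

import (
    "fmt"
    "net"
    "net/http"

    "k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
    raw, err := net.Dial("tcp", "127.0.0.1:8080") // hypothetical upgraded endpoint
    if err != nil {
        fmt.Println("dial:", err)
        return
    }
    conn, err := spdy.NewClientConnection(raw) // raw is closed for us on error
    if err != nil {
        fmt.Println("spdy:", err)
        return
    }
    defer conn.Close()

    stream, err := conn.CreateStream(http.Header{}) // waits up to 30s for the ack
    if err != nil {
        fmt.Println("stream:", err)
        return
    }
    stream.Write([]byte("ping"))
}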
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
new file mode 100644
index 000000000..12bef075d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -0,0 +1,326 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ utilnet "k8s.io/apimachinery/pkg/util/net"
+ "k8s.io/apimachinery/third_party/forked/golang/netutil"
+)
+
+// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
+// multiplexed streams. After RoundTrip() is invoked, Conn will be set
+// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
+type SpdyRoundTripper struct {
+ // tlsConfig holds the TLS configuration settings to use when connecting
+ // to the remote server.
+ tlsConfig *tls.Config
+
+ /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
+ must be safe for use by multiple concurrent goroutines. If this is absolutely
+ necessary, we could keep a map from http.Request to net.Conn. In practice,
+ a client will create an http.Client, set the transport to a new instance of
+ SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
+ */
+ // conn is the underlying network connection to the remote server.
+ conn net.Conn
+
+ // Dialer is the dialer used to connect. Used if non-nil.
+ Dialer *net.Dialer
+
+ // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
+ // Used primarily for mocking the proxy discovery in tests.
+ proxier func(req *http.Request) (*url.URL, error)
+
+ // followRedirects indicates if the round tripper should examine responses for redirects and
+ // follow them.
+ followRedirects bool
+}
+
+var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
+var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
+var _ utilnet.Dialer = &SpdyRoundTripper{}
+
+// NewRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig.
+func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper {
+ return NewSpdyRoundTripper(tlsConfig, followRedirects)
+}
+
+// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig. This function is mostly meant for unit tests.
+func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper {
+ return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects}
+}
+
+// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
+// proxying with a spdy roundtripper.
+func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
+ return s.tlsConfig
+}
+
+// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
+func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
+ conn, err := s.dial(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// dial dials the host specified by req, using TLS if appropriate, optionally
+// using a proxy server if one is configured via environment variables.
+func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
+ proxier := s.proxier
+ if proxier == nil {
+ proxier = http.ProxyFromEnvironment
+ }
+ proxyURL, err := proxier(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if proxyURL == nil {
+ return s.dialWithoutProxy(req.URL)
+ }
+
+ // ensure we use a canonical host with proxyReq
+ targetHost := netutil.CanonicalAddr(req.URL)
+
+ // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
+ proxyReq := http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{},
+ Host: targetHost,
+ }
+
+ if pa := s.proxyAuth(proxyURL); pa != "" {
+ proxyReq.Header = http.Header{}
+ proxyReq.Header.Set("Proxy-Authorization", pa)
+ }
+
+ proxyDialConn, err := s.dialWithoutProxy(proxyURL)
+ if err != nil {
+ return nil, err
+ }
+
+ proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
+ _, err = proxyClientConn.Do(&proxyReq)
+ if err != nil && err != httputil.ErrPersistEOF {
+ return nil, err
+ }
+
+ rwc, _ := proxyClientConn.Hijack()
+
+ if req.URL.Scheme != "https" {
+ return rwc, nil
+ }
+
+ host, _, err := net.SplitHostPort(targetHost)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConfig := s.tlsConfig
+ switch {
+ case tlsConfig == nil:
+ tlsConfig = &tls.Config{ServerName: host}
+ case len(tlsConfig.ServerName) == 0:
+ tlsConfig = tlsConfig.Clone()
+ tlsConfig.ServerName = host
+ }
+
+ tlsConn := tls.Client(rwc, tlsConfig)
+
+ // need to manually call Handshake() so we can call VerifyHostname() below
+ if err := tlsConn.Handshake(); err != nil {
+ return nil, err
+ }
+
+ // Return if we were configured to skip validation
+ if tlsConfig.InsecureSkipVerify {
+ return tlsConn, nil
+ }
+
+ if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
+ return nil, err
+ }
+
+ return tlsConn, nil
+}
+
+// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
+func (s *SpdyRoundTripper) dialWithoutProxy(url *url.URL) (net.Conn, error) {
+ dialAddr := netutil.CanonicalAddr(url)
+
+ if url.Scheme == "http" {
+ if s.Dialer == nil {
+ return net.Dial("tcp", dialAddr)
+ } else {
+ return s.Dialer.Dial("tcp", dialAddr)
+ }
+ }
+
+ // TODO validate the TLSClientConfig is set up?
+ var conn *tls.Conn
+ var err error
+ if s.Dialer == nil {
+ conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
+ } else {
+ conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Return if we were configured to skip validation
+ if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
+ return conn, nil
+ }
+
+ host, _, err := net.SplitHostPort(dialAddr)
+ if err != nil {
+ return nil, err
+ }
+ if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
+ host = s.tlsConfig.ServerName
+ }
+ err = conn.VerifyHostname(host)
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
+func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
+ if proxyURL == nil || proxyURL.User == nil {
+ return ""
+ }
+ credentials := proxyURL.User.String()
+ encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
+ return fmt.Sprintf("Basic %s", encodedAuth)
+}
+
+// RoundTrip executes the Request and upgrades it. After a successful upgrade,
+// clients may call SpdyRoundTripper.NewConnection() to retrieve the upgraded
+// connection.
+func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ header := utilnet.CloneHeader(req.Header)
+ header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+ header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+
+ var (
+ conn net.Conn
+ rawResponse []byte
+ err error
+ )
+
+ if s.followRedirects {
+ conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s)
+ } else {
+ clone := utilnet.CloneRequest(req)
+ clone.Header = header
+ conn, err = s.Dial(clone)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ responseReader := bufio.NewReader(
+ io.MultiReader(
+ bytes.NewBuffer(rawResponse),
+ conn,
+ ),
+ )
+
+ resp, err := http.ReadResponse(responseReader, nil)
+ if err != nil {
+ if conn != nil {
+ conn.Close()
+ }
+ return nil, err
+ }
+
+ s.conn = conn
+
+ return resp, nil
+}
+
+// NewConnection validates the upgrade response, creating and returning a new
+// httpstream.Connection if there were no errors.
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
+ connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
+ upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
+ if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+ defer resp.Body.Close()
+ responseError := ""
+ responseErrorBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ responseError = "unable to read error from server response"
+ } else {
+ // TODO: I don't belong here, I should be abstracted from this class
+ if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
+ if status, ok := obj.(*metav1.Status); ok {
+ return nil, &apierrors.StatusError{ErrStatus: *status}
+ }
+ }
+ responseError = string(responseErrorBytes)
+ responseError = strings.TrimSpace(responseError)
+ }
+
+ return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
+ }
+
+ return NewClientConnection(s.conn)
+}
+
+// statusScheme is a private scheme for the decoding here until someone fixes the TODO in NewConnection
+var statusScheme = runtime.NewScheme()
+
+// statusCodecs is a codec factory used to decode metav1.Status payloads from failed upgrade responses.
+var statusCodecs = serializer.NewCodecFactory(statusScheme)
+
+func init() {
+ statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
+ &metav1.Status{},
+ )
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
new file mode 100644
index 000000000..13353988f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "sync/atomic"
+
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+const HeaderSpdy31 = "SPDY/3.1"
+
+// responseUpgrader knows how to upgrade HTTP responses. It
+// implements the httpstream.ResponseUpgrader interface.
+type responseUpgrader struct {
+}
+
+// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
+// calls will be handled directly by the underlying net.Conn with the exception
+// of Read and Close calls, which will consider data in the bufio.Reader. This
+// ensures that data already inside the used bufio.Reader instance is also
+// read.
+type connWrapper struct {
+ net.Conn
+ closed int32
+ bufReader *bufio.Reader
+}
+
+func (w *connWrapper) Read(b []byte) (n int, err error) {
+ if atomic.LoadInt32(&w.closed) == 1 {
+ return 0, io.EOF
+ }
+ return w.bufReader.Read(b)
+}
+
+func (w *connWrapper) Close() error {
+ err := w.Conn.Close()
+ atomic.StoreInt32(&w.closed, 1)
+ return err
+}
+
+// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
+// capable of upgrading HTTP responses using SPDY/3.1 via the
+// spdystream package.
+func NewResponseUpgrader() httpstream.ResponseUpgrader {
+ return responseUpgrader{}
+}
+
+// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+// streams. newStreamHandler will be called synchronously whenever the
+// other end of the upgraded connection creates a new stream.
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+ connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
+ upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
+ if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header)
+ return nil
+ }
+
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "unable to upgrade: unable to hijack response")
+ return nil
+ }
+
+ w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+ w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+ w.WriteHeader(http.StatusSwitchingProtocols)
+
+ conn, bufrw, err := hijacker.Hijack()
+ if err != nil {
+ runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
+ return nil
+ }
+
+ connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
+ spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
+ if err != nil {
+ runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
+ return nil
+ }
+
+ return spdyConn
+}
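+
+// A minimal server-side sketch (the handler wiring is assumed, not part of
+// this file):
+//
+//    func serveSpdy(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) {
+//        conn := NewResponseUpgrader().UpgradeResponse(w, req, newStreamHandler)
+//        if conn == nil {
+//            return // UpgradeResponse has already written an error response
+//        }
+//        defer conn.Close()
+//        // streams opened by the client are now delivered to newStreamHandler
+//    }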
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
new file mode 100644
index 000000000..433dfa5cd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
@@ -0,0 +1,381 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package intstr is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+
+ It has these top-level messages:
+ IntOrString
+*/
+package intstr
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+func (m *IntOrString) Reset() { *m = IntOrString{} }
+func (*IntOrString) ProtoMessage() {}
+func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
+
+func init() {
+ proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString")
+}
+func (m *IntOrString) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal)))
+ i += copy(dAtA[i:], m.StrVal)
+ return i, nil
+}
+
+func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ dAtA[offset+4] = uint8(v >> 32)
+ dAtA[offset+5] = uint8(v >> 40)
+ dAtA[offset+6] = uint8(v >> 48)
+ dAtA[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
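+
+// A small sketch of the varint layout (buffer size illustrative): seven bits
+// per byte, least-significant group first, with the high bit set on every
+// byte except the last, so 300 encodes as 0xac 0x02:
+//
+//    buf := make([]byte, 2)
+//    encodeVarintGenerated(buf, 0, 300) // buf == []byte{0xac, 0x02}
+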
+func (m *IntOrString) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Type))
+ n += 1 + sovGenerated(uint64(m.IntVal))
+ l = len(m.StrVal)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *IntOrString) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IntOrString: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType)
+ }
+ m.IntVal = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.IntVal |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StrVal = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+ proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated)
+}
+
+var fileDescriptorGenerated = []byte{
+ // 292 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31,
+ 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d,
+ 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d,
+ 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a,
+ 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36,
+ 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda,
+ 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20,
+ 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e,
+ 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84,
+ 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0,
+ 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30,
+ 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f,
+ 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92,
+ 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3,
+ 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83,
+ 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35,
+ 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb,
+ 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9,
+ 0x64, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
new file mode 100644
index 000000000..cccaf6f68
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
@@ -0,0 +1,43 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.util.intstr;
+
+// Package-wide variables from generator "generated".
+option go_package = "intstr";
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+message IntOrString {
+ optional int64 type = 1;
+
+ optional int32 intVal = 2;
+
+ optional string strVal = 3;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
new file mode 100644
index 000000000..02586b348
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package intstr
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "runtime/debug"
+ "strconv"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/openapi"
+
+ "github.com/go-openapi/spec"
+ "github.com/golang/glog"
+ "github.com/google/gofuzz"
+)
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type IntOrString struct {
+ Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"`
+ IntVal int32 `protobuf:"varint,2,opt,name=intVal"`
+ StrVal string `protobuf:"bytes,3,opt,name=strVal"`
+}
+
+// Type represents the stored type of IntOrString.
+type Type int
+
+const (
+ Int Type = iota // The IntOrString holds an int.
+ String // The IntOrString holds a string.
+)
+
+// FromInt creates an IntOrString object with an int32 value. It is
+// your responsibility not to call this method with a value that
+// does not fit in an int32.
+// TODO: convert to (val int32)
+func FromInt(val int) IntOrString {
+ if val > math.MaxInt32 || val < math.MinInt32 {
+ glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
+ }
+ return IntOrString{Type: Int, IntVal: int32(val)}
+}
+
+// FromString creates an IntOrString object with a string value.
+func FromString(val string) IntOrString {
+ return IntOrString{Type: String, StrVal: val}
+}
+
+// Parse converts the given string to an integer when possible;
+// otherwise the value is kept as a string.
+func Parse(val string) IntOrString {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return FromString(val)
+ }
+ return FromInt(i)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
+ if value[0] == '"' {
+ intstr.Type = String
+ return json.Unmarshal(value, &intstr.StrVal)
+ }
+ intstr.Type = Int
+ return json.Unmarshal(value, &intstr.IntVal)
+}
+
+// String returns the string value, or the Itoa of the int value.
+func (intstr *IntOrString) String() string {
+ if intstr.Type == String {
+ return intstr.StrVal
+ }
+ return strconv.Itoa(intstr.IntValue())
+}
+
+// IntValue returns the IntVal if the type is Int; if the type is
+// String, it attempts a conversion to int, returning 0 on failure.
+func (intstr *IntOrString) IntValue() int {
+ if intstr.Type == String {
+ i, _ := strconv.Atoi(intstr.StrVal)
+ return i
+ }
+ return int(intstr.IntVal)
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (intstr IntOrString) MarshalJSON() ([]byte, error) {
+ switch intstr.Type {
+ case Int:
+ return json.Marshal(intstr.IntVal)
+ case String:
+ return json.Marshal(intstr.StrVal)
+ default:
+ return []byte{}, fmt.Errorf("impossible IntOrString.Type")
+ }
+}
+
+func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition {
+ return openapi.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "int-or-string",
+ },
+ },
+ }
+}
+
+func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+ if intstr == nil {
+ return
+ }
+ if c.RandBool() {
+ intstr.Type = Int
+ c.Fuzz(&intstr.IntVal)
+ intstr.StrVal = ""
+ } else {
+ intstr.Type = String
+ intstr.IntVal = 0
+ c.Fuzz(&intstr.StrVal)
+ }
+}
+
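+// GetValueFromIntOrPercent returns Int values as-is; String values are parsed
+// with any "%" stripped and treated as a percentage of total, rounded up or
+// down according to roundUp.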
+func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
+ value, isPercent, err := getIntOrPercentValue(intOrPercent)
+ if err != nil {
+ return 0, fmt.Errorf("invalid value for IntOrString: %v", err)
+ }
+ if isPercent {
+ if roundUp {
+ value = int(math.Ceil(float64(value) * (float64(total)) / 100))
+ } else {
+ value = int(math.Floor(float64(value) * (float64(total)) / 100))
+ }
+ }
+ return value, nil
+}
+
+func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) {
+ switch intOrStr.Type {
+ case Int:
+ return intOrStr.IntValue(), false, nil
+ case String:
+ s := strings.Replace(intOrStr.StrVal, "%", "", -1)
+ v, err := strconv.Atoi(s)
+ if err != nil {
+ return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err)
+ }
+ return int(v), true, nil
+ }
+ return 0, false, fmt.Errorf("invalid type: neither int nor percentage")
+}
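+
+// A rough usage sketch (inputs illustrative): Parse keeps a string value when
+// the input is not an integer, and percentages resolve against a total:
+//
+//    p := Parse("50%")                              // Type == String
+//    v, _ := GetValueFromIntOrPercent(&p, 10, true) // v == 5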
diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
new file mode 100644
index 000000000..e8054a12e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+)
+
+// NewEncoder delegates to json.NewEncoder
+// It is only here so this package can be a drop-in for common encoding/json uses
+func NewEncoder(w io.Writer) *json.Encoder {
+ return json.NewEncoder(w)
+}
+
+// Marshal delegates to json.Marshal
+// It is only here so this package can be a drop-in for common encoding/json uses
+func Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// Unmarshal unmarshals the given data
+// If v is a *map[string]interface{}, numbers are converted to int64 or float64
+func Unmarshal(data []byte, v interface{}) error {
+ switch v := v.(type) {
+ case *map[string]interface{}:
+ // Build a decoder from the given data
+ decoder := json.NewDecoder(bytes.NewBuffer(data))
+ // Preserve numbers, rather than casting to float64 automatically
+ decoder.UseNumber()
+ // Run the decode
+ if err := decoder.Decode(v); err != nil {
+ return err
+ }
+ // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+ return convertMapNumbers(*v)
+
+ default:
+ return json.Unmarshal(data, v)
+ }
+}
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// Values which are map[string]interface{} or []interface{} are visited recursively.
+func convertMapNumbers(m map[string]interface{}) error {
+ var err error
+ for k, v := range m {
+ switch v := v.(type) {
+ case json.Number:
+ m[k], err = convertNumber(v)
+ case map[string]interface{}:
+ err = convertMapNumbers(v)
+ case []interface{}:
+ err = convertSliceNumbers(v)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// Values which are map[string]interface{} or []interface{} are visited recursively.
+func convertSliceNumbers(s []interface{}) error {
+ var err error
+ for i, v := range s {
+ switch v := v.(type) {
+ case json.Number:
+ s[i], err = convertNumber(v)
+ case map[string]interface{}:
+ err = convertMapNumbers(v)
+ case []interface{}:
+ err = convertSliceNumbers(v)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+ // Attempt to convert to an int64 first
+ if i, err := n.Int64(); err == nil {
+ return i, nil
+ }
+ // Return a float64 (default json.Decode() behavior)
+ // An overflow will return an error
+ return n.Float64()
+}
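+
+// A small sketch of how this differs from plain encoding/json (input
+// illustrative):
+//
+//    var m map[string]interface{}
+//    _ = Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &m)
+//    // m["replicas"] is int64(3) rather than float64(3);
+//    // m["ratio"] stays float64(0.5), which has no int64 representation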
diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go
new file mode 100644
index 000000000..ac3c1e8cf
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mergepatch
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var (
+ ErrBadJSONDoc = errors.New("invalid JSON document")
+ ErrNoListOfLists = errors.New("lists of lists are not supported")
+ ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list")
+ ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys")
+ ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list")
+ ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list")
+)
+
+func ErrNoMergeKey(m map[string]interface{}, k string) error {
+ return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k)
+}
+
+func ErrBadArgType(expected, actual interface{}) error {
+ return fmt.Errorf("expected a %s, but received a %s",
+ reflect.TypeOf(expected),
+ reflect.TypeOf(actual))
+}
+
+func ErrBadArgKind(expected, actual interface{}) error {
+ var expectedKindString, actualKindString string
+ if expected == nil {
+ expectedKindString = "nil"
+ } else {
+ expectedKindString = reflect.TypeOf(expected).Kind().String()
+ }
+ if actual == nil {
+ actualKindString = "nil"
+ } else {
+ actualKindString = reflect.TypeOf(actual).Kind().String()
+ }
+ return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString)
+}
+
+func ErrBadPatchType(t interface{}, m map[string]interface{}) error {
+ return fmt.Errorf("unknown patch type: %s in map: %v", t, m)
+}
+
+// IsPreconditionFailed returns true if the provided error indicates
+// a precondition failed.
+func IsPreconditionFailed(err error) bool {
+ _, ok := err.(ErrPreconditionFailed)
+ return ok
+}
+
+type ErrPreconditionFailed struct {
+ message string
+}
+
+func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed {
+ s := fmt.Sprintf("precondition failed for: %v", target)
+ return ErrPreconditionFailed{s}
+}
+
+func (err ErrPreconditionFailed) Error() string {
+ return err.message
+}
+
+type ErrConflict struct {
+ message string
+}
+
+func NewErrConflict(patch, current string) ErrConflict {
+ s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current)
+ return ErrConflict{s}
+}
+
+func (err ErrConflict) Error() string {
+ return err.message
+}
+
+// IsConflict returns true if the provided error indicates
+// a conflict between the patch and the current configuration.
+func IsConflict(err error) bool {
+ _, ok := err.(ErrConflict)
+ return ok
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
new file mode 100644
index 000000000..9261290a7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mergepatch
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/ghodss/yaml"
+)
+
+// PreconditionFunc asserts that an incompatible change is not present within a patch.
+type PreconditionFunc func(interface{}) bool
+
+// RequireKeyUnchanged returns a precondition function that fails if the provided key
+// is present in the patch (indicating that its value has changed).
+func RequireKeyUnchanged(key string) PreconditionFunc {
+ return func(patch interface{}) bool {
+ patchMap, ok := patch.(map[string]interface{})
+ if !ok {
+ return true
+ }
+
+ // The presence of key means that its value has been changed, so the test fails.
+ _, ok = patchMap[key]
+ return !ok
+ }
+}
+
+// RequireMetadataKeyUnchanged creates a precondition function that fails
+// if the metadata.key is present in the patch (indicating its value
+// has changed).
+func RequireMetadataKeyUnchanged(key string) PreconditionFunc {
+ return func(patch interface{}) bool {
+ patchMap, ok := patch.(map[string]interface{})
+ if !ok {
+ return true
+ }
+ patchMap1, ok := patchMap["metadata"]
+ if !ok {
+ return true
+ }
+ patchMap2, ok := patchMap1.(map[string]interface{})
+ if !ok {
+ return true
+ }
+ _, ok = patchMap2[key]
+ return !ok
+ }
+}
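+
+// A rough sketch of a precondition in use (patch contents illustrative):
+//
+//    pre := RequireKeyUnchanged("apiVersion")
+//    pre(map[string]interface{}{"spec": map[string]interface{}{}}) // true: key absent
+//    pre(map[string]interface{}{"apiVersion": "v2"})               // false: key changed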
+
+func ToYAMLOrError(v interface{}) string {
+ y, err := toYAML(v)
+ if err != nil {
+ return err.Error()
+ }
+
+ return y
+}
+
+func toYAML(v interface{}) (string, error) {
+ y, err := yaml.Marshal(v)
+ if err != nil {
+ return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v))
+ }
+
+ return string(y), nil
+}
+
+// HasConflicts returns true if the left and right JSON interface objects overlap with
+// different values in any key. All keys are required to be strings. Since patches of the
+// same Type have congruent keys, this is valid for multiple patch types. This method
+// supports JSON merge patch semantics.
+//
+// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts.
+// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
+func HasConflicts(left, right interface{}) (bool, error) {
+ switch typedLeft := left.(type) {
+ case map[string]interface{}:
+ switch typedRight := right.(type) {
+ case map[string]interface{}:
+ for key, leftValue := range typedLeft {
+ rightValue, ok := typedRight[key]
+ if !ok {
+ continue
+ }
+ if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict {
+ return conflict, err
+ }
+ }
+
+ return false, nil
+ default:
+ return true, nil
+ }
+ case []interface{}:
+ switch typedRight := right.(type) {
+ case []interface{}:
+ if len(typedLeft) != len(typedRight) {
+ return true, nil
+ }
+
+ for i := range typedLeft {
+ if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict {
+ return conflict, err
+ }
+ }
+
+ return false, nil
+ default:
+ return true, nil
+ }
+ case string, float64, bool, int, int64, nil:
+ return !reflect.DeepEqual(left, right), nil
+ default:
+ return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left))
+ }
+}
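+
+// A rough sketch (inputs illustrative): only a shared key with differing
+// values counts as a conflict; disjoint keys do not:
+//
+//    left := map[string]interface{}{"a": "x", "b": "y"}
+//    right := map[string]interface{}{"b": "y", "c": "z"}
+//    conflict, _ := HasConflicts(left, right) // false
+//    right["b"] = "z"
+//    conflict, _ = HasConflicts(left, right) // true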
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
new file mode 100644
index 000000000..adb80813b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -0,0 +1,401 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/golang/glog"
+ "golang.org/x/net/http2"
+)
+
+// IsProbableEOF returns true if the given error resembles a connection termination
+// scenario that would justify assuming that the watch is empty.
+// These errors are what the Go http stack returns to us: general connection
+// closure errors, strongly correlated with termination. Callers that need to
+// distinguish probable connection errors from a normal "this is disconnected"
+// condition should use this method.
+func IsProbableEOF(err error) bool {
+ if uerr, ok := err.(*url.Error); ok {
+ err = uerr.Err
+ }
+ switch {
+ case err == io.EOF:
+ return true
+ case err.Error() == "http: can't write HTTP request on broken connection":
+ return true
+ case strings.Contains(err.Error(), "connection reset by peer"):
+ return true
+ case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"):
+ return true
+ }
+ return false
+}
+
+var defaultTransport = http.DefaultTransport.(*http.Transport)
+
+// SetOldTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
+func SetOldTransportDefaults(t *http.Transport) *http.Transport {
+ if t.Proxy == nil || isDefault(t.Proxy) {
+ // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+ // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+ t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+ }
+ if t.Dial == nil {
+ t.Dial = defaultTransport.Dial
+ }
+ if t.TLSHandshakeTimeout == 0 {
+ t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
+ }
+ return t
+}
+
+// SetTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
+func SetTransportDefaults(t *http.Transport) *http.Transport {
+ t = SetOldTransportDefaults(t)
+ // Allow clients to disable http2 if needed.
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
+ glog.Infof("HTTP2 has been explicitly disabled")
+ } else {
+ if err := http2.ConfigureTransport(t); err != nil {
+ glog.Warningf("Transport failed http2 configuration: %v", err)
+ }
+ }
+ return t
+}
+
+type RoundTripperWrapper interface {
+ http.RoundTripper
+ WrappedRoundTripper() http.RoundTripper
+}
+
+type DialFunc func(net, addr string) (net.Conn, error)
+
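+// DialerFor unwraps transport until it finds an *http.Transport and returns
+// its Dial function; a nil transport yields a nil dialer.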
+func DialerFor(transport http.RoundTripper) (DialFunc, error) {
+ if transport == nil {
+ return nil, nil
+ }
+
+ switch transport := transport.(type) {
+ case *http.Transport:
+ return transport.Dial, nil
+ case RoundTripperWrapper:
+ return DialerFor(transport.WrappedRoundTripper())
+ default:
+ return nil, fmt.Errorf("unknown transport type: %v", transport)
+ }
+}
+
+type TLSClientConfigHolder interface {
+ TLSClientConfig() *tls.Config
+}
+
+func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) {
+ if transport == nil {
+ return nil, nil
+ }
+
+ switch transport := transport.(type) {
+ case *http.Transport:
+ return transport.TLSClientConfig, nil
+ case TLSClientConfigHolder:
+ return transport.TLSClientConfig(), nil
+ case RoundTripperWrapper:
+ return TLSClientConfig(transport.WrappedRoundTripper())
+ default:
+ return nil, fmt.Errorf("unknown transport type: %v", transport)
+ }
+}
+
+func FormatURL(scheme string, host string, port int, path string) *url.URL {
+ return &url.URL{
+ Scheme: scheme,
+ Host: net.JoinHostPort(host, strconv.Itoa(port)),
+ Path: path,
+ }
+}
+
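+// GetHTTPClient returns the request's User-Agent header, or "unknown" if unset.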
+func GetHTTPClient(req *http.Request) string {
+ if userAgent, ok := req.Header["User-Agent"]; ok {
+ if len(userAgent) > 0 {
+ return userAgent[0]
+ }
+ }
+ return "unknown"
+}
+
+// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr,
+// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid.
+func SourceIPs(req *http.Request) []net.IP {
+ hdr := req.Header
+ // First check the X-Forwarded-For header for requests via proxy.
+ hdrForwardedFor := hdr.Get("X-Forwarded-For")
+ forwardedForIPs := []net.IP{}
+ if hdrForwardedFor != "" {
+ // X-Forwarded-For can be a csv of IPs in case of multiple proxies.
+ // Use the first valid one.
+ parts := strings.Split(hdrForwardedFor, ",")
+ for _, part := range parts {
+ ip := net.ParseIP(strings.TrimSpace(part))
+ if ip != nil {
+ forwardedForIPs = append(forwardedForIPs, ip)
+ }
+ }
+ }
+ if len(forwardedForIPs) > 0 {
+ return forwardedForIPs
+ }
+
+ // Try the X-Real-Ip header.
+ hdrRealIp := hdr.Get("X-Real-Ip")
+ if hdrRealIp != "" {
+ ip := net.ParseIP(hdrRealIp)
+ if ip != nil {
+ return []net.IP{ip}
+ }
+ }
+
+ // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy.
+ // Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err == nil {
+ if remoteIP := net.ParseIP(host); remoteIP != nil {
+ return []net.IP{remoteIP}
+ }
+ }
+
+ // Fallback if Remote Address was just IP.
+ if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil {
+ return []net.IP{remoteIP}
+ }
+
+ return nil
+}
+
+// Extracts and returns the client's IP from the given request.
+// Looks at the X-Forwarded-For header, the X-Real-Ip header, and request.RemoteAddr, in that order.
+// Returns nil if none of them are set or all are set to invalid values.
+func GetClientIP(req *http.Request) net.IP {
+ ips := SourceIPs(req)
+ if len(ips) == 0 {
+ return nil
+ }
+ return ips[0]
+}
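+
+// A rough sketch of the precedence (header values illustrative):
+//
+//    req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.2")
+//    req.RemoteAddr = "10.0.0.1:54321"
+//    GetClientIP(req) // 203.0.113.7: the X-Forwarded-For chain wins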
+
+// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's
+// IP address to the X-Forwarded-For chain.
+func AppendForwardedForHeader(req *http.Request) {
+ // Copied from net/http/httputil/reverseproxy.go:
+ if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+ // If we aren't the first proxy retain prior
+ // X-Forwarded-For information as a comma+space
+ // separated list and fold multiple headers into one.
+ if prior, ok := req.Header["X-Forwarded-For"]; ok {
+ clientIP = strings.Join(prior, ", ") + ", " + clientIP
+ }
+ req.Header.Set("X-Forwarded-For", clientIP)
+ }
+}
+
+var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment)
+
+// isDefault checks to see if the transportProxierFunc is pointing to the default one
+func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool {
+ transportProxierPointer := fmt.Sprintf("%p", transportProxier)
+ return transportProxierPointer == defaultProxyFuncPointer
+}
+
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+ // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it
+ noProxyEnv := os.Getenv("NO_PROXY")
+ noProxyRules := strings.Split(noProxyEnv, ",")
+
+ cidrs := []*net.IPNet{}
+ for _, noProxyRule := range noProxyRules {
+ _, cidr, _ := net.ParseCIDR(noProxyRule)
+ if cidr != nil {
+ cidrs = append(cidrs, cidr)
+ }
+ }
+
+ if len(cidrs) == 0 {
+ return delegate
+ }
+
+ return func(req *http.Request) (*url.URL, error) {
+ host := req.URL.Host
+ // for some urls, the Host is already the host, not the host:port
+ if net.ParseIP(host) == nil {
+ var err error
+ host, _, err = net.SplitHostPort(req.URL.Host)
+ if err != nil {
+ return delegate(req)
+ }
+ }
+
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return delegate(req)
+ }
+
+ for _, cidr := range cidrs {
+ if cidr.Contains(ip) {
+ return nil, nil
+ }
+ }
+
+ return delegate(req)
+ }
+}
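+
+// A rough sketch (environment value illustrative): with NO_PROXY=10.0.0.0/8
+// set before construction, requests to in-CIDR IPs bypass the proxy while
+// everything else falls through to the delegate:
+//
+//    proxier := NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+//    // for a request to http://10.1.2.3:8080, proxier returns (nil, nil): dial directly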
+
+// Dialer dials a host and writes a request to it.
+type Dialer interface {
+ // Dial connects to the host specified by req's URL, writes the request to the connection, and
+ // returns the opened net.Conn.
+ Dial(req *http.Request) (net.Conn, error)
+}
+
+// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
+// originalLocation). It returns the opened net.Conn and the raw response bytes.
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) {
+ const (
+ maxRedirects = 10
+ maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
+ )
+
+ var (
+ location = originalLocation
+ method = originalMethod
+ intermediateConn net.Conn
+ rawResponse = bytes.NewBuffer(make([]byte, 0, 256))
+ body = originalBody
+ )
+
+ defer func() {
+ if intermediateConn != nil {
+ intermediateConn.Close()
+ }
+ }()
+
+redirectLoop:
+ for redirects := 0; ; redirects++ {
+ if redirects > maxRedirects {
+ return nil, nil, fmt.Errorf("too many redirects (%d)", redirects)
+ }
+
+ req, err := http.NewRequest(method, location.String(), body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req.Header = header
+
+ intermediateConn, err = dialer.Dial(req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Peek at the backend response.
+ rawResponse.Reset()
+ respReader := bufio.NewReader(io.TeeReader(
+ io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes.
+ rawResponse)) // Save the raw response.
+ resp, err := http.ReadResponse(respReader, nil)
+ if err != nil {
+ // Unable to read the backend response; let the client handle it.
+ glog.Warningf("Error reading backend response: %v", err)
+ break redirectLoop
+ }
+
+ switch resp.StatusCode {
+ case http.StatusFound:
+ // Redirect, continue.
+ default:
+ // Don't redirect.
+ break redirectLoop
+ }
+
+ // Redirected requests switch to "GET" according to the HTTP spec:
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
+ method = "GET"
+ // don't send a body when following redirects
+ body = nil
+
+ resp.Body.Close() // not used
+
+ // Reset the connection.
+ intermediateConn.Close()
+ intermediateConn = nil
+
+ // Prepare to follow the redirect.
+ redirectStr := resp.Header.Get("Location")
+ if redirectStr == "" {
+ return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode)
+ }
+ // We have to parse relative to the current location, NOT originalLocation. For example,
+ // if we request http://foo.com/a and get back "http://bar.com/b", the result should be
+ // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result
+ // should be http://bar.com/c, not http://foo.com/c.
+ location, err = location.Parse(redirectStr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed Location header: %v", err)
+ }
+ }
+
+ connToReturn := intermediateConn
+ intermediateConn = nil // Don't close the connection when we return it.
+ return connToReturn, rawResponse.Bytes(), nil
+}
+
+// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers.
+func CloneRequest(req *http.Request) *http.Request {
+ r := new(http.Request)
+
+ // shallow clone
+ *r = *req
+
+ // deep copy headers
+ r.Header = CloneHeader(req.Header)
+
+ return r
+}
+
+// CloneHeader creates a deep copy of an http.Header.
+func CloneHeader(in http.Header) http.Header {
+ out := make(http.Header, len(in))
+ for key, values := range in {
+ newValues := make([]string, len(values))
+ copy(newValues, values)
+ out[key] = newValues
+ }
+ return out
+}
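+
+// A small sketch (header values illustrative): the copy is deep enough that
+// mutating the clone leaves the original intact:
+//
+//    orig := http.Header{"Accept": {"application/json"}}
+//    cp := CloneHeader(orig)
+//    cp.Set("Accept", "text/plain") // orig still carries "application/json"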
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
new file mode 100644
index 000000000..a1e53d2e4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "bufio"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net"
+	"os"
+	"strings"
+
+ "github.com/golang/glog"
+)
+
+type Route struct {
+ Interface string
+ Destination net.IP
+ Gateway net.IP
+ // TODO: add more fields here if needed
+}
+
+func getRoutes(input io.Reader) ([]Route, error) {
+ routes := []Route{}
+ if input == nil {
+ return nil, fmt.Errorf("input is nil")
+ }
+ scanner := bufio.NewReader(input)
+ for {
+ line, err := scanner.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+ //ignore the headers in the route info
+ if strings.HasPrefix(line, "Iface") {
+ continue
+ }
+ fields := strings.Fields(line)
+ routes = append(routes, Route{})
+ route := &routes[len(routes)-1]
+ route.Interface = fields[0]
+ ip, err := parseIP(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ route.Destination = ip
+ ip, err = parseIP(fields[2])
+ if err != nil {
+ return nil, err
+ }
+ route.Gateway = ip
+ }
+ return routes, nil
+}
+
+func parseIP(str string) (net.IP, error) {
+ if str == "" {
+		return nil, fmt.Errorf("input is empty")
+ }
+ bytes, err := hex.DecodeString(str)
+ if err != nil {
+ return nil, err
+ }
+ //TODO add ipv6 support
+ if len(bytes) != net.IPv4len {
+ return nil, fmt.Errorf("only IPv4 is supported")
+ }
+ bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0]
+ return net.IP(bytes), nil
+}
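+
+// A small sketch: /proc/net/route stores IPv4 addresses as little-endian hex,
+// so the byte reversal above turns "0101A8C0" into 192.168.1.1:
+//
+//    ip, _ := parseIP("0101A8C0") // net.IP{192, 168, 1, 1}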
+
+func isInterfaceUp(intf *net.Interface) bool {
+ if intf == nil {
+ return false
+ }
+ if intf.Flags&net.FlagUp != 0 {
+ glog.V(4).Infof("Interface %v is up", intf.Name)
+ return true
+ }
+ return false
+}
+
+//getFinalIP method receives all the IP addrs of an Interface
+//and returns nil if every address is loopback, IPv6, link-local, or nil.
+//It returns a valid IPv4 address if one is found in the array.
+func getFinalIP(addrs []net.Addr) (net.IP, error) {
+ if len(addrs) > 0 {
+ for i := range addrs {
+ glog.V(4).Infof("Checking addr %s.", addrs[i].String())
+ ip, _, err := net.ParseCIDR(addrs[i].String())
+ if err != nil {
+ return nil, err
+ }
+ //Only IPv4
+ //TODO : add IPv6 support
+ if ip.To4() != nil {
+ if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() {
+ glog.V(4).Infof("IP found %v", ip)
+ return ip, nil
+ } else {
+ glog.V(4).Infof("Loopback/link-local found %v", ip)
+ }
+ } else {
+ glog.V(4).Infof("%v is not a valid IPv4 address", ip)
+ }
+
+ }
+ }
+ return nil, nil
+}
+
+func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) {
+ intf, err := nw.InterfaceByName(intfName)
+ if err != nil {
+ return nil, err
+ }
+ if isInterfaceUp(intf) {
+ addrs, err := nw.Addrs(intf)
+ if err != nil {
+ return nil, err
+ }
+ glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
+ finalIP, err := getFinalIP(addrs)
+ if err != nil {
+ return nil, err
+ }
+ if finalIP != nil {
+ glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP)
+ return finalIP, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func flagsSet(flags net.Flags, test net.Flags) bool {
+ return flags&test != 0
+}
+
+func flagsClear(flags net.Flags, test net.Flags) bool {
+ return flags&test == 0
+}
+
+func chooseHostInterfaceNativeGo() (net.IP, error) {
+ intfs, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ i := 0
+ var ip net.IP
+ for i = range intfs {
+ if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) {
+ addrs, err := intfs[i].Addrs()
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) > 0 {
+ for _, addr := range addrs {
+ if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil {
+ if addrIP.To4() != nil {
+ ip = addrIP.To4()
+ if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() {
+ break
+ }
+ }
+ }
+ }
+ if ip != nil {
+ // This interface should suffice.
+ break
+ }
+ }
+ }
+ }
+ if ip == nil {
+ return nil, fmt.Errorf("no acceptable interface from host")
+ }
+ glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip)
+ return ip, nil
+}
+
+//ChooseHostInterface is a method used to fetch an IP for a daemon.
+//It uses data from the /proc/net/route file.
+//For a node with no internet connection, it returns an error.
+//For a node with multiple network interfaces, it returns the IP of the interface with a gateway on it.
+func ChooseHostInterface() (net.IP, error) {
+ inFile, err := os.Open("/proc/net/route")
+ if err != nil {
+ if os.IsNotExist(err) {
+ return chooseHostInterfaceNativeGo()
+ }
+ return nil, err
+ }
+ defer inFile.Close()
+ var nw networkInterfacer = networkInterface{}
+ return chooseHostInterfaceFromRoute(inFile, nw)
+}
+
+type networkInterfacer interface {
+ InterfaceByName(intfName string) (*net.Interface, error)
+ Addrs(intf *net.Interface) ([]net.Addr, error)
+}
+
+type networkInterface struct{}
+
+func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) {
+ intf, err := net.InterfaceByName(intfName)
+ if err != nil {
+ return nil, err
+ }
+ return intf, nil
+}
+
+func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) {
+ addrs, err := intf.Addrs()
+ if err != nil {
+ return nil, err
+ }
+ return addrs, nil
+}
+
+func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) {
+ routes, err := getRoutes(inFile)
+ if err != nil {
+ return nil, err
+ }
+ zero := net.IP{0, 0, 0, 0}
+ var finalIP net.IP
+ for i := range routes {
+ //find interface with gateway
+ if routes[i].Destination.Equal(zero) {
+ glog.V(4).Infof("Default route transits interface %q", routes[i].Interface)
+ finalIP, err := getIPFromInterface(routes[i].Interface, nw)
+ if err != nil {
+ return nil, err
+ }
+ if finalIP != nil {
+ glog.V(4).Infof("Choosing IP %v ", finalIP)
+ return finalIP, nil
+ }
+ }
+ }
+	glog.V(4).Infof("No valid IP found")
+	if finalIP == nil {
+		return nil, fmt.Errorf("unable to select an IP")
+	}
+	return nil, nil
+}
+
+// If bind-address is usable, return it directly
+// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default
+// interface.
+func ChooseBindAddress(bindAddress net.IP) (net.IP, error) {
+ if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() {
+ hostIP, err := ChooseHostInterface()
+ if err != nil {
+ return nil, err
+ }
+ bindAddress = hostIP
+ }
+ return bindAddress, nil
+}
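+
+// A rough sketch (resulting address illustrative): unspecified and loopback
+// inputs are replaced by the IP of the default-route interface:
+//
+//    ip, _ := ChooseBindAddress(net.ParseIP("0.0.0.0"))
+//    // ip is now e.g. 192.168.1.10, taken from the interface holding the default route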
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
new file mode 100644
index 000000000..6a50e6186
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// PortRange represents a range of TCP/UDP ports. To represent a single port,
+// set Size to 1.
+type PortRange struct {
+ Base int
+ Size int
+}
+
+// Contains tests whether a given port falls within the PortRange.
+func (pr *PortRange) Contains(p int) bool {
+ return (p >= pr.Base) && ((p - pr.Base) < pr.Size)
+}
+
+// String converts the PortRange to a string representation, which can be
+// parsed by PortRange.Set or ParsePortRange.
+func (pr PortRange) String() string {
+ if pr.Size == 0 {
+ return ""
+ }
+ return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1)
+}
+
+// Set parses a string of the form "min-max", inclusive at both ends, and
+// sets the PortRange from it. This is part of the flag.Value and pflag.Value
+// interfaces.
+func (pr *PortRange) Set(value string) error {
+ value = strings.TrimSpace(value)
+
+ // TODO: Accept "80" syntax
+ // TODO: Accept "80+8" syntax
+
+ if value == "" {
+ pr.Base = 0
+ pr.Size = 0
+ return nil
+ }
+
+ hyphenIndex := strings.Index(value, "-")
+ if hyphenIndex == -1 {
+ return fmt.Errorf("expected hyphen in port range")
+ }
+
+ var err error
+ var low int
+ var high int
+ low, err = strconv.Atoi(value[:hyphenIndex])
+ if err == nil {
+ high, err = strconv.Atoi(value[hyphenIndex+1:])
+ }
+ if err != nil {
+ return fmt.Errorf("unable to parse port range: %s: %v", value, err)
+ }
+
+ if low > 65535 || high > 65535 {
+ return fmt.Errorf("the port range cannot be greater than 65535: %s", value)
+ }
+
+ if high < low {
+ return fmt.Errorf("end port cannot be less than start port: %s", value)
+ }
+
+ pr.Base = low
+ pr.Size = 1 + high - low
+ return nil
+}
+
+// Type returns a descriptive string about this type. This is part of the
+// pflag.Value interface.
+func (*PortRange) Type() string {
+ return "portRange"
+}
+
+// ParsePortRange parses a string of the form "min-max", inclusive at both
+// ends, and initializes a new PortRange from it.
+func ParsePortRange(value string) (*PortRange, error) {
+ pr := &PortRange{}
+ err := pr.Set(value)
+ if err != nil {
+ return nil, err
+ }
+ return pr, nil
+}
+
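+// ParsePortRangeOrDie behaves like ParsePortRange but panics on invalid input.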
+func ParsePortRangeOrDie(value string) *PortRange {
+ pr, err := ParsePortRange(value)
+ if err != nil {
+ panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err))
+ }
+ return pr
+}
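+
+// A small sketch of the inclusive "min-max" syntax (range illustrative):
+//
+//    pr, _ := ParsePortRange("30000-32767") // Base 30000, Size 2768
+//    pr.Contains(32767)                     // true
+//    pr.Contains(32768)                     // false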
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
new file mode 100644
index 000000000..c0fd4e20f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+var validSchemes = sets.NewString("http", "https", "")
+
+// SplitSchemeNamePort takes a string of the following forms:
+// * "<name>", returns "", "<name>","", true
+// * "<name>:<port>", returns "", "<name>","<port>",true
+// * "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
+//
+// If name is empty, valid is returned as false.
+// Scheme must be "http" or "https" if specified.
+// Port is returned as a string, and it is not required to be numeric (it could
+// be used for a named port, for example).
+func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) {
+ parts := strings.Split(id, ":")
+ switch len(parts) {
+ case 1:
+ name = parts[0]
+ case 2:
+ name = parts[0]
+ port = parts[1]
+ case 3:
+ scheme = parts[0]
+ name = parts[1]
+ port = parts[2]
+ default:
+ return "", "", "", false
+ }
+
+ if len(name) > 0 && validSchemes.Has(scheme) {
+ return scheme, name, port, true
+ } else {
+ return "", "", "", false
+ }
+}
+
+// JoinSchemeNamePort returns a string that specifies the scheme, name, and port:
+// * "<name>"
+// * "<name>:<port>"
+// * "<scheme>:<name>:<port>"
+// None of the parameters may contain a ':' character.
+// Name is required.
+// Scheme must be "", "http", or "https".
+func JoinSchemeNamePort(scheme, name, port string) string {
+ if len(scheme) > 0 {
+ // Must include three segments to specify scheme
+ return scheme + ":" + name + ":" + port
+ }
+ if len(port) > 0 {
+ // Must include two segments to specify port
+ return name + ":" + port
+ }
+ // Return name alone
+ return name
+}
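
A round-trip sketch for the two helpers above (illustrative values):

package main

import (
    "fmt"

    utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
    scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:etcd:2379")
    fmt.Println(scheme, name, port, valid) // https etcd 2379 true

    // Round-trip back into the canonical string form.
    fmt.Println(utilnet.JoinSchemeNamePort(scheme, name, port)) // https:etcd:2379

    // A scheme other than "", "http", or "https" yields valid == false.
    _, _, _, valid = utilnet.SplitSchemeNamePort("ftp:etcd:2379")
    fmt.Println(valid) // false
}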
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
new file mode 100644
index 000000000..461144f0b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "net"
+ "reflect"
+ "syscall"
+)
+
+// IPNetEqual checks if the two input IPNets are representing the same subnet.
+// For example,
+// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet.
+// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet.
+func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool {
+ if ipnet1 == nil || ipnet2 == nil {
+ return false
+ }
+ if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) {
+ return true
+ }
+ return false
+}
+
+// IsConnectionReset returns true if the given err is a "connection reset by
+// peer" error.
+func IsConnectionReset(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if ok && opErr.Err.Error() == syscall.ECONNRESET.Error() {
+ return true
+ }
+ return false
+}
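
A sketch of IPNetEqual's semantics (illustrative): host bits of the IP are effectively ignored as long as the masks match and each network contains the other's IP.

package main

import (
    "fmt"
    "net"

    utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
    _, a, _ := net.ParseCIDR("10.0.0.1/24") // normalizes to 10.0.0.0/24
    _, b, _ := net.ParseCIDR("10.0.0.0/24")
    _, c, _ := net.ParseCIDR("10.0.0.0/25")
    fmt.Println(utilnet.IPNetEqual(a, b)) // true: same mask, same network
    fmt.Println(utilnet.IPNetEqual(a, c)) // false: different mask length
}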
diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go
new file mode 100644
index 000000000..db109c2cd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rand provides utilities related to randomization.
+package rand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+var rng = struct {
+ sync.Mutex
+ rand *rand.Rand
+}{
+ rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
+}
+
+// Intn generates an integer in the range [0,max).
+// By design it panics if max <= 0.
+func Intn(max int) int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Intn(max)
+}
+
+// IntnRange generates an integer in the range [min,max).
+// By design it panics if max-min <= 0.
+func IntnRange(min, max int) int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Intn(max-min) + min
+}
+
+// Int63nRange generates an int64 in the range [min,max).
+// By design it panics if max-min <= 0.
+func Int63nRange(min, max int64) int64 {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Int63n(max-min) + min
+}
+
+// Seed seeds the rng with the provided seed.
+func Seed(seed int64) {
+ rng.Lock()
+ defer rng.Unlock()
+
+ rng.rand = rand.New(rand.NewSource(seed))
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
+// from the default Source.
+func Perm(n int) []int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Perm(n)
+}
+
+// We omit vowels from the set of available characters to reduce the chances
+// of "bad words" being formed.
+var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789")
+
+// String generates a random alphanumeric string, without vowels, which is
+// length characters long. It panics if length is less than zero.
+func String(length int) string {
+ b := make([]rune, length)
+ for i := range b {
+ b[i] = alphanums[Intn(len(alphanums))]
+ }
+ return string(b)
+}
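
A usage sketch (illustrative): these helpers are handy for generating name suffixes and picking arbitrary values in tests.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/rand"
)

func main() {
    // A 5-character vowel-free suffix, generateName-style.
    fmt.Println("mypod-" + rand.String(5))

    // An int in [8000,9000), e.g. to pick a port for a test.
    fmt.Println(rand.IntnRange(8000, 9000))
}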
diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
new file mode 100644
index 000000000..acfeb827c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ DefaultStreamCreationTimeout = 30 * time.Second
+
+ // The SPDY subprotocol "channel.k8s.io" is used for remote command
+ // attachment/execution. This represents the initial unversioned subprotocol,
+ // which has the known bugs http://issues.k8s.io/13394 and
+ // http://issues.k8s.io/13395.
+ StreamProtocolV1Name = "channel.k8s.io"
+
+ // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command
+ // attachment/execution. It is the second version of the subprotocol and
+ // resolves the issues present in the first version.
+ StreamProtocolV2Name = "v2.channel.k8s.io"
+
+ // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command
+ // attachment/execution. It is the third version of the subprotocol and
+ // adds support for resizing container terminals.
+ StreamProtocolV3Name = "v3.channel.k8s.io"
+
+ // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command
+ // attachment/execution. It is the 4th version of the subprotocol and
+ // adds support for exit codes.
+ StreamProtocolV4Name = "v4.channel.k8s.io"
+
+ NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
+ ExitCodeCauseType = metav1.CauseType("ExitCode")
+)
+
+// SupportedStreamingProtocols lists the supported remote command subprotocols,
+// newest first.
+var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}
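
A negotiation sketch, under the assumption that a server picks the first subprotocol a client offers that it also supports; pickProtocol is a hypothetical helper, not part of this package.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/remotecommand"
)

// pickProtocol (hypothetical) returns the first protocol the client offers
// that the server also supports, or "" if none match.
func pickProtocol(clientOffers []string) string {
    supported := map[string]bool{}
    for _, p := range remotecommand.SupportedStreamingProtocols {
        supported[p] = true
    }
    for _, p := range clientOffers {
        if supported[p] {
            return p
        }
    }
    return ""
}

func main() {
    offers := []string{remotecommand.StreamProtocolV4Name, remotecommand.StreamProtocolV1Name}
    fmt.Println(pickProtocol(offers)) // v4.channel.k8s.io
}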
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
new file mode 100644
index 000000000..748174e19
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/golang/glog"
+)
+
+var (
+ // ReallyCrash controls the behavior of HandleCrash and now defaults to
+ // true. It's still exposed so components can optionally set it to false
+ // to restore the prior behavior.
+ ReallyCrash = true
+)
+
+// PanicHandlers is a list of functions which will be invoked when a panic happens.
+var PanicHandlers = []func(interface{}){logPanic}
+
+// HandleCrash simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// TODO: remove this function. We are switching to a world where it's safe for
+// apiserver to panic, since it will be restarted by kubelet. At the beginning
+// of the Kubernetes project, nothing was going to restart apiserver and so
+// catching panics was important. But it's actually much simpler for monitoring
+// software if we just exit when an unexpected panic happens.
+func HandleCrash(additionalHandlers ...func(interface{})) {
+ if r := recover(); r != nil {
+ for _, fn := range PanicHandlers {
+ fn(r)
+ }
+ for _, fn := range additionalHandlers {
+ fn(r)
+ }
+ if ReallyCrash {
+ // Actually proceed to panic.
+ panic(r)
+ }
+ }
+}
+
+// logPanic logs the caller tree when a panic occurs.
+func logPanic(r interface{}) {
+ callers := getCallers(r)
+ glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
+}
+
+func getCallers(r interface{}) string {
+ callers := ""
+ for i := 0; true; i++ {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ callers = callers + fmt.Sprintf("%v:%v\n", file, line)
+ }
+
+ return callers
+}
+
+// ErrorHandlers is a list of functions which will be invoked when an unreturnable
+// error occurs.
+// TODO(lavalamp): for testability, this and the below HandleError function
+// should be packaged up into a testable and reusable object.
+var ErrorHandlers = []func(error){
+ logError,
+ (&rudimentaryErrorBackoff{
+ lastErrorTime: time.Now(),
+ // 1ms was the number folks were able to stomach as a global rate limit.
+ // If you need to log errors more than 1000 times a second you
+ // should probably consider fixing your code instead. :)
+ minPeriod: time.Millisecond,
+ }).OnError,
+}
+
+// HandleError is a function to invoke when a non-user-facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this function
+// is preferable to logging the error - the default behavior is to log, but the
+// errors may be sent to a remote server for analysis.
+func HandleError(err error) {
+ // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
+ if err == nil {
+ return
+ }
+
+ for _, fn := range ErrorHandlers {
+ fn(err)
+ }
+}
+
+// logError prints an error with the call stack of the location it was reported
+func logError(err error) {
+ glog.ErrorDepth(2, err)
+}
+
+type rudimentaryErrorBackoff struct {
+ minPeriod time.Duration // immutable
+ // TODO(lavalamp): use the clock for testability. Need to move that
+ // package for that to be accessible here.
+ lastErrorTimeLock sync.Mutex
+ lastErrorTime time.Time
+}
+
+// OnError will block if it is called more often than the embedded period time.
+// This will prevent overly tight hot error loops.
+func (r *rudimentaryErrorBackoff) OnError(error) {
+ r.lastErrorTimeLock.Lock()
+ defer r.lastErrorTimeLock.Unlock()
+ d := time.Since(r.lastErrorTime)
+ if d < r.minPeriod {
+ time.Sleep(r.minPeriod - d)
+ }
+ r.lastErrorTime = time.Now()
+}
+
+// GetCaller returns the caller of the function that calls it.
+func GetCaller() string {
+ var pc [1]uintptr
+ runtime.Callers(3, pc[:])
+ f := runtime.FuncForPC(pc[0])
+ if f == nil {
+ return "Unable to find caller"
+ }
+ return f.Name()
+}
+
+// RecoverFromPanic replaces the specified error with an error containing the
+// original error, and the call tree when a panic occurs. This enables error
+// handlers to handle errors and panics the same way.
+func RecoverFromPanic(err *error) {
+ if r := recover(); r != nil {
+ callers := getCallers(r)
+
+ *err = fmt.Errorf(
+ "recovered from panic %q. (err=%v) Call stack:\n%v",
+ r,
+ *err,
+ callers)
+ }
+}
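
A usage sketch (illustrative; doWork is a hypothetical unit of work): defer HandleCrash in goroutines, and route unreturnable errors through HandleError, which logs and rate-limits by default.

package main

import (
    "errors"

    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// doWork is a hypothetical unit of work that may fail.
func doWork() error {
    return errors.New("transient failure")
}

func worker() {
    // Log any panic in this function; with ReallyCrash (the default),
    // HandleCrash re-panics after the handlers run.
    defer utilruntime.HandleCrash()

    if err := doWork(); err != nil {
        // No caller to return the error to; hand it to the registered
        // handlers (log + rudimentary backoff by default).
        utilruntime.HandleError(err)
    }
}

func main() {
    worker()
}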
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
new file mode 100644
index 000000000..a460e4b1f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
+type Byte map[byte]Empty
+
+// NewByte creates a Byte from a list of values.
+func NewByte(items ...byte) Byte {
+ ss := Byte{}
+ ss.Insert(items...)
+ return ss
+}
+
+// ByteKeySet creates a Byte from the keys of a map[byte](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func ByteKeySet(theMap interface{}) Byte {
+ v := reflect.ValueOf(theMap)
+ ret := Byte{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(byte))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Byte) Insert(items ...byte) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes the given items from the set.
+func (s Byte) Delete(items ...byte) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Byte) Has(item byte) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Byte) HasAll(items ...byte) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Byte) HasAny(items ...byte) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects in s that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Byte) Difference(s2 Byte) Byte {
+ result := NewByte()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Byte) Union(s2 Byte) Byte {
+ result := NewByte()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Byte) Intersection(s2 Byte) Byte {
+ var walk, other Byte
+ result := NewByte()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Byte) IsSuperset(s2 Byte) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Byte) Equal(s2 Byte) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfByte []byte
+
+func (s sortableSliceOfByte) Len() int { return len(s) }
+func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) }
+func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted byte slice.
+func (s Byte) List() []byte {
+ res := make(sortableSliceOfByte, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []byte(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Byte) UnsortedList() []byte {
+ res := make([]byte, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// PopAny returns and removes a single element from the set.
+func (s Byte) PopAny() (byte, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue byte
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Byte) Len() int {
+ return len(s)
+}
+
+func lessByte(lhs, rhs byte) bool {
+ return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
new file mode 100644
index 000000000..28a6a7d5c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+// Package sets has auto-generated set types.
+package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
new file mode 100644
index 000000000..cd22b953a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
new file mode 100644
index 000000000..0614e9fb0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
+type Int map[int]Empty
+
+// NewInt creates an Int from a list of values.
+func NewInt(items ...int) Int {
+ ss := Int{}
+ ss.Insert(items...)
+ return ss
+}
+
+// IntKeySet creates an Int from the keys of a map[int](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func IntKeySet(theMap interface{}) Int {
+ v := reflect.ValueOf(theMap)
+ ret := Int{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(int))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Int) Insert(items ...int) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes the given items from the set.
+func (s Int) Delete(items ...int) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int) Has(item int) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int) HasAll(items ...int) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int) HasAny(items ...int) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects in s that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int) Difference(s2 Int) Int {
+ result := NewInt()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int) Union(s2 Int) Int {
+ result := NewInt()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int) Intersection(s2 Int) Int {
+ var walk, other Int
+ result := NewInt()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int) IsSuperset(s2 Int) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int) Equal(s2 Int) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt []int
+
+func (s sortableSliceOfInt) Len() int { return len(s) }
+func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) }
+func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int slice.
+func (s Int) List() []int {
+ res := make(sortableSliceOfInt, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []int(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int) UnsortedList() []int {
+ res := make([]int, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// PopAny returns and removes a single element from the set.
+func (s Int) PopAny() (int, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue int
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int) Len() int {
+ return len(s)
+}
+
+func lessInt(lhs, rhs int) bool {
+ return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
new file mode 100644
index 000000000..82e1ba782
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
+type Int64 map[int64]Empty
+
+// NewInt64 creates an Int64 from a list of values.
+func NewInt64(items ...int64) Int64 {
+ ss := Int64{}
+ ss.Insert(items...)
+ return ss
+}
+
+// Int64KeySet creates an Int64 from the keys of a map[int64](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int64KeySet(theMap interface{}) Int64 {
+ v := reflect.ValueOf(theMap)
+ ret := Int64{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(int64))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Int64) Insert(items ...int64) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes the given items from the set.
+func (s Int64) Delete(items ...int64) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int64) Has(item int64) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int64) HasAll(items ...int64) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int64) HasAny(items ...int64) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects in s that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int64) Difference(s2 Int64) Int64 {
+ result := NewInt64()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int64) Union(s2 Int64) Int64 {
+ result := NewInt64()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int64) Intersection(s2 Int64) Int64 {
+ var walk, other Int64
+ result := NewInt64()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int64) IsSuperset(s2 Int64) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int64) Equal(s2 Int64) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt64 []int64
+
+func (s sortableSliceOfInt64) Len() int { return len(s) }
+func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
+func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int64 slice.
+func (s Int64) List() []int64 {
+ res := make(sortableSliceOfInt64, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []int64(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int64) UnsortedList() []int64 {
+ res := make([]int64, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// PopAny returns and removes a single element from the set.
+func (s Int64) PopAny() (int64, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue int64
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int64) Len() int {
+ return len(s)
+}
+
+func lessInt64(lhs, rhs int64) bool {
+ return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
new file mode 100644
index 000000000..baef7a6a2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
+type String map[string]Empty
+
+// NewString creates a String from a list of values.
+func NewString(items ...string) String {
+ ss := String{}
+ ss.Insert(items...)
+ return ss
+}
+
+// StringKeySet creates a String from the keys of a map[string](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func StringKeySet(theMap interface{}) String {
+ v := reflect.ValueOf(theMap)
+ ret := String{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(string))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s String) Insert(items ...string) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes the given items from the set.
+func (s String) Delete(items ...string) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s String) Has(item string) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s String) HasAll(items ...string) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s String) HasAny(items ...string) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects in s that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s String) Difference(s2 String) String {
+ result := NewString()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 String) Union(s2 String) String {
+ result := NewString()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 String) Intersection(s2 String) String {
+ var walk, other String
+ result := NewString()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 String) IsSuperset(s2 String) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 String) Equal(s2 String) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfString []string
+
+func (s sortableSliceOfString) Len() int { return len(s) }
+func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) }
+func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted string slice.
+func (s String) List() []string {
+ res := make(sortableSliceOfString, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []string(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s String) UnsortedList() []string {
+ res := make([]string, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// PopAny returns and removes a single element from the set.
+func (s String) PopAny() (string, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue string
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s String) Len() int {
+ return len(s)
+}
+
+func lessString(lhs, rhs string) bool {
+ return lhs < rhs
+}
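
A usage sketch of the generated set API, using sets.String (illustrative values): set difference makes reconciliation-style comparisons concise.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    running := sets.NewString("etcd", "apiserver", "scheduler")
    desired := sets.NewString("apiserver", "scheduler", "controller-manager")

    fmt.Println(desired.Difference(running).List())   // [controller-manager]: to start
    fmt.Println(running.Difference(desired).List())   // [etcd]: to stop
    fmt.Println(running.Intersection(desired).List()) // [apiserver scheduler]
    fmt.Println(running.Union(desired).Len())         // 4
}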
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
new file mode 100644
index 000000000..8884c738e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
@@ -0,0 +1,2115 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strategicpatch
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/mergepatch"
+ forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
+)
+
+// An alternate implementation of JSON Merge Patch
+// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate
+// certain fields with metadata that indicates whether the elements of JSON
+// lists should be merged or replaced.
+//
+// For more information, see the PATCH section of docs/devel/api-conventions.md.
+//
+// Some of the content of this package was borrowed with minor adaptations from
+// evanphx/json-patch and openshift/origin.
+
+const (
+ directiveMarker = "$patch"
+ deleteDirective = "delete"
+ replaceDirective = "replace"
+ mergeDirective = "merge"
+
+ retainKeysStrategy = "retainKeys"
+
+ deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList"
+ retainKeysDirective = "$" + retainKeysStrategy
+ setElementOrderDirectivePrefix = "$setElementOrder"
+)
+
+// JSONMap is a representation of a JSON object encoded as map[string]interface{},
+// where the children can be either map[string]interface{}, []interface{}, or a
+// primitive type.
+// Operating on the JSONMap representation is much faster as it doesn't require
+// any JSON marshaling and/or unmarshaling operations.
+type JSONMap map[string]interface{}
+
+type DiffOptions struct {
+ // SetElementOrder determines whether we generate the $setElementOrder parallel list.
+ SetElementOrder bool
+ // IgnoreChangesAndAdditions indicates whether changes and additions are
+ // omitted from the patch; true means they are left out.
+ IgnoreChangesAndAdditions bool
+ // IgnoreDeletions indicates whether deletions are omitted from the patch;
+ // true means they are left out.
+ IgnoreDeletions bool
+ // BuildRetainKeysDirective indicates whether to build the $retainKeys
+ // directive: all fields needing to be preserved must be present in the
+ // `retainKeys` list, fields that are present will be merged with the live
+ // object, and all missing fields will be cleared when patching.
+ BuildRetainKeysDirective bool
+}
+
+type MergeOptions struct {
+ // MergeParallelList indicates if we are merging the parallel list.
+ // We don't merge the parallel list when calling mergeMap() in
+ // CreateThreeWayMergePatch(), which is called client-side.
+ // We only merge the parallel list when calling mergeMap() in
+ // StrategicMergeMapPatch(), which is called server-side.
+ MergeParallelList bool
+ // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls.
+ IgnoreUnmatchedNulls bool
+}
+
+// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge.
+// Instead of defining a Delta that holds an original, a patch and a set of preconditions,
+// the reconcile method accepts a set of preconditions as an argument.
+
+// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original
+// document and a modified document, which are passed to the method as json encoded content. It will
+// return a patch that yields the modified document when applied to the original document, or an error
+// if either of the two documents is invalid.
+func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+ originalMap := map[string]interface{}{}
+ if len(original) > 0 {
+ if err := json.Unmarshal(original, &originalMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ modifiedMap := map[string]interface{}{}
+ if len(modified) > 0 {
+ if err := json.Unmarshal(modified, &modifiedMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ patchMap, err := CreateTwoWayMergeMapPatch(originalMap, modifiedMap, dataStruct, fns...)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(patchMap)
+}
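
A sketch of how CreateTwoWayMergePatch is typically driven (illustrative: the `spec` struct below stands in for a real API type, whose struct tags would normally carry patch strategy metadata; fields without a merge strategy fall back to replace semantics):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

// spec is an illustrative stand-in for an API type.
type spec struct {
    Replicas int               `json:"replicas"`
    Labels   map[string]string `json:"labels"`
}

func main() {
    original := []byte(`{"replicas":1,"labels":{"app":"web","tier":"old"}}`)
    modified := []byte(`{"replicas":3,"labels":{"app":"web"}}`)

    patch, err := strategicpatch.CreateTwoWayMergePatch(original, modified, spec{})
    if err != nil {
        panic(err)
    }
    // Expected under these assumptions: only the changed field and the
    // deleted key (as null) appear, e.g.
    // {"labels":{"tier":null},"replicas":3}
    fmt.Println(string(patch))
}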
+
+// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects,
+// encoded JSONMap.
+// The serialized version of the map can then be passed to StrategicMergeMapPatch.
+func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) {
+ t, err := getTagStructType(dataStruct)
+ if err != nil {
+ return nil, err
+ }
+
+ diffOptions := DiffOptions{
+ SetElementOrder: true,
+ }
+ patchMap, err := diffMaps(original, modified, t, diffOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ // Apply the preconditions to the patch, and return an error if any of them fail.
+ for _, fn := range fns {
+ if !fn(patchMap) {
+ return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+ }
+ }
+
+ return patchMap, nil
+}
+
+// diffMaps returns a (recursive) strategic merge patch that yields modified when applied to original.
+// Including:
+// - Adding fields to the patch present in modified, missing from original
+// - Setting fields to the patch present in modified and original with different values
+// - Deleting fields present in original, missing from modified, through:
+// - IFF map field - set to nil in patch
+// - IFF list of maps && merge strategy - use deleteDirective for the elements
+// - IFF list of primitives && merge strategy - use parallel deletion list
+// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified
+// - Build $retainKeys directive for fields with retainKeys patch strategy
+func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOptions DiffOptions) (map[string]interface{}, error) {
+ patch := map[string]interface{}{}
+ // Get the underlying type for pointers
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ // This will be used to build the $retainKeys directive sent in the patch
+ retainKeysList := make([]interface{}, 0, len(modified))
+
+ // Compare each value in the modified map against the value in the original map
+ for key, modifiedValue := range modified {
+ // Get the underlying type for pointers
+ if diffOptions.BuildRetainKeysDirective && modifiedValue != nil {
+ retainKeysList = append(retainKeysList, key)
+ }
+
+ originalValue, ok := original[key]
+ if !ok {
+ // Key was added, so add to patch
+ if !diffOptions.IgnoreChangesAndAdditions {
+ patch[key] = modifiedValue
+ }
+ continue
+ }
+
+ // The patch may have a patch directive
+ // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it?
+ foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch)
+ if err != nil {
+ return nil, err
+ }
+ if foundDirectiveMarker {
+ continue
+ }
+
+ if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) {
+ // Types have changed, so add to patch
+ if !diffOptions.IgnoreChangesAndAdditions {
+ patch[key] = modifiedValue
+ }
+ continue
+ }
+
+ // Types are the same, so compare values
+ switch originalValueTyped := originalValue.(type) {
+ case map[string]interface{}:
+ modifiedValueTyped := modifiedValue.(map[string]interface{})
+ err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions)
+ case []interface{}:
+ modifiedValueTyped := modifiedValue.([]interface{})
+ err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions)
+ default:
+ replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ updatePatchIfMissing(original, modified, patch, diffOptions)
+ // Insert the retainKeysList iff there are values present in the retainKeysList and
+ // either of the following is true:
+ // - the patch is not empty
+ // - there are additional field in original that need to be cleared
+ if len(retainKeysList) > 0 &&
+ (len(patch) > 0 || hasAdditionalNewField(original, modified)) {
+ patch[retainKeysDirective] = sortScalars(retainKeysList)
+ }
+ return patch, nil
+}
+
+// handleDirectiveMarker handles how to diff the directive marker between 2 objects.
+func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) {
+ if key == directiveMarker {
+ originalString, ok := originalValue.(string)
+ if !ok {
+ return false, fmt.Errorf("invalid value for special key: %s", directiveMarker)
+ }
+ modifiedString, ok := modifiedValue.(string)
+ if !ok {
+ return false, fmt.Errorf("invalid value for special key: %s", directiveMarker)
+ }
+ if modifiedString != originalString {
+ patch[directiveMarker] = modifiedValue
+ }
+ return true, nil
+ }
+ return false, nil
+}
+
+// handleMapDiff computes the diff between the 2 maps `originalValue` and
+// `modifiedValue` and puts the result in the `patch` associated with `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue and modifiedValue are the old and new value respectively. They are both maps.
+// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue
+// diffOptions contains multiple options to control how we do the diff.
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{},
+ t reflect.Type, diffOptions DiffOptions) error {
+ fieldType, fieldPatchStrategies, _, err := forkedjson.LookupPatchMetadata(t, key)
+ if err != nil {
+ // We couldn't look up metadata for the field
+ // If the values are identical, this doesn't matter, no patch is needed
+ if reflect.DeepEqual(originalValue, modifiedValue) {
+ return nil
+ }
+ // Otherwise, return the error
+ return err
+ }
+ retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies)
+ if err != nil {
+ return err
+ }
+ diffOptions.BuildRetainKeysDirective = retainKeys
+ switch patchStrategy {
+ // The patch strategy from metadata tells us to replace the entire object instead of diffing it
+ case replaceDirective:
+ if !diffOptions.IgnoreChangesAndAdditions {
+ patch[key] = modifiedValue
+ }
+ default:
+ patchValue, err := diffMaps(originalValue, modifiedValue, fieldType, diffOptions)
+ if err != nil {
+ return err
+ }
+ // Maps were not identical, use provided patch value
+ if len(patchValue) > 0 {
+ patch[key] = patchValue
+ }
+ }
+ return nil
+}
+
+// handleSliceDiff computes the diff between the 2 slices `originalValue` and
+// `modifiedValue` and puts the result in the `patch` associated with `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue and modifiedValue are the old and new value respectively. They are both slices.
+// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue
+// diffOptions contains multiple options to control how we do the diff.
+func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{},
+ t reflect.Type, diffOptions DiffOptions) error {
+ fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key)
+ if err != nil {
+ // We couldn't look up metadata for the field
+ // If the values are identical, this doesn't matter, no patch is needed
+ if reflect.DeepEqual(originalValue, modifiedValue) {
+ return nil
+ }
+ // Otherwise, return the error
+ return err
+ }
+ retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies)
+ if err != nil {
+ return err
+ }
+ switch patchStrategy {
+ // Merge the 2 slices using mergePatchKey
+ case mergeDirective:
+ diffOptions.BuildRetainKeysDirective = retainKeys
+ addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, fieldType.Elem(), fieldPatchMergeKey, diffOptions)
+ if err != nil {
+ return err
+ }
+ if len(addList) > 0 {
+ patch[key] = addList
+ }
+ // generate a parallel list for deletion
+ if len(deletionList) > 0 {
+ parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key)
+ patch[parallelDeletionListKey] = deletionList
+ }
+ if len(setOrderList) > 0 {
+ parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key)
+ patch[parallelSetOrderListKey] = setOrderList
+ }
+ default:
+ replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+ }
+ return nil
+}
+
+// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal
+// if diffOptions.IgnoreChangesAndAdditions is false.
+// original is the old value, which may be either the live cluster object or the last applied configuration
+// modified is the new value and is always the user's new config
+func replacePatchFieldIfNotEqual(key string, original, modified interface{},
+ patch map[string]interface{}, diffOptions DiffOptions) {
+ if diffOptions.IgnoreChangesAndAdditions {
+ // Ignoring changes - do nothing
+ return
+ }
+ if reflect.DeepEqual(original, modified) {
+ // Contents are identical - do nothing
+ return
+ }
+ // Create a patch to replace the old value with the new one
+ patch[key] = modified
+}
+
+// updatePatchIfMissing iterates over `original` when diffOptions.IgnoreDeletions is false,
+// clearing each field whose key is not present in `modified`.
+// original is the old value, which may be either the live cluster object or the last applied configuration
+// modified is the new value and is always the user's new config
+func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) {
+ if diffOptions.IgnoreDeletions {
+ // Ignoring deletion - do nothing
+ return
+ }
+ // Add nils for deleted values
+ for key := range original {
+ if _, found := modified[key]; !found {
+ patch[key] = nil
+ }
+ }
+}
+
+// validateMergeKeyInLists checks if each map in the list has the merge key.
+func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error {
+ for _, list := range lists {
+ for _, item := range list {
+ m, ok := item.(map[string]interface{})
+ if !ok {
+ return mergepatch.ErrBadArgType(m, item)
+ }
+ if _, ok = m[mergeKey]; !ok {
+ return mergepatch.ErrNoMergeKey(m, mergeKey)
+ }
+ }
+ }
+ return nil
+}
+
+// normalizeElementOrder sorts the `patch` list by `patchOrder` and the `serverOnly` list by `serverOrder`,
+// then merges the 2 sorted lists.
+// It guarantees that the relative order in the patch list and in the serverOnly list is kept.
+// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object.
+// `patchOrder` is the order we want `patch` list to have and
+// `serverOrder` is the order we want `serverOnly` list to have.
+// kind is the kind of each item in the lists `patch` and `serverOnly`.
+func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) {
+ patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind)
+ if err != nil {
+ return nil, err
+ }
+ serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind)
+ if err != nil {
+ return nil, err
+ }
+ all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind)
+
+ return all, nil
+}
+
+// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort.
+// It will insert each item in the `left` list into the `right` list. In most cases, the 2 lists will be interleaved.
+// The relative orders of left and right are guaranteed to be kept.
+// They have higher precedence than the order in the live list.
+// The place for an item in `left` is found by:
+// scanning from the place of the last insertion in `right` to the end of `right`,
+// the place is before the first item that is greater than the item we want to insert.
+// Example usage: using server-only items as left and patch items as right, we insert server-only items
+// into the patch list. We use the order of the live object as the record for comparison.
+func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} {
+ // Returns if l is less than r, and if both have been found.
+ // If l and r both present and l is in front of r, l is less than r.
+ less := func(l, r interface{}) (bool, bool) {
+ li := index(serverOrder, l, mergeKey, kind)
+ ri := index(serverOrder, r, mergeKey, kind)
+ if li >= 0 && ri >= 0 {
+ return li < ri, true
+ } else {
+ return false, false
+ }
+ }
+
+ // left and right should be non-overlapping.
+ size := len(left) + len(right)
+ i, j := 0, 0
+ s := make([]interface{}, size, size)
+
+ for k := 0; k < size; k++ {
+ if i >= len(left) && j < len(right) {
+ // have items left in `right` list
+ s[k] = right[j]
+ j++
+ } else if j >= len(right) && i < len(left) {
+ // have items left in `left` list
+ s[k] = left[i]
+ i++
+ } else {
+ // compare them if i and j are both in bound
+ less, foundBoth := less(left[i], right[j])
+ if foundBoth && less {
+ s[k] = left[i]
+ i++
+ } else {
+ s[k] = right[j]
+ j++
+ }
+ }
+ }
+ return s
+}
+
+// index returns the index of the item in the given items, or -1 if it doesn't exist
+// l must NOT be a slice of slices, this should be checked before calling.
+func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int {
+ var getValFn func(interface{}) interface{}
+ // Get the correct `getValFn` based on item `kind`.
+ // It should return the value of merge key for maps and
+ // return the item for other kinds.
+ switch kind {
+ case reflect.Map:
+ getValFn = func(item interface{}) interface{} {
+ typedItem, ok := item.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+ val := typedItem[mergeKey]
+ return val
+ }
+ default:
+ getValFn = func(item interface{}) interface{} {
+ return item
+ }
+ }
+
+ for i, v := range l {
+ if getValFn(valToLookUp) == getValFn(v) {
+ return i
+ }
+ }
+ return -1
+}
+
+// extractToDeleteItems takes a list and
+// returns 2 lists: one contains items that should be kept and the other contains items to be deleted.
+func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) {
+ var nonDelete, toDelete []interface{}
+ for _, v := range l {
+ m, ok := v.(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(m, v)
+ }
+
+ directive, foundDirective := m[directiveMarker]
+ if foundDirective && directive == deleteDirective {
+ toDelete = append(toDelete, v)
+ } else {
+ nonDelete = append(nonDelete, v)
+ }
+ }
+ return nonDelete, toDelete, nil
+}
+
+// normalizeSliceOrder sorts the `toSort` list by `order`.
+func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) {
+ var toDelete []interface{}
+ if kind == reflect.Map {
+ // make sure each item in toSort, order has merge key
+ err := validateMergeKeyInLists(mergeKey, toSort, order)
+ if err != nil {
+ return nil, err
+ }
+ toSort, toDelete, err = extractToDeleteItems(toSort)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ sort.SliceStable(toSort, func(i, j int) bool {
+ if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 {
+ if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 {
+ return ii < ij
+ }
+ }
+ return true
+ })
+ toSort = append(toSort, toDelete...)
+ return toSort, nil
+}
+
+// diffLists returns a (recursive) strategic merge patch, a parallel deletion list if necessary,
+// and another list to set the order of the list.
+// Only list of primitives with merge strategy will generate a parallel deletion list.
+// These two lists should yield modified when applied to original, for lists with merge semantics.
+func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) {
+ if len(original) == 0 {
+ // Both slices are empty - do nothing
+ if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions {
+ return nil, nil, nil, nil
+ }
+
+ // Old slice was empty - add all elements from the new slice
+ return modified, nil, nil, nil
+ }
+
+ elementType, err := sliceElementType(original, modified)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ var patchList, deleteList, setOrderList []interface{}
+ kind := elementType.Kind()
+ switch kind {
+ case reflect.Map:
+		patchList, deleteList, err = diffListsOfMaps(original, modified, t, mergeKey, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		orderSame, err := isOrderSame(original, modified, mergeKey)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ // append the deletions to the end of the patch list.
+ patchList = append(patchList, deleteList...)
+ deleteList = nil
+ // generate the setElementOrder list when there are content changes or order changes
+ if diffOptions.SetElementOrder &&
+ ((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) ||
+ (!diffOptions.IgnoreDeletions && len(patchList) > 0)) {
+ // Generate a list of maps that each item contains only the merge key.
+ setOrderList = make([]interface{}, len(modified))
+ for i, v := range modified {
+ typedV := v.(map[string]interface{})
+ setOrderList[i] = map[string]interface{}{
+ mergeKey: typedV[mergeKey],
+ }
+ }
+ }
+ case reflect.Slice:
+ // Lists of Lists are not permitted by the api
+ return nil, nil, nil, mergepatch.ErrNoListOfLists
+ default:
+		patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+ // generate the setElementOrder list when there are content changes or order changes
+ if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) ||
+ (!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) {
+ setOrderList = modified
+ }
+ }
+ return patchList, deleteList, setOrderList, err
+}
+
+// isOrderSame checks whether two lists have the same length and the same item order,
+// comparing items by merge key when one is given.
+func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) {
+ if len(original) != len(modified) {
+ return false, nil
+ }
+ for i, modifiedItem := range modified {
+ equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey)
+ if err != nil || !equal {
+ return equal, err
+ }
+ }
+ return true, nil
+}
+
+// diffListsOfScalars returns 2 lists: the first is the addList, the second the deletionList.
+// diffOptions.IgnoreChangesAndAdditions controls whether the addList is computed; true means it is not.
+// diffOptions.IgnoreDeletions controls whether the deletionList is computed; true means it is not.
+// original may be changed, but modified is guaranteed not to be changed.
+func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
+ modifiedCopy := make([]interface{}, len(modified))
+ copy(modifiedCopy, modified)
+ // Sort the scalars for easier calculating the diff
+ originalScalars := sortScalars(original)
+ modifiedScalars := sortScalars(modifiedCopy)
+
+ originalIndex, modifiedIndex := 0, 0
+ addList := []interface{}{}
+ deletionList := []interface{}{}
+
+ for {
+ originalInBounds := originalIndex < len(originalScalars)
+ modifiedInBounds := modifiedIndex < len(modifiedScalars)
+ if !originalInBounds && !modifiedInBounds {
+ break
+ }
+		// We need to compare the string representations of the scalars,
+		// because a scalar is held in an interface, which supports neither < nor >.
+		// This is also how sortScalars compares scalars.
+ var originalString, modifiedString string
+ var originalValue, modifiedValue interface{}
+ if originalInBounds {
+ originalValue = originalScalars[originalIndex]
+ originalString = fmt.Sprintf("%v", originalValue)
+ }
+ if modifiedInBounds {
+ modifiedValue = modifiedScalars[modifiedIndex]
+ modifiedString = fmt.Sprintf("%v", modifiedValue)
+ }
+
+ originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString)
+ switch {
+ case originalV == nil && modifiedV == nil:
+ originalIndex++
+ modifiedIndex++
+ case originalV != nil && modifiedV == nil:
+ if !diffOptions.IgnoreDeletions {
+ deletionList = append(deletionList, originalValue)
+ }
+ originalIndex++
+ case originalV == nil && modifiedV != nil:
+ if !diffOptions.IgnoreChangesAndAdditions {
+ addList = append(addList, modifiedValue)
+ }
+ modifiedIndex++
+ default:
+			return nil, nil, fmt.Errorf("unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV)
+ }
+ }
+
+ return addList, deduplicateScalars(deletionList), nil
+}
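+
+// Example (editor's illustration): with original = ["a", "b"] and modified = ["b", "c"],
+// and neither IgnoreChangesAndAdditions nor IgnoreDeletions set, diffListsOfScalars
+// returns addList = ["c"] and deletionList = ["a"].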
+
+// If first return value is non-nil, list1 contains an element not present in list2
+// If second return value is non-nil, list2 contains an element not present in list1
+func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) {
+ bothInBounds := list1Inbounds && list2Inbounds
+ switch {
+ // scalars are identical
+ case bothInBounds && list1Value == list2Value:
+ return nil, nil
+	// only list2 is in bounds
+ case !list1Inbounds:
+ fallthrough
+ // list2 has additional scalar
+ case bothInBounds && list1Value > list2Value:
+ return nil, list2Value
+	// only list1 is in bounds
+	case !list2Inbounds:
+		fallthrough
+	// list1 has additional scalar
+ case bothInBounds && list1Value < list2Value:
+ return list1Value, nil
+ default:
+ return nil, nil
+ }
+}
+
+// diffListsOfMaps takes a pair of lists and
+// returns a (recursive) strategic merge patch list containing additions and changes, and
+// a deletion list containing deletions.
+func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
+ patch := make([]interface{}, 0, len(modified))
+ deletionList := make([]interface{}, 0, len(original))
+
+ originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false)
+ if err != nil {
+ return nil, nil, err
+ }
+ modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ originalIndex, modifiedIndex := 0, 0
+ for {
+ originalInBounds := originalIndex < len(originalSorted)
+ modifiedInBounds := modifiedIndex < len(modifiedSorted)
+ bothInBounds := originalInBounds && modifiedInBounds
+ if !originalInBounds && !modifiedInBounds {
+ break
+ }
+
+ var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string
+ var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{}
+ var originalElement, modifiedElement map[string]interface{}
+ if originalInBounds {
+ originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted)
+ if err != nil {
+ return nil, nil, err
+ }
+ originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue)
+ }
+ if modifiedInBounds {
+ modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted)
+ if err != nil {
+ return nil, nil, err
+ }
+ modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue)
+ }
+
+ switch {
+ case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
+ // Merge key values are equal, so recurse
+ patchValue, err := diffMaps(originalElement, modifiedElement, t, diffOptions)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(patchValue) > 0 {
+ patchValue[mergeKey] = modifiedElementMergeKeyValue
+ patch = append(patch, patchValue)
+ }
+ originalIndex++
+ modifiedIndex++
+		// only modified is in bounds
+ case !originalInBounds:
+ fallthrough
+ // modified has additional map
+ case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
+ if !diffOptions.IgnoreChangesAndAdditions {
+ patch = append(patch, modifiedElement)
+ }
+ modifiedIndex++
+		// only original is in bounds
+ case !modifiedInBounds:
+ fallthrough
+ // original has additional map
+ case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
+ if !diffOptions.IgnoreDeletions {
+ // Item was deleted, so add delete directive
+ deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue))
+ }
+ originalIndex++
+ }
+ }
+
+ return patch, deletionList, nil
+}
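+
+// Example (editor's illustration, assuming the "$patch"/"delete" directive literals used
+// elsewhere in this file): with mergeKey "name",
+//   original = [{"name": "a", "image": "x"}, {"name": "b"}]
+//   modified = [{"name": "a", "image": "y"}]
+// diffListsOfMaps returns patch = [{"name": "a", "image": "y"}] and
+// deletionList = [{"name": "b", "$patch": "delete"}].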
+
+// getMapAndMergeKeyValueByIndex returns the map at the given index in the list, along with its merge key value.
+func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) {
+ m, ok := listOfMaps[index].(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index])
+ }
+
+ val, ok := m[mergeKey]
+ if !ok {
+ return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey)
+ }
+ return m, val, nil
+}
+
+// StrategicMergePatch applies a strategic merge patch. The patch and the original document
+// must be json encoded content. A patch can be created from an original and a modified document
+// by calling CreateStrategicMergePatch.
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) {
+ originalMap, err := handleUnmarshal(original)
+ if err != nil {
+ return nil, err
+ }
+ patchMap, err := handleUnmarshal(patch)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err := StrategicMergeMapPatch(originalMap, patchMap, dataStruct)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(result)
+}
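+
+// Usage sketch (editor's illustration; Widget and Container are hypothetical types):
+//
+//   type Widget struct {
+//       Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
+//   }
+//
+//   patched, err := StrategicMergePatch(originalJSON, patchJSON, Widget{})
+//
+// dataStruct only supplies the patchStrategy/patchMergeKey struct tags that drive
+// how lists in the JSON documents are merged; its field values are never read.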
+
+func handleUnmarshal(j []byte) (map[string]interface{}, error) {
+ if j == nil {
+ j = []byte("{}")
+ }
+
+ m := map[string]interface{}{}
+ err := json.Unmarshal(j, &m)
+ if err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ return m, nil
+}
+
+// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents
+// must be JSONMap. A patch can be created from an original and modified document by
+// calling CreateTwoWayMergeMapPatch.
+// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused.
+func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) {
+ t, err := getTagStructType(dataStruct)
+ if err != nil {
+ return nil, err
+ }
+ mergeOptions := MergeOptions{
+ MergeParallelList: true,
+ IgnoreUnmatchedNulls: true,
+ }
+ return mergeMap(original, patch, t, mergeOptions)
+}
+
+func getTagStructType(dataStruct interface{}) (reflect.Type, error) {
+ if dataStruct == nil {
+ return nil, mergepatch.ErrBadArgKind(struct{}{}, nil)
+ }
+
+ t := reflect.TypeOf(dataStruct)
+ // Get the underlying type for pointers
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ if t.Kind() != reflect.Struct {
+ return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct)
+ }
+
+ return t, nil
+}
+
+// handleDirectiveInMergeMap handles the patch directive when merging 2 maps.
+func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) {
+ if directive == replaceDirective {
+ // If the patch contains "$patch: replace", don't merge it, just use the
+ // patch directly. Later on, we can add a single level replace that only
+ // affects the map that the $patch is in.
+ delete(patch, directiveMarker)
+ return patch, nil
+ }
+
+ if directive == deleteDirective {
+ // If the patch contains "$patch: delete", don't merge it, just return
+ // an empty map.
+ return map[string]interface{}{}, nil
+ }
+
+ return nil, mergepatch.ErrBadPatchType(directive, patch)
+}
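+
+// Example (editor's illustration): a patch of
+//   {"$patch": "replace", "spec": {...}}
+// replaces the whole map with the patch (minus the marker), while
+//   {"$patch": "delete"}
+// clears the map entirely. Any other "$patch" value is rejected as a bad patch type.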
+
+func containsDirectiveMarker(item interface{}) bool {
+ m, ok := item.(map[string]interface{})
+ if ok {
+ if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker {
+ return true
+ }
+ }
+ return false
+}
+
+func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) {
+ if len(mergeKey) == 0 {
+ return left == right, nil
+ }
+ typedLeft, ok := left.(map[string]interface{})
+ if !ok {
+ return false, mergepatch.ErrBadArgType(typedLeft, left)
+ }
+ typedRight, ok := right.(map[string]interface{})
+ if !ok {
+ return false, mergepatch.ErrBadArgType(typedRight, right)
+ }
+ mergeKeyLeft, ok := typedLeft[mergeKey]
+ if !ok {
+ return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey)
+ }
+ mergeKeyRight, ok := typedRight[mergeKey]
+ if !ok {
+ return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey)
+ }
+ return mergeKeyLeft == mergeKeyRight, nil
+}
+
+// extractKey trims the prefix and returns the original key.
+func extractKey(s, prefix string) (string, error) {
+ substrings := strings.SplitN(s, "/", 2)
+ if len(substrings) <= 1 || substrings[0] != prefix {
+ switch prefix {
+ case deleteFromPrimitiveListDirectivePrefix:
+ return "", mergepatch.ErrBadPatchFormatForPrimitiveList
+ case setElementOrderDirectivePrefix:
+ return "", mergepatch.ErrBadPatchFormatForSetElementOrderList
+ default:
+			return "", fmt.Errorf("failed to find prefix %q in %q", prefix, s)
+ }
+ }
+ return substrings[1], nil
+}
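+
+// Example (editor's illustration, assuming the "$deleteFromPrimitiveList" prefix constant
+// defined earlier in this file):
+//   extractKey("$deleteFromPrimitiveList/finalizers", deleteFromPrimitiveListDirectivePrefix)
+// returns "finalizers".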
+
+// validatePatchWithSetOrderList verifies that:
+// the relative order of any two items in the setOrderList matches that in the patch list;
+// the items in the patch list are a subset of, or the same as, the $setElementOrder list (deletions are ignored).
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error {
+ typedSetOrderList, ok := setOrderList.([]interface{})
+ if !ok {
+ return mergepatch.ErrBadPatchFormatForSetElementOrderList
+ }
+ typedPatchList, ok := patchList.([]interface{})
+ if !ok {
+ return mergepatch.ErrBadPatchFormatForSetElementOrderList
+ }
+ if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 {
+ return nil
+ }
+
+	var nonDeleteList []interface{}
+	var err error
+	if len(mergeKey) > 0 {
+		nonDeleteList, _, err = extractToDeleteItems(typedPatchList)
+ if err != nil {
+ return err
+ }
+ } else {
+ nonDeleteList = typedPatchList
+ }
+
+ patchIndex, setOrderIndex := 0, 0
+ for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) {
+ if containsDirectiveMarker(nonDeleteList[patchIndex]) {
+ patchIndex++
+ continue
+ }
+ mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey)
+ if err != nil {
+ return err
+ }
+ if mergeKeyEqual {
+ patchIndex++
+ }
+ setOrderIndex++
+ }
+	// If patchIndex is in bounds but setOrderIndex is out of bounds, there are items
+	// in the patch list that do not match the setElementOrder list.
+	// The second check is a sanity check and should always be true if the first is true.
+	if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
+		return fmt.Errorf("the order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
+	}
+ return nil
+}
+
+// preprocessDeletionListForMerging preprocesses the deletion list.
+// It returns shouldContinue, isDeleteList, and noPrefixKey.
+func preprocessDeletionListForMerging(key string, original map[string]interface{},
+ patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) {
+ // If found a parallel list for deletion and we are going to merge the list,
+ // overwrite the key to the original key and set flag isDeleteList
+ foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix)
+ if foundParallelListPrefix {
+ if !mergeDeletionList {
+ original[key] = patchVal
+ return true, false, "", nil
+ }
+ originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix)
+ return false, true, originalKey, err
+ }
+ return false, false, "", nil
+}
+
+// applyRetainKeysDirective looks for a retainKeys directive and applies it to original
+// - if no directive exists do nothing
+// - if directive is found, clear keys in original missing from the directive list
+// - validate that all keys present in the patch are present in the retainKeys directive
+// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives.
+func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error {
+ retainKeysInPatch, foundInPatch := patch[retainKeysDirective]
+ if !foundInPatch {
+ return nil
+ }
+ // cleanup the directive
+ delete(patch, retainKeysDirective)
+
+ if !options.MergeParallelList {
+ // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both.
+ // If not present in the original patch, copy from the modified patch.
+ retainKeysInOriginal, foundInOriginal := original[retainKeysDirective]
+ if foundInOriginal {
+ if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) {
+ // This error actually should never happen.
+ return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch)
+ }
+ } else {
+ original[retainKeysDirective] = retainKeysInPatch
+ }
+ return nil
+ }
+
+ retainKeysList, ok := retainKeysInPatch.([]interface{})
+ if !ok {
+ return mergepatch.ErrBadPatchFormatForRetainKeys
+ }
+
+ // validate patch to make sure all fields in the patch are present in the retainKeysList.
+ // The map is used only as a set, the value is never referenced
+	// The map is used only as a set; the value is never referenced.
+ for _, v := range retainKeysList {
+ m[v] = struct{}{}
+ }
+ for k, v := range patch {
+ if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) ||
+ strings.HasPrefix(k, setElementOrderDirectivePrefix) {
+ continue
+ }
+ // If there is an item present in the patch but not in the retainKeys list,
+ // the patch is invalid.
+ if _, found := m[k]; !found {
+ return mergepatch.ErrBadPatchFormatForRetainKeys
+ }
+ }
+
+ // clear not present fields
+ for k := range original {
+ if _, found := m[k]; !found {
+ delete(original, k)
+ }
+ }
+ return nil
+}
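+
+// Example (editor's illustration): with MergeParallelList set, a patch of
+//   {"$retainKeys": ["name", "spec"], "spec": {...}}
+// deletes every key of original other than "name" and "spec", and the patch is
+// rejected if it sets any key outside that list.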
+
+// mergePatchIntoOriginal processes the $setElementOrder list.
+// When not merging the directive, it makes sure the $setElementOrder list exists only in original.
+// When merging the directive, it tries to find the $setElementOrder list and
+// its corresponding patch list, validates them, and merges them.
+// Then it sorts them by the relative order in setElementOrder, the patch list, and the live list.
+// The precedence is $setElementOrder > order in patch list > order in live list.
+// This function deletes each item after merging it to prevent it from being processed again later.
+// Ref: https://git.k8s.io/community/contributors/design-proposals/preserve-order-in-strategic-merge-patch.md
+func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) error {
+ for key, patchV := range patch {
+ // Do nothing if there is no ordering directive
+ if !strings.HasPrefix(key, setElementOrderDirectivePrefix) {
+ continue
+ }
+
+ setElementOrderInPatch := patchV
+		// When not merging the directive, copy it from the second patch (`patch`) to the
+		// first patch (`original`), or check that the two copies are equal if both exist;
+		// the directive is deleted from the second patch afterwards.
+ if !mergeOptions.MergeParallelList {
+ setElementOrderListInOriginal, ok := original[key]
+ if ok {
+ // check if the setElementOrder list in original and the one in patch matches
+ if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) {
+ return mergepatch.ErrBadPatchFormatForSetElementOrderList
+ }
+ } else {
+ // move the setElementOrder list from patch to original
+ original[key] = setElementOrderInPatch
+ }
+ }
+ delete(patch, key)
+
+ var (
+ ok bool
+ originalFieldValue, patchFieldValue, merged []interface{}
+ patchStrategy, mergeKey string
+ patchStrategies []string
+ fieldType reflect.Type
+ )
+ typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{})
+ if !ok {
+ return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch)
+ }
+ // Trim the setElementOrderDirectivePrefix to get the key of the list field in original.
+ originalKey, err := extractKey(key, setElementOrderDirectivePrefix)
+ if err != nil {
+ return err
+ }
+		// try to find the list with `originalKey` in `original` and `patch` and merge them.
+ originalList, foundOriginal := original[originalKey]
+ patchList, foundPatch := patch[originalKey]
+ if foundOriginal {
+ originalFieldValue, ok = originalList.([]interface{})
+ if !ok {
+ return mergepatch.ErrBadArgType(originalFieldValue, originalList)
+ }
+ }
+ if foundPatch {
+ patchFieldValue, ok = patchList.([]interface{})
+ if !ok {
+ return mergepatch.ErrBadArgType(patchFieldValue, patchList)
+ }
+ }
+ fieldType, patchStrategies, mergeKey, err = forkedjson.LookupPatchMetadata(t, originalKey)
+ if err != nil {
+ return err
+ }
+ _, patchStrategy, err = extractRetainKeysPatchStrategy(patchStrategies)
+ if err != nil {
+ return err
+ }
+ // Check for consistency between the element order list and the field it applies to
+ err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, mergeKey)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case foundOriginal && !foundPatch:
+ // no change to list contents
+ merged = originalFieldValue
+ case !foundOriginal && foundPatch:
+ // list was added
+ merged = patchFieldValue
+ case foundOriginal && foundPatch:
+ merged, err = mergeSliceHandler(originalList, patchList, fieldType,
+ patchStrategy, mergeKey, false, mergeOptions)
+ if err != nil {
+ return err
+ }
+ case !foundOriginal && !foundPatch:
+ continue
+ }
+
+ // Split all items into patch items and server-only items and then enforce the order.
+ var patchItems, serverOnlyItems []interface{}
+ if len(mergeKey) == 0 {
+			// Primitives don't need a merge key for partitioning.
+ patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList)
+
+ } else {
+			// Maps need the merge key for partitioning.
+ patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, mergeKey)
+ if err != nil {
+ return err
+ }
+ }
+
+ elementType, err := sliceElementType(originalFieldValue, patchFieldValue)
+ if err != nil {
+ return err
+ }
+ kind := elementType.Kind()
+ // normalize merged list
+		// typedSetElementOrderList contains all the relative order information from typedPatchList,
+		// so we don't need to use typedPatchList.
+ both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, mergeKey, kind)
+ if err != nil {
+ return err
+ }
+ original[originalKey] = both
+ // delete patch list from patch to prevent process again in the future
+ delete(patch, originalKey)
+ }
+ return nil
+}
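+
+// Example (editor's illustration): a patch of
+//   {"$setElementOrder/containers": [{"name": "b"}, {"name": "a"}],
+//    "containers": [{"name": "a", "image": "y"}]}
+// merges the "containers" patch list into the live list, then reorders the merged
+// result so "b" precedes "a", keeping server-only items in their original relative order.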
+
+// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
+func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) {
+ patch := make([]interface{}, 0, len(original))
+ serverOnly := make([]interface{}, 0, len(original))
+ inPatch := map[interface{}]bool{}
+ for _, v := range partitionBy {
+ inPatch[v] = true
+ }
+ for _, v := range original {
+ if !inPatch[v] {
+ serverOnly = append(serverOnly, v)
+ } else {
+ patch = append(patch, v)
+ }
+ }
+ return patch, serverOnly
+}
+
+// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
+func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) {
+ patch := make([]interface{}, 0, len(original))
+ serverOnly := make([]interface{}, 0, len(original))
+ for _, v := range original {
+ typedV, ok := v.(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedV, v)
+ }
+ mergeKeyValue, foundMergeKey := typedV[mergeKey]
+ if !foundMergeKey {
+ return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey)
+ }
+ _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !found {
+ serverOnly = append(serverOnly, v)
+ } else {
+ patch = append(patch, v)
+ }
+ }
+ return patch, serverOnly, nil
+}
+
+// Merge fields from a patch map into the original map. Note: This may modify
+// both the original map and the patch because getting a deep copy of a map in
+// golang is highly non-trivial.
+// The flag mergeOptions.MergeParallelList controls whether the parallel deletion list is used for deletion or the list is kept.
+// If patch contains any null field (e.g. field_1: null) that is not
+// present in original, then to propagate it to the end result use
+// mergeOptions.IgnoreUnmatchedNulls == false.
+func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) (map[string]interface{}, error) {
+ if v, ok := patch[directiveMarker]; ok {
+ return handleDirectiveInMergeMap(v, patch)
+ }
+
+ // nil is an accepted value for original to simplify logic in other places.
+ // If original is nil, replace it with an empty map and then apply the patch.
+ if original == nil {
+ original = map[string]interface{}{}
+ }
+
+ err := applyRetainKeysDirective(original, patch, mergeOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ // Process $setElementOrder list and other lists sharing the same key.
+ // When not merging the directive, it will make sure $setElementOrder list exist only in original.
+ // When merging the directive, it will process $setElementOrder and its patch list together.
+ // This function will delete the merged elements from patch so they will not be reprocessed
+ err = mergePatchIntoOriginal(original, patch, t, mergeOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start merging the patch into the original.
+ for k, patchV := range patch {
+ skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList)
+ if err != nil {
+ return nil, err
+ }
+ if skipProcessing {
+ continue
+ }
+ if len(noPrefixKey) > 0 {
+ k = noPrefixKey
+ }
+
+ // If the value of this key is null, delete the key if it exists in the
+ // original. Otherwise, check if we want to preserve it or skip it.
+ // Preserving the null value is useful when we want to send an explicit
+ // delete to the API server.
+ if patchV == nil {
+ if _, ok := original[k]; ok {
+ delete(original, k)
+ }
+ if mergeOptions.IgnoreUnmatchedNulls {
+ continue
+ }
+ }
+
+ _, ok := original[k]
+ if !ok {
+ // If it's not in the original document, just take the patch value.
+ original[k] = patchV
+ continue
+ }
+
+ // If the data type is a pointer, resolve the element.
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ originalType := reflect.TypeOf(original[k])
+ patchType := reflect.TypeOf(patchV)
+ if originalType != patchType {
+ original[k] = patchV
+ continue
+ }
+ // If they're both maps or lists, recurse into the value.
+ // First find the fieldPatchStrategy and fieldPatchMergeKey.
+ fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
+ if err != nil {
+ return nil, err
+ }
+ _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies)
+ if err != nil {
+ return nil, err
+ }
+
+ switch originalType.Kind() {
+ case reflect.Map:
+
+ original[k], err = mergeMapHandler(original[k], patchV, fieldType, patchStrategy, mergeOptions)
+ case reflect.Slice:
+ original[k], err = mergeSliceHandler(original[k], patchV, fieldType, patchStrategy, fieldPatchMergeKey, isDeleteList, mergeOptions)
+ default:
+ original[k] = patchV
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return original, nil
+}
+
+// mergeMapHandler merges `patch` into `original`, respecting
+// fieldPatchStrategy and mergeOptions.
+func mergeMapHandler(original, patch interface{}, fieldType reflect.Type,
+ fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) {
+ typedOriginal, typedPatch, err := mapTypeAssertion(original, patch)
+ if err != nil {
+ return nil, err
+ }
+
+ if fieldPatchStrategy != replaceDirective {
+ return mergeMap(typedOriginal, typedPatch, fieldType, mergeOptions)
+ } else {
+ return typedPatch, nil
+ }
+}
+
+// mergeSliceHandler merges the `patch` slice into the `original` slice, respecting
+// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList, and mergeOptions.
+func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type,
+ fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) {
+ typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch)
+ if err != nil {
+ return nil, err
+ }
+
+ if fieldPatchStrategy == mergeDirective {
+ elemType := fieldType.Elem()
+ return mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey, mergeOptions, isDeleteList)
+ } else {
+ return typedPatch, nil
+ }
+}
+
+// Merge two slices together. Note: This may modify both the original slice and
+// the patch because getting a deep copy of a slice in golang is highly
+// non-trivial.
+func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) {
+ if len(original) == 0 && len(patch) == 0 {
+ return original, nil
+ }
+
+ // All the values must be of the same type, but not a list.
+ t, err := sliceElementType(original, patch)
+ if err != nil {
+ return nil, err
+ }
+
+ var merged []interface{}
+ kind := t.Kind()
+ // If the elements are not maps, merge the slices of scalars.
+ if kind != reflect.Map {
+ if mergeOptions.MergeParallelList && isDeleteList {
+ return deleteFromSlice(original, patch), nil
+ }
+ // Maybe in the future add a "concat" mode that doesn't
+ // deduplicate.
+ both := append(original, patch...)
+ merged = deduplicateScalars(both)
+
+ } else {
+ if mergeKey == "" {
+ return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String())
+ }
+
+ original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey)
+ if err != nil {
+ return nil, err
+ }
+
+ merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, elemType, mergeOptions)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // enforce the order
+ var patchItems, serverOnlyItems []interface{}
+ if len(mergeKey) == 0 {
+ patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch)
+ } else {
+ patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind)
+}
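+
+// Example (editor's illustration): merging with mergeKey "name",
+//   original = [{"name": "a", "image": "x"}]
+//   patch    = [{"name": "a", "image": "y"}, {"name": "b"}]
+// yields [{"name": "a", "image": "y"}, {"name": "b"}]: the two "a" entries are
+// merged map-wise and "b" is appended.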
+
+// mergeSliceWithSpecialElements handles special elements with directiveMarker
+// before merging the slices. It returns an updated `original` and a patch without special elements.
+// original and patch must be slices of maps; this should be checked before calling this function.
+func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) {
+ patchWithoutSpecialElements := []interface{}{}
+ replace := false
+ for _, v := range patch {
+ typedV := v.(map[string]interface{})
+ patchType, ok := typedV[directiveMarker]
+ if !ok {
+ patchWithoutSpecialElements = append(patchWithoutSpecialElements, v)
+ } else {
+ switch patchType {
+ case deleteDirective:
+ mergeValue, ok := typedV[mergeKey]
+ if ok {
+ var err error
+ original, err = deleteMatchingEntries(original, mergeKey, mergeValue)
+ if err != nil {
+ return nil, nil, err
+ }
+ } else {
+ return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey)
+ }
+ case replaceDirective:
+ replace = true
+ // Continue iterating through the array to prune any other $patch elements.
+ case mergeDirective:
+ return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch")
+ default:
+ return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV)
+ }
+ }
+ }
+ if replace {
+ return patchWithoutSpecialElements, nil, nil
+ }
+ return original, patchWithoutSpecialElements, nil
+}
+
+// delete all matching entries (based on merge key) from a merging list
+func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) {
+ for {
+ _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+ if err != nil {
+ return nil, err
+ }
+
+ if !found {
+ break
+ }
+ // Delete the element at originalKey.
+ original = append(original[:originalKey], original[originalKey+1:]...)
+ }
+ return original, nil
+}
+
+// mergeSliceWithoutSpecialElements merges slices with non-special elements.
+// original and patch must be slices of maps; this should be checked before calling this function.
+func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, elemType reflect.Type, mergeOptions MergeOptions) ([]interface{}, error) {
+ for _, v := range patch {
+ typedV := v.(map[string]interface{})
+ mergeValue, ok := typedV[mergeKey]
+ if !ok {
+ return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey)
+ }
+
+ // If we find a value with this merge key value in original, merge the
+ // maps. Otherwise append onto original.
+ originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+ if err != nil {
+ return nil, err
+ }
+
+ if found {
+ var mergedMaps interface{}
+ var err error
+ // Merge into original.
+ mergedMaps, err = mergeMap(originalMap, typedV, elemType, mergeOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ original[originalKey] = mergedMaps
+ } else {
+ original = append(original, v)
+ }
+ }
+ return original, nil
+}
+
+// deleteFromSlice uses the parallel list to delete the items in a list of scalars
+func deleteFromSlice(current, toDelete []interface{}) []interface{} {
+ toDeleteMap := map[interface{}]interface{}{}
+ processed := make([]interface{}, 0, len(current))
+ for _, v := range toDelete {
+ toDeleteMap[v] = true
+ }
+ for _, v := range current {
+ if _, found := toDeleteMap[v]; !found {
+ processed = append(processed, v)
+ }
+ }
+ return processed
+}
+
+// findMapInSliceBasedOnKeyValue returns the first map in the slice whose value for `key` equals `value`,
+// along with its index. It returns an error (rather than panicking) if an element is not a map.
+func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) {
+ for k, v := range m {
+ typedV, ok := v.(map[string]interface{})
+ if !ok {
+			return nil, 0, false, fmt.Errorf("value for key %v is not a map", k)
+ }
+
+ valueToMatch, ok := typedV[key]
+ if ok && valueToMatch == value {
+ return typedV, k, true, nil
+ }
+ }
+
+ return nil, 0, false, nil
+}
+
+// This function takes a JSON map and sorts all the lists that should be merged
+// by key. This is needed by tests because in JSON, list order is significant,
+// but in Strategic Merge Patch, merge lists do not have significant order.
+// Sorting the lists allows for order-insensitive comparison of patched maps.
+func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(mapJSON, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct))
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(newM)
+}
+
+// sortMergeListsByNameMap recursively sorts the merge lists in a map by their mergeKey.
+func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) {
+ newS := map[string]interface{}{}
+ for k, v := range s {
+ if k == retainKeysDirective {
+ typedV, ok := v.([]interface{})
+ if !ok {
+ return nil, mergepatch.ErrBadPatchFormatForRetainKeys
+ }
+ v = sortScalars(typedV)
+ } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) {
+ typedV, ok := v.([]interface{})
+ if !ok {
+ return nil, mergepatch.ErrBadPatchFormatForPrimitiveList
+ }
+ v = sortScalars(typedV)
+ } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) {
+ _, ok := v.([]interface{})
+ if !ok {
+ return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList
+ }
+ } else if k != directiveMarker {
+ fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
+ if err != nil {
+ return nil, err
+ }
+ _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies)
+ if err != nil {
+ return nil, err
+ }
+
+ // If v is a map or a merge slice, recurse.
+ if typedV, ok := v.(map[string]interface{}); ok {
+ var err error
+ v, err = sortMergeListsByNameMap(typedV, fieldType)
+ if err != nil {
+ return nil, err
+ }
+ } else if typedV, ok := v.([]interface{}); ok {
+ if patchStrategy == mergeDirective {
+ var err error
+ v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ newS[k] = v
+ }
+
+ return newS, nil
+}
+
+// sortMergeListsByNameArray recursively sorts the merge lists in an array by their mergeKey.
+func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) {
+ if len(s) == 0 {
+ return s, nil
+ }
+
+ // We don't support lists of lists yet.
+ t, err := sliceElementType(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the elements are not maps...
+ if t.Kind() != reflect.Map {
+ // Sort the elements, because they may have been merged out of order.
+ return deduplicateAndSortScalars(s), nil
+ }
+
+ // Elements are maps - if one of the keys of the map is a map or a
+ // list, we may need to recurse into it.
+ newS := []interface{}{}
+ for _, elem := range s {
+ if recurse {
+ typedElem := elem.(map[string]interface{})
+ newElem, err := sortMergeListsByNameMap(typedElem, elemType)
+ if err != nil {
+ return nil, err
+ }
+
+ newS = append(newS, newElem)
+ } else {
+ newS = append(newS, elem)
+ }
+ }
+
+ // Sort the maps.
+ newS = sortMapsBasedOnField(newS, mergeKey)
+ return newS, nil
+}
+
+func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} {
+ mapM := mapSliceFromSlice(m)
+ ss := SortableSliceOfMaps{mapM, fieldName}
+ sort.Sort(ss)
+ newS := sliceFromMapSlice(ss.s)
+ return newS
+}
+
+func mapSliceFromSlice(m []interface{}) []map[string]interface{} {
+ newM := []map[string]interface{}{}
+ for _, v := range m {
+ vt := v.(map[string]interface{})
+ newM = append(newM, vt)
+ }
+
+ return newM
+}
+
+func sliceFromMapSlice(s []map[string]interface{}) []interface{} {
+ newS := []interface{}{}
+ for _, v := range s {
+ newS = append(newS, v)
+ }
+
+ return newS
+}
+
+type SortableSliceOfMaps struct {
+ s []map[string]interface{}
+ k string // key to sort on
+}
+
+func (ss SortableSliceOfMaps) Len() int {
+ return len(ss.s)
+}
+
+func (ss SortableSliceOfMaps) Less(i, j int) bool {
+ iStr := fmt.Sprintf("%v", ss.s[i][ss.k])
+ jStr := fmt.Sprintf("%v", ss.s[j][ss.k])
+	// Use a strict less-than: sort.StringsAreSorted also returns true for equal
+	// elements, which violates the sort.Interface contract for Less.
+	return iStr < jStr
+}
+
+func (ss SortableSliceOfMaps) Swap(i, j int) {
+ tmp := ss.s[i]
+ ss.s[i] = ss.s[j]
+ ss.s[j] = tmp
+}
+
+func deduplicateAndSortScalars(s []interface{}) []interface{} {
+ s = deduplicateScalars(s)
+ return sortScalars(s)
+}
+
+func sortScalars(s []interface{}) []interface{} {
+ ss := SortableSliceOfScalars{s}
+ sort.Sort(ss)
+ return ss.s
+}
+
+func deduplicateScalars(s []interface{}) []interface{} {
+	// In-place O(n^2) deduplication: when a duplicate is found, move the last
+	// element into its slot and shrink the slice by one.
+ length := len(s) - 1
+ for i := 0; i < length; i++ {
+ for j := i + 1; j <= length; j++ {
+ if s[i] == s[j] {
+ s[j] = s[length]
+ s = s[0:length]
+ length--
+ j--
+ }
+ }
+ }
+
+ return s
+}
+
+type SortableSliceOfScalars struct {
+ s []interface{}
+}
+
+func (ss SortableSliceOfScalars) Len() int {
+ return len(ss.s)
+}
+
+func (ss SortableSliceOfScalars) Less(i, j int) bool {
+ iStr := fmt.Sprintf("%v", ss.s[i])
+ jStr := fmt.Sprintf("%v", ss.s[j])
+	// Use a strict less-than: sort.StringsAreSorted also returns true for equal
+	// elements, which violates the sort.Interface contract for Less.
+	return iStr < jStr
+}
+
+func (ss SortableSliceOfScalars) Swap(i, j int) {
+ tmp := ss.s[i]
+ ss.s[i] = ss.s[j]
+ ss.s[j] = tmp
+}
+
+// sliceElementType returns the type shared by the elements of the given slices. It returns an
+// error if the element types differ, if an element is itself a slice, or if no elements exist.
+func sliceElementType(slices ...[]interface{}) (reflect.Type, error) {
+ var prevType reflect.Type
+ for _, s := range slices {
+ // Go through elements of all given slices and make sure they are all the same type.
+ for _, v := range s {
+ currentType := reflect.TypeOf(v)
+ if prevType == nil {
+ prevType = currentType
+ // We don't support lists of lists yet.
+ if prevType.Kind() == reflect.Slice {
+ return nil, mergepatch.ErrNoListOfLists
+ }
+ } else {
+ if prevType != currentType {
+ return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices))
+ }
+ prevType = currentType
+ }
+ }
+ }
+
+ if prevType == nil {
+ return nil, fmt.Errorf("no elements in any of the given slices")
+ }
+
+ return prevType, nil
+}
+
+// MergingMapsHaveConflicts returns true if the left and right JSON interface
+// objects overlap with different values in any key. All keys are required to be
+// strings. Since patches of the same Type have congruent keys, this is valid
+// for multiple patch types. This method supports strategic merge patch semantics.
+func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) {
+ t, err := getTagStructType(dataStruct)
+ if err != nil {
+ return true, err
+ }
+
+ return mergingMapFieldsHaveConflicts(left, right, t, "", "")
+}
+
+func mergingMapFieldsHaveConflicts(
+ left, right interface{},
+ fieldType reflect.Type,
+ fieldPatchStrategy, fieldPatchMergeKey string,
+) (bool, error) {
+ switch leftType := left.(type) {
+ case map[string]interface{}:
+ rightType, ok := right.(map[string]interface{})
+ if !ok {
+ return true, nil
+ }
+ leftMarker, okLeft := leftType[directiveMarker]
+ rightMarker, okRight := rightType[directiveMarker]
+ // if one or the other has a directive marker,
+ // then we need to consider that before looking at the individual keys,
+ // since a directive operates on the whole map.
+ if okLeft || okRight {
+ // if one has a directive marker and the other doesn't,
+ // then we have a conflict, since one is deleting or replacing the whole map,
+ // and the other is doing things to individual keys.
+ if okLeft != okRight {
+ return true, nil
+ }
+ // if they both have markers, but they are not the same directive,
+ // then we have a conflict because they're doing different things to the map.
+ if leftMarker != rightMarker {
+ return true, nil
+ }
+ }
+ if fieldPatchStrategy == replaceDirective {
+ return false, nil
+ }
+ // Check the individual keys.
+ return mapsHaveConflicts(leftType, rightType, fieldType)
+
+ case []interface{}:
+ rightType, ok := right.([]interface{})
+ if !ok {
+ return true, nil
+ }
+ return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey)
+
+ case string, float64, bool, int, int64, nil:
+ return !reflect.DeepEqual(left, right), nil
+ default:
+ return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left))
+ }
+}
+
+func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) {
+ for key, leftValue := range typedLeft {
+ if key != directiveMarker && key != retainKeysDirective {
+ if rightValue, ok := typedRight[key]; ok {
+ fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key)
+ if err != nil {
+ return true, err
+ }
+ _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies)
+ if err != nil {
+ return true, err
+ }
+
+ if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue,
+ fieldType, patchStrategy, fieldPatchMergeKey); hasConflicts {
+ return true, err
+ }
+ }
+ }
+ }
+
+ return false, nil
+}
+
+func slicesHaveConflicts(
+ typedLeft, typedRight []interface{},
+ fieldType reflect.Type,
+ fieldPatchStrategy, fieldPatchMergeKey string,
+) (bool, error) {
+ elementType, err := sliceElementType(typedLeft, typedRight)
+ if err != nil {
+ return true, err
+ }
+
+ valueType := fieldType.Elem()
+ if fieldPatchStrategy == mergeDirective {
+		// Merging lists of scalars has no conflicts by definition,
+		// so we only need to check further if the elements are maps.
+ if elementType.Kind() != reflect.Map {
+ return false, nil
+ }
+
+ // Build a map for each slice and then compare the two maps
+ leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey)
+ if err != nil {
+ return true, err
+ }
+
+ rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey)
+ if err != nil {
+ return true, err
+ }
+
+ return mapsOfMapsHaveConflicts(leftMap, rightMap, valueType)
+ }
+
+ // Either we don't have type information, or these are non-merging lists
+ if len(typedLeft) != len(typedRight) {
+ return true, nil
+ }
+
+ // Sort scalar slices to prevent ordering issues
+ // We have no way to sort non-merging lists of maps
+ if elementType.Kind() != reflect.Map {
+ typedLeft = deduplicateAndSortScalars(typedLeft)
+ typedRight = deduplicateAndSortScalars(typedRight)
+ }
+
+ // Compare the slices element by element in order
+ // This test will fail if the slices are not sorted
+ for i := range typedLeft {
+ if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts {
+ return true, err
+ }
+ }
+
+ return false, nil
+}
+
+func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) {
+ result := make(map[string]interface{}, len(slice))
+ for _, value := range slice {
+ typedValue, ok := value.(map[string]interface{})
+ if !ok {
+			return nil, fmt.Errorf("invalid element type in merging list: %v", slice)
+ }
+
+ mergeValue, ok := typedValue[mergeKey]
+ if !ok {
+			return nil, fmt.Errorf("cannot find merge key `%s` in merging list element: %v", mergeKey, typedValue)
+ }
+
+ result[fmt.Sprintf("%s", mergeValue)] = typedValue
+ }
+
+ return result, nil
+}
+
+func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) {
+ for key, leftValue := range typedLeft {
+ if rightValue, ok := typedRight[key]; ok {
+ if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); hasConflicts {
+ return true, err
+ }
+ }
+ }
+
+ return false, nil
+}
+
+// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration,
+// while preserving any changes or deletions made to the original configuration in the interim,
+// and not overridden by the current configuration. All three documents must be passed to the
+// method as json encoded content. It will return a strategic merge patch, or an error if any
+// of the documents is invalid, or if there are any preconditions that fail against the modified
+// configuration, or, if overwrite is false and there are conflicts between the modified and current
+// configurations. Conflicts are defined as keys changed differently from original to modified
+// than from original to current. In other words, a conflict occurs if modified changes any key
+// in a way that is different from how it is changed in current (e.g., deleting it, changing its
+// value). We also propagate values fields that do not exist in original but are explicitly
+// defined in modified.
+func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+ originalMap := map[string]interface{}{}
+ if len(original) > 0 {
+ if err := json.Unmarshal(original, &originalMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ modifiedMap := map[string]interface{}{}
+ if len(modified) > 0 {
+ if err := json.Unmarshal(modified, &modifiedMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ currentMap := map[string]interface{}{}
+ if len(current) > 0 {
+ if err := json.Unmarshal(current, &currentMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ t, err := getTagStructType(dataStruct)
+ if err != nil {
+ return nil, err
+ }
+
+ // The patch is the difference from current to modified without deletions, plus deletions
+ // from original to modified. To find it, we compute deletions, which are the deletions from
+ // original to modified, and delta, which is the difference from current to modified without
+ // deletions, and then apply delta to deletions as a patch, which should be strictly additive.
+ deltaMapDiffOptions := DiffOptions{
+ IgnoreDeletions: true,
+ SetElementOrder: true,
+ }
+ deltaMap, err := diffMaps(currentMap, modifiedMap, t, deltaMapDiffOptions)
+ if err != nil {
+ return nil, err
+ }
+ deletionsMapDiffOptions := DiffOptions{
+ SetElementOrder: true,
+ IgnoreChangesAndAdditions: true,
+ }
+ deletionsMap, err := diffMaps(originalMap, modifiedMap, t, deletionsMapDiffOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ mergeOptions := MergeOptions{}
+ patchMap, err := mergeMap(deletionsMap, deltaMap, t, mergeOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ // Apply the preconditions to the patch, and return an error if any of them fail.
+ for _, fn := range fns {
+ if !fn(patchMap) {
+ return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+ }
+ }
+
+ // If overwrite is false, and the patch contains any keys that were changed differently,
+ // then return a conflict error.
+ if !overwrite {
+ changeMapDiffOptions := DiffOptions{}
+ changedMap, err := diffMaps(originalMap, currentMap, t, changeMapDiffOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct)
+ if err != nil {
+ return nil, err
+ }
+
+ if hasConflicts {
+ return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap))
+ }
+ }
+
+ return json.Marshal(patchMap)
+}
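+
+// Usage sketch (editor's illustration; the argument names and the v1.Pod type are hypothetical):
+//
+//   patch, err := CreateThreeWayMergePatch(originalJSON, modifiedJSON, currentJSON, v1.Pod{}, false)
+//
+// With overwrite == false, the call fails with a conflict error whenever current
+// has changed a key (relative to original) differently than modified has.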
+
+func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified }
+
+func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified }
+
+func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified }
+
+func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} {
+ return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective}
+}
+
+func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) {
+ typedOriginal, ok := original.(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
+ }
+ typedPatch, ok := patch.(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
+ }
+ return typedOriginal, typedPatch, nil
+}
+
+func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) {
+ typedOriginal, ok := original.([]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
+ }
+ typedPatch, ok := patch.([]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
+ }
+ return typedOriginal, typedPatch, nil
+}
+
+// extractRetainKeysPatchStrategy processes the patch strategies, which may contain multiple
+// strategies separated by ",". It returns a boolean indicating whether the retainKeys
+// strategy is present and a string holding the other strategy.
+func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) {
+ switch len(strategies) {
+ case 0:
+ return false, "", nil
+ case 1:
+ singleStrategy := strategies[0]
+ switch singleStrategy {
+ case retainKeysStrategy:
+ return true, "", nil
+ default:
+ return false, singleStrategy, nil
+ }
+ case 2:
+ switch {
+ case strategies[0] == retainKeysStrategy:
+ return true, strategies[1], nil
+ case strategies[1] == retainKeysStrategy:
+ return true, strategies[0], nil
+ default:
+ return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
+ }
+ default:
+ return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
+ }
+}
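+
+// Example (editor's illustration, assuming retainKeysStrategy is "retainKeys"):
+//   extractRetainKeysPatchStrategy([]string{"retainKeys", "merge"}) // (true, "merge", nil)
+//   extractRetainKeysPatchStrategy([]string{"merge"})               // (false, "merge", nil)
+//   extractRetainKeysPatchStrategy(nil)                             // (false, "", nil)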
+
+// hasAdditionalNewField returns true if the original map has any key with a non-nil value that is absent from modified.
+func hasAdditionalNewField(original, modified map[string]interface{}) bool {
+ for k, v := range original {
+ if v == nil {
+ continue
+ }
+ if _, found := modified[k]; !found {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
new file mode 100644
index 000000000..43c779a11
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Error is an implementation of the 'error' interface, which represents a
+// field-level validation error.
+type Error struct {
+ Type ErrorType
+ Field string
+ BadValue interface{}
+ Detail string
+}
+
+var _ error = &Error{}
+
+// Error implements the error interface.
+func (v *Error) Error() string {
+ return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+}
+
+// ErrorBody returns the error message without the field name. This is useful
+// for building nice-looking higher-level error reporting.
+func (v *Error) ErrorBody() string {
+ var s string
+ switch v.Type {
+ case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
+ s = fmt.Sprintf("%s", v.Type)
+ default:
+ value := v.BadValue
+ valueType := reflect.TypeOf(value)
+ if value == nil || valueType == nil {
+ value = "null"
+ } else if valueType.Kind() == reflect.Ptr {
+ if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() {
+ value = "null"
+ } else {
+ value = reflectValue.Elem().Interface()
+ }
+ }
+ switch t := value.(type) {
+ case int64, int32, float64, float32, bool:
+ // use simple printer for simple types
+ s = fmt.Sprintf("%s: %v", v.Type, value)
+ case string:
+ s = fmt.Sprintf("%s: %q", v.Type, t)
+ case fmt.Stringer:
+ // anything that defines String() is better than raw struct
+ s = fmt.Sprintf("%s: %s", v.Type, t.String())
+ default:
+ // fallback to raw struct
+ // TODO: internal types have panic guards against json.Marshalling to prevent
+ // accidental use of internal types in external serialized form. For now, use
+ // %#v, although it would be better to show a more expressive output in the future
+ s = fmt.Sprintf("%s: %#v", v.Type, value)
+ }
+ }
+ if len(v.Detail) != 0 {
+ s += fmt.Sprintf(": %s", v.Detail)
+ }
+ return s
+}
+
+// ErrorType is a machine readable value providing more detail about why
+// a field is invalid. These values are expected to match 1-1 with
+// CauseType in api/types.go.
+type ErrorType string
+
+// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it.
+const (
+ // ErrorTypeNotFound is used to report failure to find a requested value
+ // (e.g. looking up an ID). See NotFound().
+ ErrorTypeNotFound ErrorType = "FieldValueNotFound"
+ // ErrorTypeRequired is used to report required values that are not
+ // provided (e.g. empty strings, null values, or empty arrays). See
+ // Required().
+ ErrorTypeRequired ErrorType = "FieldValueRequired"
+ // ErrorTypeDuplicate is used to report collisions of values that must be
+ // unique (e.g. unique IDs). See Duplicate().
+ ErrorTypeDuplicate ErrorType = "FieldValueDuplicate"
+ // ErrorTypeInvalid is used to report malformed values (e.g. failed regex
+ // match, too long, out of bounds). See Invalid().
+ ErrorTypeInvalid ErrorType = "FieldValueInvalid"
+ // ErrorTypeNotSupported is used to report unknown values for enumerated
+ // fields (e.g. a list of valid values). See NotSupported().
+ ErrorTypeNotSupported ErrorType = "FieldValueNotSupported"
+ // ErrorTypeForbidden is used to report valid (as per formatting rules)
+ // values which would be accepted under some conditions, but which are not
+ // permitted by the current conditions (such as security policy). See
+ // Forbidden().
+ ErrorTypeForbidden ErrorType = "FieldValueForbidden"
+ // ErrorTypeTooLong is used to report that the given value is too long.
+ // This is similar to ErrorTypeInvalid, but the error will not include the
+ // too-long value. See TooLong().
+ ErrorTypeTooLong ErrorType = "FieldValueTooLong"
+ // ErrorTypeInternal is used to report other errors that are not related
+ // to user input. See InternalError().
+ ErrorTypeInternal ErrorType = "InternalError"
+)
+
+// String converts a ErrorType into its corresponding canonical error message.
+func (t ErrorType) String() string {
+ switch t {
+ case ErrorTypeNotFound:
+ return "Not found"
+ case ErrorTypeRequired:
+ return "Required value"
+ case ErrorTypeDuplicate:
+ return "Duplicate value"
+ case ErrorTypeInvalid:
+ return "Invalid value"
+ case ErrorTypeNotSupported:
+ return "Unsupported value"
+ case ErrorTypeForbidden:
+ return "Forbidden"
+ case ErrorTypeTooLong:
+ return "Too long"
+ case ErrorTypeInternal:
+ return "Internal error"
+ default:
+ panic(fmt.Sprintf("unrecognized validation error: %q", string(t)))
+ }
+}
+
+// NotFound returns a *Error indicating "value not found". This is
+// used to report failure to find a requested value (e.g. looking up an ID).
+func NotFound(field *Path, value interface{}) *Error {
+ return &Error{ErrorTypeNotFound, field.String(), value, ""}
+}
+
+// Required returns a *Error indicating "value required". This is used
+// to report required values that are not provided (e.g. empty strings, null
+// values, or empty arrays).
+func Required(field *Path, detail string) *Error {
+ return &Error{ErrorTypeRequired, field.String(), "", detail}
+}
+
+// Duplicate returns a *Error indicating "duplicate value". This is
+// used to report collisions of values that must be unique (e.g. names or IDs).
+func Duplicate(field *Path, value interface{}) *Error {
+ return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+}
+
+// Invalid returns a *Error indicating "invalid value". This is used
+// to report malformed values (e.g. failed regex match, too long, out of bounds).
+func Invalid(field *Path, value interface{}, detail string) *Error {
+ return &Error{ErrorTypeInvalid, field.String(), value, detail}
+}
+
+// NotSupported returns a *Error indicating "unsupported value".
+// This is used to report unknown values for enumerated fields (e.g. a list of
+// valid values).
+func NotSupported(field *Path, value interface{}, validValues []string) *Error {
+ detail := ""
+	if len(validValues) > 0 {
+ detail = "supported values: " + strings.Join(validValues, ", ")
+ }
+ return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+}
+
+// Forbidden returns a *Error indicating "forbidden". This is used to
+// report valid (as per formatting rules) values which would be accepted under
+// some conditions, but which are not permitted by current conditions (e.g.
+// security policy).
+func Forbidden(field *Path, detail string) *Error {
+ return &Error{ErrorTypeForbidden, field.String(), "", detail}
+}
+
+// TooLong returns a *Error indicating "too long". This is used to
+// report that the given value is too long. This is similar to
+// Invalid, but the returned error will not include the too-long
+// value.
+func TooLong(field *Path, value interface{}, maxLength int) *Error {
+ return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)}
+}
+
+// InternalError returns a *Error indicating "internal error". This is used
+// to signal that an error was found that was not directly related to user
+// input. The err argument must be non-nil.
+func InternalError(field *Path, err error) *Error {
+ return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+}
+
+// ErrorList holds a set of Errors. It is plausible that we might one day have
+// non-field errors in this same umbrella package, but for now we don't, so
+// we can keep it simple and leave ErrorList here.
+type ErrorList []*Error
+
+// NewErrorTypeMatcher returns an errors.Matcher that returns true
+// if the provided error is a Error and has the provided ErrorType.
+func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
+ return func(err error) bool {
+ if e, ok := err.(*Error); ok {
+ return e.Type == t
+ }
+ return false
+ }
+}
+
+// ToAggregate converts the ErrorList into an errors.Aggregate.
+func (list ErrorList) ToAggregate() utilerrors.Aggregate {
+ errs := make([]error, 0, len(list))
+ errorMsgs := sets.NewString()
+ for _, err := range list {
+ msg := fmt.Sprintf("%v", err)
+ if errorMsgs.Has(msg) {
+ continue
+ }
+ errorMsgs.Insert(msg)
+ errs = append(errs, err)
+ }
+ return utilerrors.NewAggregate(errs)
+}
+
+func fromAggregate(agg utilerrors.Aggregate) ErrorList {
+ errs := agg.Errors()
+ list := make(ErrorList, len(errs))
+ for i := range errs {
+ list[i] = errs[i].(*Error)
+ }
+ return list
+}
+
+// Filter removes items from the ErrorList that match the provided fns.
+func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
+ err := utilerrors.FilterOut(list.ToAggregate(), fns...)
+ if err == nil {
+ return nil
+ }
+ // FilterOut takes an Aggregate and returns an Aggregate
+ return fromAggregate(err.(utilerrors.Aggregate))
+}
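Taken together, the helpers above compose as follows; a minimal sketch (the field names and values are invented for illustration, not taken from this package):

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/util/validation/field"
	)

	func main() {
		spec := field.NewPath("spec")
		errs := field.ErrorList{
			field.Invalid(spec.Child("replicas"), -1, "must be non-negative"),
			field.NotSupported(spec.Child("restartPolicy"), "Sometimes", []string{"Always", "Never"}),
		}
		// Filter drops every error matched by the given matchers.
		errs = errs.Filter(field.NewErrorTypeMatcher(field.ErrorTypeNotSupported))
		fmt.Println(errs.ToAggregate())
		// Output: spec.replicas: Invalid value: -1: must be non-negative
	}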
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
new file mode 100644
index 000000000..2efc8eec7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+// Path represents the path from some root to a particular field.
+type Path struct {
+ name string // the name of this field or "" if this is an index
+ index string // if name == "", this is a subscript (index or map key) of the previous element
+ parent *Path // nil if this is the root element
+}
+
+// NewPath creates a root Path object.
+func NewPath(name string, moreNames ...string) *Path {
+ r := &Path{name: name, parent: nil}
+ for _, anotherName := range moreNames {
+ r = &Path{name: anotherName, parent: r}
+ }
+ return r
+}
+
+// Root returns the root element of this Path.
+func (p *Path) Root() *Path {
+ for ; p.parent != nil; p = p.parent {
+ // Do nothing.
+ }
+ return p
+}
+
+// Child creates a new Path that is a child of the method receiver.
+func (p *Path) Child(name string, moreNames ...string) *Path {
+ r := NewPath(name, moreNames...)
+ r.Root().parent = p
+ return r
+}
+
+// Index indicates that the previous Path is to be subscripted by an int.
+// This sets the same underlying value as Key.
+func (p *Path) Index(index int) *Path {
+ return &Path{index: strconv.Itoa(index), parent: p}
+}
+
+// Key indicates that the previous Path is to be subscripted by a string.
+// This sets the same underlying value as Index.
+func (p *Path) Key(key string) *Path {
+ return &Path{index: key, parent: p}
+}
+
+// String produces a string representation of the Path.
+func (p *Path) String() string {
+ // make a slice to iterate
+ elems := []*Path{}
+ for ; p != nil; p = p.parent {
+ elems = append(elems, p)
+ }
+
+ // iterate, but it has to be backwards
+ buf := bytes.NewBuffer(nil)
+ for i := range elems {
+ p := elems[len(elems)-1-i]
+ if p.parent != nil && len(p.name) > 0 {
+			// Not the root and not a subscript: separate from the parent with ".".
+ buf.WriteString(".")
+ }
+ if len(p.name) > 0 {
+ buf.WriteString(p.name)
+ } else {
+ fmt.Fprintf(buf, "[%s]", p.index)
+ }
+ }
+ return buf.String()
+}
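For example, paths chain into the familiar dotted-and-indexed form; a small sketch (the names are arbitrary):

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/util/validation/field"
	)

	func main() {
		p := field.NewPath("spec", "containers").Index(0).Child("ports").Index(2)
		fmt.Println(p.Child("containerPort"))
		// Output: spec.containers[0].ports[2].containerPort
	}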
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
new file mode 100644
index 000000000..b1fcc5708
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -0,0 +1,343 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "math"
+ "net"
+ "regexp"
+ "strings"
+)
+
+const qnameCharFmt string = "[A-Za-z0-9]"
+const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
+const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
+const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const qualifiedNameMaxLength int = 63
+
+var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name": an optional DNS-1123 subdomain prefix and '/', followed
+// by a name segment (e.g. 'example.com/MyName'). This format is used in
+// various places throughout the system. If the value is not valid, a list of
+// error strings is returned. Otherwise an empty list (or nil) is returned.
+func IsQualifiedName(value string) []string {
+ var errs []string
+ parts := strings.Split(value, "/")
+ var name string
+ switch len(parts) {
+ case 1:
+ name = parts[0]
+ case 2:
+ var prefix string
+ prefix, name = parts[0], parts[1]
+ if len(prefix) == 0 {
+ errs = append(errs, "prefix part "+EmptyError())
+ } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
+ errs = append(errs, prefixEach(msgs, "prefix part ")...)
+ }
+ default:
+ return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+
+ " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
+ }
+
+ if len(name) == 0 {
+ errs = append(errs, "name part "+EmptyError())
+ } else if len(name) > qualifiedNameMaxLength {
+ errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength))
+ }
+ if !qualifiedNameRegexp.MatchString(name) {
+ errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"))
+ }
+ return errs
+}
+
+const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
+const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const LabelValueMaxLength int = 63
+
+var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+
+// IsValidLabelValue tests whether the value passed is a valid label value. If
+// the value is not valid, a list of error strings is returned. Otherwise an
+// empty list (or nil) is returned.
+func IsValidLabelValue(value string) []string {
+ var errs []string
+ if len(value) > LabelValueMaxLength {
+ errs = append(errs, MaxLenError(LabelValueMaxLength))
+ }
+ if !labelValueRegexp.MatchString(value) {
+ errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
+ }
+ return errs
+}
+
+const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+const DNS1123LabelMaxLength int = 63
+
+var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
+
+// IsDNS1123Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1123).
+func IsDNS1123Label(value string) []string {
+ var errs []string
+ if len(value) > DNS1123LabelMaxLength {
+ errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
+ }
+ if !dns1123LabelRegexp.MatchString(value) {
+ errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
+ }
+ return errs
+}
+
+const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
+const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+const DNS1123SubdomainMaxLength int = 253
+
+var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+
+// IsDNS1123Subdomain tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123).
+func IsDNS1123Subdomain(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !dns1123SubdomainRegexp.MatchString(value) {
+ errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com"))
+ }
+ return errs
+}
+
+const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
+const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+const DNS1035LabelMaxLength int = 63
+
+var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$")
+
+// IsDNS1035Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1035).
+func IsDNS1035Label(value string) []string {
+ var errs []string
+ if len(value) > DNS1035LabelMaxLength {
+ errs = append(errs, MaxLenError(DNS1035LabelMaxLength))
+ }
+ if !dns1035LabelRegexp.MatchString(value) {
+ errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123"))
+ }
+ return errs
+}
+
+// wildcard definition - RFC 1034 section 4.3.3.
+// examples:
+// - valid: *.bar.com, *.foo.bar.com
+// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, *
+const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt
+const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character"
+
+var wildcardDNS1123SubdomainRegexp = regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$")
+
+// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a
+// wildcard subdomain in DNS (RFC 1034 section 4.3.3).
+func IsWildcardDNS1123Subdomain(value string) []string {
+	var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !wildcardDNS1123SubdomainRegexp.MatchString(value) {
+ errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com"))
+ }
+ return errs
+}
+
+const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
+const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
+
+var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
+
+// IsCIdentifier tests for a string that conforms to the definition of an identifier
+// in C. This checks the format, but not the length.
+func IsCIdentifier(value string) []string {
+ if !cIdentifierRegexp.MatchString(value) {
+ return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
+ }
+ return nil
+}
+
+// IsValidPortNum tests that the argument is a valid, non-zero port number.
+func IsValidPortNum(port int) []string {
+ if 1 <= port && port <= 65535 {
+ return nil
+ }
+ return []string{InclusiveRangeError(1, 65535)}
+}
+
+// libcontainer currently limits UIDs and GIDs to the range 0 through 1<<31 - 1.
+// TODO: once we have a type for UID/GID we should make these that type.
+const (
+ minUserID = 0
+ maxUserID = math.MaxInt32
+ minGroupID = 0
+ maxGroupID = math.MaxInt32
+)
+
+// IsValidGroupID tests that the argument is a valid Unix GID.
+func IsValidGroupID(gid int64) []string {
+ if minGroupID <= gid && gid <= maxGroupID {
+ return nil
+ }
+ return []string{InclusiveRangeError(minGroupID, maxGroupID)}
+}
+
+// IsValidUserID tests that the argument is a valid Unix UID.
+func IsValidUserID(uid int64) []string {
+ if minUserID <= uid && uid <= maxUserID {
+ return nil
+ }
+ return []string{InclusiveRangeError(minUserID, maxUserID)}
+}
+
+var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$")
+var portNameOneLetterRegexp = regexp.MustCompile("[a-z]")
+
+// IsValidPortName checks that the argument is valid port-name syntax. It must be
+// non-empty and no more than 15 characters long. It may contain only [-a-z0-9]
+// and must contain at least one letter [a-z]. It must not start or end with a
+// hyphen, nor contain adjacent hyphens.
+//
+// Note: We only allow lower-case characters, even though RFC 6335 is case
+// insensitive.
+func IsValidPortName(port string) []string {
+ var errs []string
+ if len(port) > 15 {
+ errs = append(errs, MaxLenError(15))
+ }
+ if !portNameCharsetRegex.MatchString(port) {
+ errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)")
+ }
+ if !portNameOneLetterRegexp.MatchString(port) {
+		errs = append(errs, "must contain at least one letter (a-z)")
+ }
+ if strings.Contains(port, "--") {
+ errs = append(errs, "must not contain consecutive hyphens")
+ }
+ if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') {
+ errs = append(errs, "must not begin or end with a hyphen")
+ }
+ return errs
+}
+
+// IsValidIP tests that the argument is a valid IP address.
+func IsValidIP(value string) []string {
+ if net.ParseIP(value) == nil {
+		return []string{"must be a valid IP address (e.g. 10.9.8.7)"}
+ }
+ return nil
+}
+
+const percentFmt string = "[0-9]+%"
+const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
+
+var percentRegexp = regexp.MustCompile("^" + percentFmt + "$")
+
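+// IsValidPercent tests that the argument is a string of digits followed by
+// '%' (e.g. "10%").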
+func IsValidPercent(percent string) []string {
+ if !percentRegexp.MatchString(percent) {
+ return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")}
+ }
+ return nil
+}
+
+const httpHeaderNameFmt string = "[-A-Za-z0-9]+"
+const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'"
+
+var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$")
+
+// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's
+// definition of a valid header field name (a stricter subset than RFC7230).
+func IsHTTPHeaderName(value string) []string {
+ if !httpHeaderNameRegexp.MatchString(value) {
+ return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")}
+ }
+ return nil
+}
+
+const configMapKeyFmt = `[-._a-zA-Z0-9]+`
+const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'"
+
+var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$")
+
+// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret
+func IsConfigMapKey(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !configMapKeyRegexp.MatchString(value) {
+ errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name"))
+ }
+ if value == "." {
+ errs = append(errs, `must not be '.'`)
+ } else if value == ".." {
+ errs = append(errs, `must not be '..'`)
+ } else if strings.HasPrefix(value, "..") {
+ errs = append(errs, `must not start with '..'`)
+ }
+ return errs
+}
+
+// MaxLenError returns a string explanation of a "string too long" validation
+// failure.
+func MaxLenError(length int) string {
+ return fmt.Sprintf("must be no more than %d characters", length)
+}
+
+// RegexError returns a string explanation of a regex validation failure.
+func RegexError(msg string, fmt string, examples ...string) string {
+ if len(examples) == 0 {
+ return msg + " (regex used for validation is '" + fmt + "')"
+ }
+ msg += " (e.g. "
+ for i := range examples {
+ if i > 0 {
+ msg += " or "
+ }
+ msg += "'" + examples[i] + "', "
+ }
+ msg += "regex used for validation is '" + fmt + "')"
+ return msg
+}
+
+// EmptyError returns a string explanation of a "must not be empty" validation
+// failure.
+func EmptyError() string {
+ return "must be non-empty"
+}
+
+func prefixEach(msgs []string, prefix string) []string {
+ for i := range msgs {
+ msgs[i] = prefix + msgs[i]
+ }
+ return msgs
+}
+
+// InclusiveRangeError returns a string explanation of a numeric "must be
+// between" validation failure.
+func InclusiveRangeError(lo, hi int) string {
+ return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi)
+}
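As a sketch of how these validators pair with the field error helpers above (the label data is invented):

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/util/validation"
		"k8s.io/apimachinery/pkg/util/validation/field"
	)

	func main() {
		labels := map[string]string{"example.com/role": "Front End"}
		path := field.NewPath("metadata", "labels")

		var errs field.ErrorList
		for k, v := range labels {
			for _, msg := range validation.IsQualifiedName(k) {
				errs = append(errs, field.Invalid(path, k, msg))
			}
			for _, msg := range validation.IsValidLabelValue(v) {
				errs = append(errs, field.Invalid(path.Key(k), v, msg))
			}
		}
		// "Front End" fails IsValidLabelValue because of the space.
		fmt.Println(errs.ToAggregate())
	}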
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
new file mode 100644
index 000000000..3f0c968ec
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait provides tools for polling or listening for changes
+// to a condition.
+package wait // import "k8s.io/apimachinery/pkg/util/wait"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
new file mode 100644
index 000000000..badaa2159
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -0,0 +1,349 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+ "errors"
+ "math/rand"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// For any test of the style:
+// ...
+// <- time.After(timeout):
+// t.Errorf("Timed out")
+// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
+// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
+// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
+var ForeverTestTimeout = time.Second * 30
+
+// NeverStop may be passed to Until to make it never stop.
+var NeverStop <-chan struct{} = make(chan struct{})
+
+// Forever calls f every period, forever.
+//
+// Forever is syntactic sugar on top of Until.
+func Forever(f func(), period time.Duration) {
+ Until(f, period, NeverStop)
+}
+
+// Until loops until stop channel is closed, running f every period.
+//
+// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
+// with sliding = true (which means the timer for period starts after the f
+// completes).
+func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
+ JitterUntil(f, period, 0.0, true, stopCh)
+}
+
+// NonSlidingUntil loops until stop channel is closed, running f every
+// period.
+//
+// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
+// factor, with sliding = false (meaning the timer for period starts at the same
+// time as the function starts).
+func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
+ JitterUntil(f, period, 0.0, false, stopCh)
+}
+
+// JitterUntil loops until stop channel is closed, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
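+//
+// For example, with period = 10s and an f that takes 2s, sliding = true
+// starts runs at roughly t = 0s, 12s, 24s, ..., while sliding = false
+// starts them at roughly t = 0s, 10s, 20s, ....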
+//
+// Close stopCh to stop. f may not be invoked if stop channel is already
+// closed. Pass NeverStop if you don't want it to stop.
+func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
+ var t *time.Timer
+ var sawTimeout bool
+
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+
+ jitteredPeriod := period
+ if jitterFactor > 0.0 {
+ jitteredPeriod = Jitter(period, jitterFactor)
+ }
+
+ if !sliding {
+ t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
+ }
+
+ func() {
+ defer runtime.HandleCrash()
+ f()
+ }()
+
+ if sliding {
+ t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
+ }
+
+		// NOTE: because Go's select has no priority, t.C and stopCh can
+		// both be ready at once, and the select below may pick t.C even
+		// though stop was requested. To mitigate this, stopCh is re-checked
+		// at the top of every loop, preventing extra executions of f().
+ select {
+ case <-stopCh:
+ return
+ case <-t.C:
+ sawTimeout = true
+ }
+ }
+}
+
+// Jitter returns a time.Duration between duration and duration + maxFactor *
+// duration.
+//
+// This allows clients to avoid converging on periodic behavior. If maxFactor
+// is 0.0, a suggested default value will be chosen.
+func Jitter(duration time.Duration, maxFactor float64) time.Duration {
+ if maxFactor <= 0.0 {
+ maxFactor = 1.0
+ }
+ wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+ return wait
+}
+
+// ErrWaitTimeout is returned when the condition exited without success.
+var ErrWaitTimeout = errors.New("timed out waiting for the condition")
+
+// ConditionFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+type ConditionFunc func() (done bool, err error)
+
+// Backoff holds parameters applied to a Backoff function.
+type Backoff struct {
+ Duration time.Duration // the base duration
+ Factor float64 // Duration is multiplied by factor each iteration
+ Jitter float64 // The amount of jitter applied each iteration
+ Steps int // Exit with error after this many steps
+}
+
+// ExponentialBackoff repeats a condition check with exponential backoff.
+//
+// It checks the condition up to Steps times, increasing the wait by multiplying
+// the previous duration by Factor.
+//
+// If Jitter is greater than zero, a random amount of each duration is added
+// (between duration and duration*(1+jitter)).
+//
+// If the condition never returns true, ErrWaitTimeout is returned. All other
+// errors terminate immediately.
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
+ duration := backoff.Duration
+ for i := 0; i < backoff.Steps; i++ {
+ if i != 0 {
+ adjusted := duration
+ if backoff.Jitter > 0.0 {
+ adjusted = Jitter(duration, backoff.Jitter)
+ }
+ time.Sleep(adjusted)
+ duration = time.Duration(float64(duration) * backoff.Factor)
+ }
+ if ok, err := condition(); err != nil || ok {
+ return err
+ }
+ }
+ return ErrWaitTimeout
+}
+
+// Poll tries a condition func until it returns true, an error, or the timeout
+// is reached.
+//
+// Poll always waits the interval before the run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to Poll something forever, see PollInfinite.
+func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
+ return pollInternal(poller(interval, timeout), condition)
+}
+
+func pollInternal(wait WaitFunc, condition ConditionFunc) error {
+ done := make(chan struct{})
+ defer close(done)
+ return WaitFor(wait, condition, done)
+}
+
+// PollImmediate tries a condition func until it returns true, an error, or the timeout
+// is reached.
+//
+// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
+// will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to Poll something forever, see PollInfinite.
+func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
+ return pollImmediateInternal(poller(interval, timeout), condition)
+}
+
+func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
+ done, err := condition()
+ if err != nil {
+ return err
+ }
+ if done {
+ return nil
+ }
+ return pollInternal(wait, condition)
+}
+
+// PollInfinite tries a condition func until it returns true or an error
+//
+// PollInfinite always waits the interval before the run of 'condition'.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollInfinite(interval time.Duration, condition ConditionFunc) error {
+ done := make(chan struct{})
+ defer close(done)
+ return PollUntil(interval, condition, done)
+}
+
+// PollImmediateInfinite tries a condition func until it returns true or an error
+//
+// PollImmediateInfinite runs the 'condition' before waiting for the interval.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
+ done, err := condition()
+ if err != nil {
+ return err
+ }
+ if done {
+ return nil
+ }
+ return PollInfinite(interval, condition)
+}
+
+// PollUntil tries a condition func until it returns true, an error or stopCh is
+// closed.
+//
+// PollUntil always waits interval before the first run of 'condition'.
+// 'condition' will always be invoked at least once.
+func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+ return WaitFor(poller(interval, 0), condition, stopCh)
+}
+
+// WaitFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+type WaitFunc func(done <-chan struct{}) <-chan struct{}
+
+// WaitFor continually checks 'fn' as driven by 'wait'.
+//
+// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
+// placed on the channel and once more when the channel is closed.
+//
+// If 'fn' returns an error the loop ends and that error is returned, and if
+// 'fn' returns true the loop ends and nil is returned.
+//
+// ErrWaitTimeout will be returned if the channel is closed without fn ever
+// returning true.
+func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
+ c := wait(done)
+ for {
+ _, open := <-c
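+		// fn runs once per value received on c, and once more after c is closed.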
+ ok, err := fn()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ if !open {
+ break
+ }
+ }
+ return ErrWaitTimeout
+}
+
+// poller returns a WaitFunc that will send to the channel every interval until
+// timeout has elapsed and then closes the channel.
+//
+// Over very short intervals you may receive no ticks before the channel is
+// closed. A timeout of 0 is interpreted as infinite.
+//
+// Output ticks are not buffered. If the channel is not ready to receive an
+// item, the tick is skipped.
+func poller(interval, timeout time.Duration) WaitFunc {
+ return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
+ ch := make(chan struct{})
+
+ go func() {
+ defer close(ch)
+
+ tick := time.NewTicker(interval)
+ defer tick.Stop()
+
+ var after <-chan time.Time
+ if timeout != 0 {
+ // time.After is more convenient, but it
+ // potentially leaves timers around much longer
+ // than necessary if we exit early.
+ timer := time.NewTimer(timeout)
+ after = timer.C
+ defer timer.Stop()
+ }
+
+ for {
+ select {
+ case <-tick.C:
+ // If the consumer isn't ready for this signal drop it and
+ // check the other channels.
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+ case <-after:
+ return
+ case <-done:
+ return
+ }
+ }
+ }()
+
+ return ch
+ })
+}
+
+// resetOrReuseTimer avoids allocating a new timer if one is already in use.
+// Not safe for multiple threads.
+func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer {
+ if t == nil {
+ return time.NewTimer(d)
+ }
+ if !t.Stop() && !sawTimeout {
+ <-t.C
+ }
+ t.Reset(d)
+ return t
+}
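A sketch of the two most common entry points above; the timings and conditions are invented for illustration:

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		start := time.Now()

		// Poll waits interval before each check and gives up after timeout.
		err := wait.Poll(50*time.Millisecond, time.Second, func() (bool, error) {
			return time.Since(start) > 200*time.Millisecond, nil
		})
		fmt.Println("poll:", err) // poll: <nil>

		// ExponentialBackoff sleeps ~10ms, 20ms, 40ms, ... between attempts.
		backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Jitter: 0.1, Steps: 5}
		err = wait.ExponentialBackoff(backoff, func() (bool, error) {
			return false, nil // never succeeds
		})
		fmt.Println("backoff:", err) // backoff: timed out waiting for the condition
	}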
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
new file mode 100644
index 000000000..6ebfaea70
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -0,0 +1,346 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+ "bufio"
+ "bytes"
+	"encoding/json"
+	"errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "unicode"
+
+ "github.com/ghodss/yaml"
+ "github.com/golang/glog"
+)
+
+// ToJSON converts a single YAML document into a JSON document
+// or returns an error. If the document appears to be JSON the
+// YAML decoding path is not used (so that error messages are
+// JSON specific).
+func ToJSON(data []byte) ([]byte, error) {
+ if hasJSONPrefix(data) {
+ return data, nil
+ }
+ return yaml.YAMLToJSON(data)
+}
+
+// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
+// separating individual documents. It first converts the YAML
+// body to JSON, then unmarshals the JSON.
+type YAMLToJSONDecoder struct {
+ reader Reader
+}
+
+// NewYAMLToJSONDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk, converting it to JSON via
+// yaml.YAMLToJSON, and then passing it to json.Decoder.
+func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
+ reader := bufio.NewReader(r)
+ return &YAMLToJSONDecoder{
+ reader: NewYAMLReader(reader),
+ }
+}
+
+// Decode reads a YAML document as JSON from the stream or returns
+// an error. The decoding rules match json.Unmarshal, not
+// yaml.Unmarshal.
+func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
+ bytes, err := d.reader.Read()
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ if len(bytes) != 0 {
+ err := yaml.Unmarshal(bytes, into)
+ if err != nil {
+ return YAMLSyntaxError{err}
+ }
+ }
+ return err
+}
+
+// YAMLDecoder reads YAML documents in chunks and returns io.ErrShortBuffer
+// if a document does not fit in the caller's buffer.
+type YAMLDecoder struct {
+ r io.ReadCloser
+ scanner *bufio.Scanner
+ remaining []byte
+}
+
+// NewDocumentDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk. io.ErrShortBuffer will be
+// returned if the entire buffer could not be read to assist
+// the caller in framing the chunk.
+func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitYAMLDocument)
+ return &YAMLDecoder{
+ r: r,
+ scanner: scanner,
+ }
+}
+
+// Read copies any remainder of the previous chunk into data, or scans the
+// next document chunk from the stream. It returns io.ErrShortBuffer when a
+// chunk does not fit into data.
+// TODO: switch to readline approach.
+func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
+ left := len(d.remaining)
+ if left == 0 {
+ // return the next chunk from the stream
+ if !d.scanner.Scan() {
+ err := d.scanner.Err()
+ if err == nil {
+ err = io.EOF
+ }
+ return 0, err
+ }
+ out := d.scanner.Bytes()
+ d.remaining = out
+ left = len(out)
+ }
+
+ // fits within data
+ if left <= len(data) {
+ copy(data, d.remaining)
+ d.remaining = nil
+		return left, nil
+	}
+
+	// caller will need to reread; hand out what fits and keep the rest
+	copy(data, d.remaining[:len(data)])
+	d.remaining = d.remaining[len(data):]
+ return len(data), io.ErrShortBuffer
+}
+
+func (d *YAMLDecoder) Close() error {
+ return d.r.Close()
+}
+
+const yamlSeparator = "\n---"
+const separator = "---"
+
+// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
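+// For example, the stream "a: 1\n---\nb: 2\n" is split into the tokens
+// "a: 1" and "b: 2\n".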
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ sep := len([]byte(yamlSeparator))
+ if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
+ // We have a potential document terminator
+ i += sep
+ after := data[i:]
+ if len(after) == 0 {
+ // we can't read any more characters
+ if atEOF {
+ return len(data), data[:len(data)-sep], nil
+ }
+ return 0, nil, nil
+ }
+ if j := bytes.IndexByte(after, '\n'); j >= 0 {
+ return i + j + 1, data[0 : i-sep], nil
+ }
+ return 0, nil, nil
+ }
+ // If we're at EOF, we have a final, non-terminated line. Return it.
+ if atEOF {
+ return len(data), data, nil
+ }
+ // Request more data.
+ return 0, nil, nil
+}
+
+// decoder is a convenience interface for Decode.
+type decoder interface {
+ Decode(into interface{}) error
+}
+
+// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
+// YAML documents by sniffing for a leading { character.
+type YAMLOrJSONDecoder struct {
+ r io.Reader
+ bufferSize int
+
+ decoder decoder
+ rawData []byte
+}
+
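+// JSONSyntaxError reports a JSON decoding error along with the line on which
+// it occurred.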
+type JSONSyntaxError struct {
+ Line int
+ Err error
+}
+
+func (e JSONSyntaxError) Error() string {
+ return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error())
+}
+
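+// YAMLSyntaxError wraps a syntax error from the underlying YAML parser.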
+type YAMLSyntaxError struct {
+ err error
+}
+
+func (e YAMLSyntaxError) Error() string {
+ return e.err.Error()
+}
+
+// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
+// or JSON documents from the given reader as a stream. bufferSize determines
+// how far into the stream the decoder will look to figure out whether this
+// is a JSON stream (has whitespace followed by an open brace).
+func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
+ return &YAMLOrJSONDecoder{
+ r: r,
+ bufferSize: bufferSize,
+ }
+}
+
+// Decode unmarshals the next object from the underlying stream into the
+// provided object, or returns an error.
+func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
+ if d.decoder == nil {
+ buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
+ if isJSON {
+ glog.V(4).Infof("decoding stream as JSON")
+ d.decoder = json.NewDecoder(buffer)
+ d.rawData = origData
+ } else {
+ glog.V(4).Infof("decoding stream as YAML")
+ d.decoder = NewYAMLToJSONDecoder(buffer)
+ }
+ }
+ err := d.decoder.Decode(into)
+ if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
+ if syntax, ok := err.(*json.SyntaxError); ok {
+ data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
+ if readErr != nil {
+ glog.V(4).Infof("reading stream failed: %v", readErr)
+ }
+ js := string(data)
+
+ // if contents from io.Reader are not complete,
+ // use the original raw data to prevent panic
+ if int64(len(js)) <= syntax.Offset {
+ js = string(d.rawData)
+ }
+
+ start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
+ line := strings.Count(js[:start], "\n")
+ return JSONSyntaxError{
+ Line: line,
+				Err:  errors.New(syntax.Error()),
+ }
+ }
+ }
+ return err
+}
+
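+// Reader returns one unit of the underlying stream (a line or a whole
+// document) per Read call.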
+type Reader interface {
+ Read() ([]byte, error)
+}
+
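+// YAMLReader reads whole YAML documents, delimited by "---" lines, from an
+// underlying line-oriented Reader.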
+type YAMLReader struct {
+ reader Reader
+}
+
+func NewYAMLReader(r *bufio.Reader) *YAMLReader {
+ return &YAMLReader{
+ reader: &LineReader{reader: r},
+ }
+}
+
+// Read returns a full YAML document.
+func (r *YAMLReader) Read() ([]byte, error) {
+ var buffer bytes.Buffer
+ for {
+ line, err := r.reader.Read()
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ sep := len([]byte(separator))
+ if i := bytes.Index(line, []byte(separator)); i == 0 {
+ // We have a potential document terminator
+ i += sep
+ after := line[i:]
+ if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
+ if buffer.Len() != 0 {
+ return buffer.Bytes(), nil
+ }
+ if err == io.EOF {
+ return nil, err
+ }
+ }
+ }
+ if err == io.EOF {
+ if buffer.Len() != 0 {
+ // If we're at EOF, we have a final, non-terminated line. Return it.
+ return buffer.Bytes(), nil
+ }
+ return nil, err
+ }
+ buffer.Write(line)
+ }
+}
+
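+// LineReader reads whole lines from a bufio.Reader, including lines longer
+// than the reader's buffer.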
+type LineReader struct {
+ reader *bufio.Reader
+}
+
+// Read returns a single line (terminated by '\n') from the underlying reader.
+// An error is returned iff there is an error with the underlying reader.
+func (r *LineReader) Read() ([]byte, error) {
+	var (
+		isPrefix = true
+		err      error
+		line     []byte
+		buffer   bytes.Buffer
+	)
+
+ for isPrefix && err == nil {
+ line, isPrefix, err = r.reader.ReadLine()
+ buffer.Write(line)
+ }
+ buffer.WriteByte('\n')
+ return buffer.Bytes(), err
+}
+
+// GuessJSONStream scans the provided reader up to size, looking
+// for an open brace indicating this is JSON. It will return the
+// bufio.Reader it creates for the consumer.
+func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
+ buffer := bufio.NewReaderSize(r, size)
+ b, _ := buffer.Peek(size)
+ return buffer, b, hasJSONPrefix(b)
+}
+
+var jsonPrefix = []byte("{")
+
+// hasJSONPrefix returns true if the provided buffer appears to start with
+// a JSON open brace.
+func hasJSONPrefix(buf []byte) bool {
+ return hasPrefix(buf, jsonPrefix)
+}
+
+// hasPrefix returns true if the first non-whitespace bytes in buf
+// match prefix.
+func hasPrefix(buf []byte, prefix []byte) bool {
+ trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
+ return bytes.HasPrefix(trim, prefix)
+}
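Finally, a sketch of the decoder in use on a small two-document stream (the documents are invented):

	package main

	import (
		"fmt"
		"io"
		"strings"

		"k8s.io/apimachinery/pkg/util/yaml"
	)

	func main() {
		stream := strings.NewReader("name: a\n---\nname: b\n")
		d := yaml.NewYAMLOrJSONDecoder(stream, 4096)
		for {
			var obj map[string]interface{}
			if err := d.Decode(&obj); err != nil {
				if err == io.EOF {
					break
				}
				fmt.Println("error:", err)
				return
			}
			fmt.Println(obj["name"]) // prints "a", then "b"
		}
	}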