Diffstat (limited to 'vendor/k8s.io/apimachinery/third_party')
-rw-r--r--  vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go         513
-rw-r--r--  vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go          27
-rw-r--r--  vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go   388
-rw-r--r--  vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go          91
4 files changed, 1019 insertions, 0 deletions
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go
new file mode 100644
index 000000000..ac6d9cb96
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go
@@ -0,0 +1,513 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json is forked from the Go standard library to enable us to find the
+// field of a struct that a given JSON key maps to.
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ patchStrategyTagKey = "patchStrategy"
+ patchMergeKeyTagKey = "patchMergeKey"
+)
+
+// LookupPatchMetadata finds the patchStrategy and patchMergeKey struct tag
+// fields on a struct field, given the struct type and the JSON name of the field.
+// It returns the field type, a slice of patch strategies, the merge key, and an error.
+// TODO: fix the returned errors to be introspectable.
+func LookupPatchMetadata(t reflect.Type, jsonField string) (
+ elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) {
+ if t.Kind() == reflect.Map {
+ elemType = t.Elem()
+ return
+ }
+ if t.Kind() != reflect.Struct {
+ e = fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s",
+ t.Kind().String())
+ return
+ }
+ jf := []byte(jsonField)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t)
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, jf) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, jf) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential struct field.
+ tjf := t.Field(f.index[0])
+ // we must navigate down all the anonymously included structs in the chain
+ for i := 1; i < len(f.index); i++ {
+ tjf = tjf.Type.Field(f.index[i])
+ }
+ patchStrategy := tjf.Tag.Get(patchStrategyTagKey)
+ patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey)
+ patchStrategies = strings.Split(patchStrategy, ",")
+ elemType = tjf.Type
+ return
+ }
+ e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField)
+ return
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ // index is the sequence of indexes from the containing type fields to this field.
+ // it is a slice because anonymous structs will need multiple navigation steps to correctly
+ // resolve the proper fields
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func (f field) String() string {
+ return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted)
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
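
A minimal usage sketch of LookupPatchMetadata (not part of the diff), assuming the vendored import path k8s.io/apimachinery/third_party/forked/golang/json; PodSpecLike and ContainerLike are hypothetical stand-ins for real API structs annotated with patchStrategy/patchMergeKey tags:

package main

import (
	"fmt"
	"reflect"

	forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
)

type ContainerLike struct {
	Name string `json:"name"`
}

type PodSpecLike struct {
	// The patch tags mirror how Kubernetes API types annotate mergeable list fields.
	Containers []ContainerLike `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
	elemType, strategies, mergeKey, err := forkedjson.LookupPatchMetadata(reflect.TypeOf(PodSpecLike{}), "containers")
	if err != nil {
		panic(err)
	}
	// Prints the matched field's type, the patch strategies, and the merge key,
	// e.g. []main.ContainerLike [merge] name.
	fmt.Println(elemType, strategies, mergeKey)
}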
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
new file mode 100644
index 000000000..c70f431c2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
@@ -0,0 +1,27 @@
+package netutil
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FROM: http://golang.org/src/net/http/client.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+// FROM: http://golang.org/src/net/http/transport.go
+// CanonicalAddr returns url.Host but always with a ":port" suffix.
+func CanonicalAddr(url *url.URL) string {
+ addr := url.Host
+ if !hasPort(addr) {
+ return addr + ":" + portMap[url.Scheme]
+ }
+ return addr
+}
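
A minimal sketch (not part of the diff) of how CanonicalAddr supplies a default port when the URL omits one, assuming the vendored import path:

package main

import (
	"fmt"
	"net/url"

	"k8s.io/apimachinery/third_party/forked/golang/netutil"
)

func main() {
	u, err := url.Parse("https://example.com/path")
	if err != nil {
		panic(err)
	}
	// The URL has no explicit port, so the scheme's default (443 for https) is appended.
	fmt.Println(netutil.CanonicalAddr(u)) // example.com:443
}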
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
new file mode 100644
index 000000000..9e45dbe1d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
@@ -0,0 +1,388 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect is a fork of go's standard library reflection package, which
+// allows for deep equal with equality functions defined.
+package reflect
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Equalities is a map from type to a function comparing two values of
+// that type.
+type Equalities map[reflect.Type]reflect.Value
+
+// EqualitiesOrDie adds the given funcs and, for convenience, panics on errors.
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+ e := Equalities{}
+ if err := e.AddFuncs(funcs...); err != nil {
+ panic(err)
+ }
+ return e
+}
+
+// AddFuncs is a shortcut for multiple calls to AddFunc.
+func (e Equalities) AddFuncs(funcs ...interface{}) error {
+ for _, f := range funcs {
+ if err := e.AddFunc(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddFunc uses func as an equality function: it must take
+// two parameters of the same type, and return a boolean.
+func (e Equalities) AddFunc(eqFunc interface{}) error {
+ fv := reflect.ValueOf(eqFunc)
+ ft := fv.Type()
+ if ft.Kind() != reflect.Func {
+ return fmt.Errorf("expected func, got: %v", ft)
+ }
+ if ft.NumIn() != 2 {
+ return fmt.Errorf("expected three 'in' params, got: %v", ft)
+ }
+ if ft.NumOut() != 1 {
+ return fmt.Errorf("expected one 'out' param, got: %v", ft)
+ }
+ if ft.In(0) != ft.In(1) {
+ return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft)
+ }
+ var forReturnType bool
+ boolType := reflect.TypeOf(forReturnType)
+ if ft.Out(0) != boolType {
+ return fmt.Errorf("expected bool return, got: %v", ft)
+ }
+ e[ft.In(0)] = fv
+ return nil
+}
+
+// Below here is forked from go's reflect/deepequal.go
+
+// During deepValueEqual, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+ a1 uintptr
+ a2 uintptr
+ typ reflect.Type
+}
+
+// unexportedTypePanic is thrown when you use this DeepEqual on something that has an
+// unexported type. It indicates a programmer error, so should not occur at runtime,
+// which is why it's not public and thus impossible to catch.
+type unexportedTypePanic []reflect.Type
+
+func (u unexportedTypePanic) Error() string { return u.String() }
+func (u unexportedTypePanic) String() string {
+ strs := make([]string, len(u))
+ for i, t := range u {
+ strs[i] = fmt.Sprintf("%v", t)
+ }
+ return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ")
+}
+
+func makeUsefulPanic(v reflect.Value) {
+ if x := recover(); x != nil {
+ if u, ok := x.(unexportedTypePanic); ok {
+ u = append(unexportedTypePanic{v.Type()}, u...)
+ x = u
+ }
+ panic(x)
+ }
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+ defer makeUsefulPanic(v1)
+
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if fv, ok := e[v1.Type()]; ok {
+ return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+ }
+
+ hard := func(k reflect.Kind) bool {
+ switch k {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+ return true
+ }
+ return false
+ }
+
+ if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+ addr1 := v1.UnsafeAddr()
+ addr2 := v2.UnsafeAddr()
+ if addr1 > addr2 {
+ // Canonicalize order to reduce number of entries in visited.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are identical ...
+ if addr1 == addr2 {
+ return true
+ }
+
+ // ... or already seen
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case reflect.Array:
+ // We don't need to check length here because length is part of
+ // an array's type, which has already been filtered for.
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Slice:
+ if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+ return false
+ }
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Interface:
+ if v1.IsNil() || v2.IsNil() {
+ return v1.IsNil() == v2.IsNil()
+ }
+ return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Ptr:
+ return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Map:
+ if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+ return false
+ }
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ default:
+ // Normal equality suffices
+ if !v1.CanInterface() || !v2.CanInterface() {
+ panic(unexportedTypePanic{})
+ }
+ return v1.Interface() == v2.Interface()
+ }
+}
+
+// DeepEqual is like reflect.DeepEqual, but focused on semantic equality
+// instead of memory equality.
+//
+// It will use e's equality functions if it finds types that match.
+//
+// An empty slice *is* equal to a nil slice for our purposes; same for maps.
+//
+// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
+// function for these types.
+func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
+ if a1 == nil || a2 == nil {
+ return a1 == a2
+ }
+ v1 := reflect.ValueOf(a1)
+ v2 := reflect.ValueOf(a2)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return e.deepValueEqual(v1, v2, make(map[visit]bool), 0)
+}
+
+func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+ defer makeUsefulPanic(v1)
+
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if fv, ok := e[v1.Type()]; ok {
+ return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+ }
+
+ hard := func(k reflect.Kind) bool {
+ switch k {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+ return true
+ }
+ return false
+ }
+
+ if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+ addr1 := v1.UnsafeAddr()
+ addr2 := v2.UnsafeAddr()
+ if addr1 > addr2 {
+ // Canonicalize order to reduce number of entries in visited.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are identical ...
+ if addr1 == addr2 {
+ return true
+ }
+
+ // ... or already seen
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case reflect.Array:
+ // We don't need to check length here because length is part of
+ // an array's type, which has already been filtered for.
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Slice:
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() > v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ if v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() > v2.Len() {
+ return false
+ }
+ return v1.String() == v2.String()
+ case reflect.Interface:
+ if v1.IsNil() {
+ return true
+ }
+ return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Ptr:
+ if v1.IsNil() {
+ return true
+ }
+ return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Map:
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() > v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ default:
+ // Normal equality suffices
+ if !v1.CanInterface() || !v2.CanInterface() {
+ panic(unexportedTypePanic{})
+ }
+ return v1.Interface() == v2.Interface()
+ }
+}
+
+// DeepDerivative is similar to DeepEqual except that unset fields in a1 are
+// ignored (not compared). This allows us to focus on the fields that matter to
+// the semantic comparison.
+//
+// The unset fields include a nil pointer and an empty string.
+func (e Equalities) DeepDerivative(a1, a2 interface{}) bool {
+ if a1 == nil {
+ return true
+ }
+ v1 := reflect.ValueOf(a1)
+ v2 := reflect.ValueOf(a2)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return e.deepValueDerive(v1, v2, make(map[visit]bool), 0)
+}
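
A minimal sketch (not part of the diff) of the intended use of Equalities: register a custom comparison for a type whose unexported fields DeepEqual cannot inspect, then rely on DeepEqual's semantic rules (nil and empty slices compare equal) and DeepDerivative's skipping of unset left-hand values. The Quantity type here is a hypothetical stand-in.

package main

import (
	"fmt"

	forkedreflect "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

// Quantity has an unexported field, which DeepEqual cannot compare on its own;
// registering an equality func for it avoids the unexportedTypePanic.
type Quantity struct {
	s string
}

func main() {
	eq := forkedreflect.EqualitiesOrDie(
		func(a, b Quantity) bool { return a.s == b.s },
	)
	fmt.Println(eq.DeepEqual([]int(nil), []int{}))                // true: nil and empty slices are semantically equal
	fmt.Println(eq.DeepEqual(Quantity{s: "1"}, Quantity{s: "1"})) // true: uses the registered func
	fmt.Println(eq.DeepDerivative("", "anything"))                // true: an empty string on the left is treated as unset
}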
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go
new file mode 100644
index 000000000..67957ee33
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go
@@ -0,0 +1,91 @@
+// This package is copied from the Go standard library's reflect/type.go.
+// The struct tag library provides no way to extract the list of struct tags,
+// only a specific tag.
+package reflect
+
+import (
+ "fmt"
+
+ "strconv"
+ "strings"
+)
+
+type StructTag struct {
+ Name string
+ Value string
+}
+
+func (t StructTag) String() string {
+ return fmt.Sprintf("%s:%q", t.Name, t.Value)
+}
+
+type StructTags []StructTag
+
+func (tags StructTags) String() string {
+ s := make([]string, 0, len(tags))
+ for _, tag := range tags {
+ s = append(s, tag.String())
+ }
+ return "`" + strings.Join(s, " ") + "`"
+}
+
+func (tags StructTags) Has(name string) bool {
+ for i := range tags {
+ if tags[i].Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// ParseStructTags returns the full set of fields in a struct tag in the order they appear in
+// the struct tag.
+func ParseStructTags(tag string) (StructTags, error) {
+ tags := StructTags{}
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ break
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+ if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
+ break
+ }
+ name := string(tag[:i])
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ break
+ }
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ return nil, err
+ }
+ tags = append(tags, StructTag{Name: name, Value: value})
+ }
+ return tags, nil
+}
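
ParseStructTags complements the standard library's StructTag.Get by returning every tag in declaration order. A minimal sketch (not part of the diff), assuming the vendored import path:

package main

import (
	"fmt"

	forkedreflect "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

func main() {
	tags, err := forkedreflect.ParseStructTags(`json:"name,omitempty" patchStrategy:"merge"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(tags)                      // `json:"name,omitempty" patchStrategy:"merge"`
	fmt.Println(tags.Has("patchStrategy")) // true
}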