author     dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>  2020-05-05 08:57:17 +0000
committer  Daniel J Walsh <dwalsh@redhat.com>  2020-05-05 13:35:55 -0400
commit     2f0bc5ff1cde9afb595868b92dd123478af9ef74 (patch)
tree       d620dce86657a325eb82b251f95e9cd4903ca80f /vendor/sigs.k8s.io
parent     e1be837a4ff149e00ff6af9e71cf9ea625e33e6f (diff)
download   podman-2f0bc5ff1cde9afb595868b92dd123478af9ef74.tar.gz
           podman-2f0bc5ff1cde9afb595868b92dd123478af9ef74.tar.bz2
           podman-2f0bc5ff1cde9afb595868b92dd123478af9ef74.zip
Bump k8s.io/api from 0.17.4 to 0.18.2
Bumps [k8s.io/api](https://github.com/kubernetes/api) from 0.17.4 to 0.18.2.

- [Release notes](https://github.com/kubernetes/api/releases)
- [Commits](https://github.com/kubernetes/api/compare/v0.17.4...v0.18.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor/sigs.k8s.io')
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE                     201
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go          203
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go                 21
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go              97
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go         91
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go               139
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go         98
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go    74
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go                270
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go         209
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go    190
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go       463
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go              50
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go      208
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go              347
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go       294
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go  178
-rw-r--r--  vendor/sigs.k8s.io/yaml/.travis.yml                                      15
-rw-r--r--  vendor/sigs.k8s.io/yaml/OWNERS                                            2
-rw-r--r--  vendor/sigs.k8s.io/yaml/README.md                                        14
-rw-r--r--  vendor/sigs.k8s.io/yaml/go.mod                                            8
-rw-r--r--  vendor/sigs.k8s.io/yaml/go.sum                                            9
-rw-r--r--  vendor/sigs.k8s.io/yaml/yaml.go                                          61
23 files changed, 3228 insertions(+), 14 deletions(-)
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
new file mode 100644
index 000000000..f70cd4167
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// Allocator provides a value object allocation strategy.
+// Value objects can be allocated by passing an allocator to the "Using"
+// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...).
+// Value objects returned from "Using" functions should be given back to the allocator
+// once no longer needed by calling Allocator.Free(Value).
+type Allocator interface {
+ // Free gives the allocator back any value objects returned by the "Using"
+ // receiver functions on the value interfaces.
+ // interface{} may be any of: Value, Map, List or Range.
+ Free(interface{})
+
+ // The unexported functions are for "Using" receiver functions of the value types
+ // to request what they need from the allocator.
+ allocValueUnstructured() *valueUnstructured
+ allocListUnstructuredRange() *listUnstructuredRange
+ allocValueReflect() *valueReflect
+ allocMapReflect() *mapReflect
+ allocStructReflect() *structReflect
+ allocListReflect() *listReflect
+ allocListReflectRange() *listReflectRange
+}
+
+// HeapAllocator simply allocates objects to the heap. It is the default
+// allocator used by receiver functions on the value interfaces that do not accept
+// an allocator, and should be used whenever allocating objects that will not
+// be given back to an allocator by calling Allocator.Free(Value).
+var HeapAllocator = &heapAllocator{}
+
+type heapAllocator struct{}
+
+func (p *heapAllocator) allocValueUnstructured() *valueUnstructured {
+ return &valueUnstructured{}
+}
+
+func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange {
+ return &listUnstructuredRange{vv: &valueUnstructured{}}
+}
+
+func (p *heapAllocator) allocValueReflect() *valueReflect {
+ return &valueReflect{}
+}
+
+func (p *heapAllocator) allocStructReflect() *structReflect {
+ return &structReflect{}
+}
+
+func (p *heapAllocator) allocMapReflect() *mapReflect {
+ return &mapReflect{}
+}
+
+func (p *heapAllocator) allocListReflect() *listReflect {
+ return &listReflect{}
+}
+
+func (p *heapAllocator) allocListReflectRange() *listReflectRange {
+ return &listReflectRange{vr: &valueReflect{}}
+}
+
+func (p *heapAllocator) Free(_ interface{}) {}
+
+// NewFreelistAllocator creates a freelist-based allocator.
+// This allocator provides fast allocation and freeing of short-lived value objects.
+//
+// The freelists are bounded in size by freelistMaxSize. If more than this number of value objects
+// is allocated at once, the excess will be returned to the heap for garbage collection when freed.
+//
+// This allocator is not goroutine-safe and must not be accessed concurrently.
+//
+// This allocator works well for traversal of value data trees. Typical usage is to acquire
+// a freelist allocator at the beginning of the traversal and use it throughout
+// for all temporary value access.
+func NewFreelistAllocator() Allocator {
+ return &freelistAllocator{
+ valueUnstructured: &freelist{new: func() interface{} {
+ return &valueUnstructured{}
+ }},
+ listUnstructuredRange: &freelist{new: func() interface{} {
+ return &listUnstructuredRange{vv: &valueUnstructured{}}
+ }},
+ valueReflect: &freelist{new: func() interface{} {
+ return &valueReflect{}
+ }},
+ mapReflect: &freelist{new: func() interface{} {
+ return &mapReflect{}
+ }},
+ structReflect: &freelist{new: func() interface{} {
+ return &structReflect{}
+ }},
+ listReflect: &freelist{new: func() interface{} {
+ return &listReflect{}
+ }},
+ listReflectRange: &freelist{new: func() interface{} {
+ return &listReflectRange{vr: &valueReflect{}}
+ }},
+ }
+}
+
+// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory.
+// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects
+// that don't fit into the freelist are orphaned on the heap to be garbage collected.
+const freelistMaxSize = 1000
+
+type freelistAllocator struct {
+ valueUnstructured *freelist
+ listUnstructuredRange *freelist
+ valueReflect *freelist
+ mapReflect *freelist
+ structReflect *freelist
+ listReflect *freelist
+ listReflectRange *freelist
+}
+
+type freelist struct {
+ list []interface{}
+ new func() interface{}
+}
+
+func (f *freelist) allocate() interface{} {
+ var w2 interface{}
+ if n := len(f.list); n > 0 {
+ w2, f.list = f.list[n-1], f.list[:n-1]
+ } else {
+ w2 = f.new()
+ }
+ return w2
+}
+
+func (f *freelist) free(v interface{}) {
+ if len(f.list) < freelistMaxSize {
+ f.list = append(f.list, v)
+ }
+}
+
+func (w *freelistAllocator) Free(value interface{}) {
+ switch v := value.(type) {
+ case *valueUnstructured:
+ v.Value = nil // don't hold references to unstructured objects
+ w.valueUnstructured.free(v)
+ case *listUnstructuredRange:
+ v.vv.Value = nil // don't hold references to unstructured objects
+ w.listUnstructuredRange.free(v)
+ case *valueReflect:
+ v.ParentMapKey = nil
+ v.ParentMap = nil
+ w.valueReflect.free(v)
+ case *mapReflect:
+ w.mapReflect.free(v)
+ case *structReflect:
+ w.structReflect.free(v)
+ case *listReflect:
+ w.listReflect.free(v)
+ case *listReflectRange:
+ v.vr.ParentMapKey = nil
+ v.vr.ParentMap = nil
+ w.listReflectRange.free(v)
+ }
+}
+
+func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured {
+ return w.valueUnstructured.allocate().(*valueUnstructured)
+}
+
+func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange {
+ return w.listUnstructuredRange.allocate().(*listUnstructuredRange)
+}
+
+func (w *freelistAllocator) allocValueReflect() *valueReflect {
+ return w.valueReflect.allocate().(*valueReflect)
+}
+
+func (w *freelistAllocator) allocStructReflect() *structReflect {
+ return w.structReflect.allocate().(*structReflect)
+}
+
+func (w *freelistAllocator) allocMapReflect() *mapReflect {
+ return w.mapReflect.allocate().(*mapReflect)
+}
+
+func (w *freelistAllocator) allocListReflect() *listReflect {
+ return w.listReflect.allocate().(*listReflect)
+}
+
+func (w *freelistAllocator) allocListReflectRange() *listReflectRange {
+ return w.listReflectRange.allocate().(*listReflectRange)
+}
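
A minimal usage sketch for the freelist allocator above (a sketch, not part of the
diff: it assumes NewValueInterface, AsList, and AsString from value.go, which is
vendored later in this diffstat):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	// Wrap plain unstructured data as a Value (NewValueInterface is from value.go).
	v := value.NewValueInterface([]interface{}{"a", "b", "c"})

	// Acquire one freelist allocator at the start of the traversal and
	// reuse it throughout, returning objects to it via Free.
	a := value.NewFreelistAllocator()
	r := v.AsList().RangeUsing(a)
	defer a.Free(r)
	for r.Next() {
		i, item := r.Item() // item is only valid until the next Next() or Free()
		fmt.Println(i, item.AsString())
	}
}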
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go
new file mode 100644
index 000000000..84d7f0f3f
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package value defines types for an in-memory representation of YAML or JSON
+// objects, organized for convenient comparison with a schema (as defined by
+// the sibling schema package). Functions for reading and writing the objects
+// are also provided.
+package value
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go
new file mode 100644
index 000000000..be3c67249
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "sort"
+ "strings"
+)
+
+// Field is an individual key-value pair.
+type Field struct {
+ Name string
+ Value Value
+}
+
+// FieldList is a list of key-value pairs. Each field is expected to
+// have a different name.
+type FieldList []Field
+
+// Sort sorts the field list by Name.
+func (f FieldList) Sort() {
+ if len(f) < 2 {
+ return
+ }
+ if len(f) == 2 {
+ if f[1].Name < f[0].Name {
+ f[0], f[1] = f[1], f[0]
+ }
+ return
+ }
+ sort.SliceStable(f, func(i, j int) bool {
+ return f[i].Name < f[j].Name
+ })
+}
+
+// Less compares two lists lexically.
+func (f FieldList) Less(rhs FieldList) bool {
+ return f.Compare(rhs) == -1
+}
+
+// Compare compares two lists lexically. The result will be 0 if f==rhs, -1
+// if f < rhs, and +1 if f > rhs.
+func (f FieldList) Compare(rhs FieldList) int {
+ i := 0
+ for {
+ if i >= len(f) && i >= len(rhs) {
+ // Maps are the same length and all items are equal.
+ return 0
+ }
+ if i >= len(f) {
+ // F is shorter.
+ return -1
+ }
+ if i >= len(rhs) {
+ // RHS is shorter.
+ return 1
+ }
+ if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 {
+ return c
+ }
+ if c := Compare(f[i].Value, rhs[i].Value); c != 0 {
+ return c
+ }
+ // The items are equal; continue.
+ i++
+ }
+}
+
+// Equals returns true if the two field lists are equal, false otherwise.
+func (f FieldList) Equals(rhs FieldList) bool {
+ if len(f) != len(rhs) {
+ return false
+ }
+ for i := range f {
+ if f[i].Name != rhs[i].Name {
+ return false
+ }
+ if !Equals(f[i].Value, rhs[i].Value) {
+ return false
+ }
+ }
+ return true
+}
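
A short sketch of the FieldList contract (assumes NewValueInterface from value.go,
vendored elsewhere in this diff):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	fl := value.FieldList{
		{Name: "b", Value: value.NewValueInterface(int64(2))},
		{Name: "a", Value: value.NewValueInterface(int64(1))},
	}
	fl.Sort()                   // orders fields by Name: a, b
	fmt.Println(fl.Compare(fl)) // 0: same names and values
	fmt.Println(fl.Less(fl))    // false: equal lists are not Less
}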
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go
new file mode 100644
index 000000000..d4adb8fc9
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236
+// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go
+
+func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) {
+ tag := f.Tag.Get("json")
+ if tag == "-" {
+ return "", true, false, false
+ }
+ name, opts := parseTag(tag)
+ if name == "" {
+ name = f.Name
+ }
+ return name, false, opts.Contains("inline"), opts.Contains("omitempty")
+}
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Chan, reflect.Func:
+ panic(fmt.Sprintf("unsupported type: %v", v.Type()))
+ }
+ return false
+}
+
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the given option name. The name must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
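
The tag handling above mirrors encoding/json. Since parseTag and lookupJsonTags are
unexported, this standalone sketch re-implements the split inline to show what they
compute for a typical field:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// splitTag mirrors the unexported parseTag above: the name precedes the
// first comma, the options follow it.
func splitTag(tag string) (name, opts string) {
	if i := strings.Index(tag, ","); i != -1 {
		return tag[:i], tag[i+1:]
	}
	return tag, ""
}

func main() {
	type Pod struct {
		Name string `json:"name,omitempty"`
		Spec string `json:"-"`
	}
	t := reflect.TypeOf(Pod{})

	f, _ := t.FieldByName("Name")
	name, opts := splitTag(f.Tag.Get("json"))
	fmt.Println(name, strings.Contains(","+opts+",", ",omitempty,")) // name true

	f, _ = t.FieldByName("Spec")
	fmt.Println(f.Tag.Get("json") == "-") // true: the field is dropped entirely
}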
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go
new file mode 100644
index 000000000..0748f18e8
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// List represents a list object.
+type List interface {
+ // Length returns how many items can be found in the list.
+ Length() int
+ // At returns the item at the given position in the list. It will
+ // panic if the index is out of range.
+ At(int) Value
+ // AtUsing uses the provided allocator and returns the item at the given
+ // position in the list. It will panic if the index is out of range.
+ // The returned Value should be given back to the Allocator when no longer needed
+ // by calling Allocator.Free(Value).
+ AtUsing(Allocator, int) Value
+ // Range returns a ListRange for iterating over the items in the list.
+ Range() ListRange
+ // RangeUsing uses the provided allocator and returns a ListRange for
+ // iterating over the items in the list.
+ // The returned Range should be given back to the Allocator when no longer needed
+ // by calling Allocator.Free(Value).
+ RangeUsing(Allocator) ListRange
+ // Equals compares the two lists and returns true if they are the same, false otherwise.
+ // Implementations can use ListEquals as a general implementation for this method.
+ Equals(List) bool
+ // EqualsUsing uses the provided allocator and compares the two lists, returning true if
+ // they are the same, false otherwise. Implementations can use ListEqualsUsing as a general
+ // implementation for this method.
+ EqualsUsing(Allocator, List) bool
+}
+
+// ListRange represents a single iteration across the items of a list.
+type ListRange interface {
+ // Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items.
+ Next() bool
+ // Item returns the index and value of the current item in the range, or panics if there is no current item.
+ // For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful to avoid holding
+ // pointers to the value returned by Item() that escape the iteration loop since they become invalid once either
+ // Item() or Allocator.Free() is called.
+ Item() (index int, value Value)
+}
+
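+// EmptyRange is a ListRange with no items; Next always returns false and Item always panics.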
+var EmptyRange = &emptyRange{}
+
+type emptyRange struct{}
+
+func (_ *emptyRange) Next() bool {
+ return false
+}
+
+func (_ *emptyRange) Item() (index int, value Value) {
+ panic("Item called on empty ListRange")
+}
+
+// ListEquals compares two lists lexically.
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient.
+func ListEquals(lhs, rhs List) bool {
+ return ListEqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// ListEqualsUsing uses the provided allocator and compares two lists lexically.
+// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient.
+func ListEqualsUsing(a Allocator, lhs, rhs List) bool {
+ if lhs.Length() != rhs.Length() {
+ return false
+ }
+
+ lhsRange := lhs.RangeUsing(a)
+ defer a.Free(lhsRange)
+ rhsRange := rhs.RangeUsing(a)
+ defer a.Free(rhsRange)
+
+ for lhsRange.Next() && rhsRange.Next() {
+ _, lv := lhsRange.Item()
+ _, rv := rhsRange.Item()
+ if !EqualsUsing(a, lv, rv) {
+ return false
+ }
+ }
+ return true
+}
+
+// ListLess compares two lists lexically.
+func ListLess(lhs, rhs List) bool {
+ return ListCompare(lhs, rhs) == -1
+}
+
+// ListCompare compares two lists lexically. The result will be 0 if l==rhs, -1
+// if l < rhs, and +1 if l > rhs.
+func ListCompare(lhs, rhs List) int {
+ return ListCompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if l==rhs, -1
+// if l < rhs, and +1 if l > rhs.
+func ListCompareUsing(a Allocator, lhs, rhs List) int {
+ lhsRange := lhs.RangeUsing(a)
+ defer a.Free(lhsRange)
+ rhsRange := rhs.RangeUsing(a)
+ defer a.Free(rhsRange)
+
+ for {
+ lhsOk := lhsRange.Next()
+ rhsOk := rhsRange.Next()
+ if !lhsOk && !rhsOk {
+ // Lists are the same length and all items are equal.
+ return 0
+ }
+ if !lhsOk {
+ // LHS is shorter.
+ return -1
+ }
+ if !rhsOk {
+ // RHS is shorter.
+ return 1
+ }
+ _, lv := lhsRange.Item()
+ _, rv := rhsRange.Item()
+ if c := CompareUsing(a, lv, rv); c != 0 {
+ return c
+ }
+ // The items are equal; continue.
+ }
+}
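
A sketch of the lexical list-comparison semantics defined above (assumes
NewValueInterface and AsList from value.go, vendored elsewhere in this diff):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	asList := func(items ...interface{}) value.List {
		return value.NewValueInterface(items).AsList()
	}
	fmt.Println(value.ListCompare(asList("a"), asList("a", "b"))) // -1: lhs is a shorter prefix
	fmt.Println(value.ListCompare(asList("b"), asList("a", "z"))) // 1: the first items already differ
	fmt.Println(value.ListEquals(asList("a"), asList("a")))       // true
}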
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go
new file mode 100644
index 000000000..197d4c921
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "reflect"
+)
+
+type listReflect struct {
+ Value reflect.Value
+}
+
+func (r listReflect) Length() int {
+ val := r.Value
+ return val.Len()
+}
+
+func (r listReflect) At(i int) Value {
+ val := r.Value
+ return mustWrapValueReflect(val.Index(i), nil, nil)
+}
+
+func (r listReflect) AtUsing(a Allocator, i int) Value {
+ val := r.Value
+ return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil)
+}
+
+func (r listReflect) Unstructured() interface{} {
+ l := r.Length()
+ result := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ result[i] = r.At(i).Unstructured()
+ }
+ return result
+}
+
+func (r listReflect) Range() ListRange {
+ return r.RangeUsing(HeapAllocator)
+}
+
+func (r listReflect) RangeUsing(a Allocator) ListRange {
+ length := r.Value.Len()
+ if length == 0 {
+ return EmptyRange
+ }
+ rr := a.allocListReflectRange()
+ rr.list = r.Value
+ rr.i = -1
+ rr.entry = TypeReflectEntryOf(r.Value.Type().Elem())
+ return rr
+}
+
+func (r listReflect) Equals(other List) bool {
+ return r.EqualsUsing(HeapAllocator, other)
+}
+
+func (r listReflect) EqualsUsing(a Allocator, other List) bool {
+ if otherReflectList, ok := other.(*listReflect); ok {
+ return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface())
+ }
+ return ListEqualsUsing(a, &r, other)
+}
+
+type listReflectRange struct {
+ list reflect.Value
+ vr *valueReflect
+ i int
+ entry *TypeReflectCacheEntry
+}
+
+func (r *listReflectRange) Next() bool {
+ r.i += 1
+ return r.i < r.list.Len()
+}
+
+func (r *listReflectRange) Item() (index int, value Value) {
+ if r.i < 0 {
+ panic("Item() called before first calling Next()")
+ }
+ if r.i >= r.list.Len() {
+ panic("Item() called on ListRange with no more items")
+ }
+ v := r.list.Index(r.i)
+ return r.i, r.vr.mustReuse(v, r.entry, nil, nil)
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go
new file mode 100644
index 000000000..64cd8e7c0
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+type listUnstructured []interface{}
+
+func (l listUnstructured) Length() int {
+ return len(l)
+}
+
+func (l listUnstructured) At(i int) Value {
+ return NewValueInterface(l[i])
+}
+
+func (l listUnstructured) AtUsing(a Allocator, i int) Value {
+ return a.allocValueUnstructured().reuse(l[i])
+}
+
+func (l listUnstructured) Equals(other List) bool {
+ return l.EqualsUsing(HeapAllocator, other)
+}
+
+func (l listUnstructured) EqualsUsing(a Allocator, other List) bool {
+ return ListEqualsUsing(a, &l, other)
+}
+
+func (l listUnstructured) Range() ListRange {
+ return l.RangeUsing(HeapAllocator)
+}
+
+func (l listUnstructured) RangeUsing(a Allocator) ListRange {
+ if len(l) == 0 {
+ return EmptyRange
+ }
+ r := a.allocListUnstructuredRange()
+ r.list = l
+ r.i = -1
+ return r
+}
+
+type listUnstructuredRange struct {
+ list listUnstructured
+ vv *valueUnstructured
+ i int
+}
+
+func (r *listUnstructuredRange) Next() bool {
+ r.i += 1
+ return r.i < len(r.list)
+}
+
+func (r *listUnstructuredRange) Item() (index int, value Value) {
+ if r.i < 0 {
+ panic("Item() called before first calling Next()")
+ }
+ if r.i >= len(r.list) {
+ panic("Item() called on ListRange with no more items")
+ }
+ return r.i, r.vv.reuse(r.list[r.i])
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go
new file mode 100644
index 000000000..168b9fa08
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go
@@ -0,0 +1,270 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "sort"
+)
+
+// Map represents a map or Go structure.
+type Map interface {
+ // Set changes or sets the value of the given key.
+ Set(key string, val Value)
+ // Get returns the value for the given key, if present, or (nil, false) otherwise.
+ Get(key string) (Value, bool)
+ // GetUsing uses the provided allocator and returns the value for the given key,
+ // if present, or (nil, false) otherwise.
+ // The returned Value should be given back to the Allocator when no longer needed
+ // by calling Allocator.Free(Value).
+ GetUsing(a Allocator, key string) (Value, bool)
+ // Has returns true if the key is present, or false otherwise.
+ Has(key string) bool
+ // Delete removes the key from the map.
+ Delete(key string)
+ // Equals compares the two maps and returns true if they are the same, false otherwise.
+ // Implementations can use MapEquals as a general implementation for this method.
+ Equals(other Map) bool
+ // EqualsUsing uses the provided allocator and compares the two maps, returning true if
+ // they are the same, false otherwise. Implementations can use MapEqualsUsing as a general
+ // implementation for this method.
+ EqualsUsing(a Allocator, other Map) bool
+ // Iterate runs the given function for each key/value in the
+ // map. Returning false in the closure prematurely stops the
+ // iteration.
+ Iterate(func(key string, value Value) bool) bool
+ // IterateUsing uses the provided allocator and runs the given function for each key/value
+ // in the map. Returning false in the closure prematurely stops the iteration.
+ IterateUsing(Allocator, func(key string, value Value) bool) bool
+ // Length returns the number of items in the map.
+ Length() int
+ // Empty returns true if the map is empty.
+ Empty() bool
+ // Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called
+ // with the values from both maps, otherwise it is called with the value of the map that contains the key and nil
+ // for the map that does not contain the key. Returning false in the closure prematurely stops the iteration.
+ Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool
+ // ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps
+ // contain a value for a given key, fn is called with the values from both maps, otherwise it is called with
+ // the value of the map that contains the key and nil for the map that does not contain the key. Returning
+ // false in the closure prematurely stops the iteration.
+ ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool
+}
+
+// MapTraverseOrder defines the map traversal ordering available.
+type MapTraverseOrder int
+
+const (
+ // Unordered indicates that the map traversal has no ordering requirement.
+ Unordered = iota
+ // LexicalKeyOrder indicates that the map traversal is ordered by key, lexically.
+ LexicalKeyOrder
+)
+
+// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called
+// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil
+// for the other map. Returning false in the closure prematurely stops the iteration.
+func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return MapZipUsing(HeapAllocator, lhs, rhs, order, fn)
+}
+
+// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps
+// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with
+// the value of the map that contains the key and nil for the other map. Returning false in the closure
+// prematurely stops the iteration.
+func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ if lhs != nil {
+ return lhs.ZipUsing(a, rhs, order, fn)
+ }
+ if rhs != nil {
+ return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped
+ return fn(key, lhs, rhs)
+ })
+ }
+ return true
+}
+
+// defaultMapZip provides a default implementation of Zip for Map implementations
+// that do not supply their own optimized version.
+func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ switch order {
+ case Unordered:
+ return unorderedMapZip(a, lhs, rhs, fn)
+ case LexicalKeyOrder:
+ return lexicalKeyOrderedMapZip(a, lhs, rhs, fn)
+ default:
+ panic("Unsupported map order")
+ }
+}
+
+func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool {
+ if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) {
+ return true
+ }
+
+ if lhs != nil {
+ ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool {
+ var rhsValue Value
+ if rhs != nil {
+ if item, ok := rhs.GetUsing(a, key); ok {
+ rhsValue = item
+ defer a.Free(rhsValue)
+ }
+ }
+ return fn(key, lhsValue, rhsValue)
+ })
+ if !ok {
+ return false
+ }
+ }
+ if rhs != nil {
+ return rhs.IterateUsing(a, func(key string, rhsValue Value) bool {
+ if lhs == nil || !lhs.Has(key) {
+ return fn(key, nil, rhsValue)
+ }
+ return true
+ })
+ }
+ return true
+}
+
+func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool {
+ var lhsLength, rhsLength int
+ var orderedLength int // rough estimate of length of union of map keys
+ if lhs != nil {
+ lhsLength = lhs.Length()
+ orderedLength = lhsLength
+ }
+ if rhs != nil {
+ rhsLength = rhs.Length()
+ if rhsLength > orderedLength {
+ orderedLength = rhsLength
+ }
+ }
+ if lhsLength == 0 && rhsLength == 0 {
+ return true
+ }
+
+ ordered := make([]string, 0, orderedLength)
+ if lhs != nil {
+ lhs.IterateUsing(a, func(key string, _ Value) bool {
+ ordered = append(ordered, key)
+ return true
+ })
+ }
+ if rhs != nil {
+ rhs.IterateUsing(a, func(key string, _ Value) bool {
+ if lhs == nil || !lhs.Has(key) {
+ ordered = append(ordered, key)
+ }
+ return true
+ })
+ }
+ sort.Strings(ordered)
+ for _, key := range ordered {
+ var litem, ritem Value
+ if lhs != nil {
+ litem, _ = lhs.GetUsing(a, key)
+ }
+ if rhs != nil {
+ ritem, _ = rhs.GetUsing(a, key)
+ }
+ ok := fn(key, litem, ritem)
+ if litem != nil {
+ a.Free(litem)
+ }
+ if ritem != nil {
+ a.Free(ritem)
+ }
+ if !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// MapLess compares two maps lexically.
+func MapLess(lhs, rhs Map) bool {
+ return MapCompare(lhs, rhs) == -1
+}
+
+// MapCompare compares two maps lexically.
+func MapCompare(lhs, rhs Map) int {
+ return MapCompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// MapCompareUsing uses the provided allocator and compares two maps lexically.
+func MapCompareUsing(a Allocator, lhs, rhs Map) int {
+ c := 0
+ var llength, rlength int
+ if lhs != nil {
+ llength = lhs.Length()
+ }
+ if rhs != nil {
+ rlength = rhs.Length()
+ }
+ if llength == 0 && rlength == 0 {
+ return 0
+ }
+ i := 0
+ MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool {
+ switch {
+ case i == llength:
+ c = -1
+ case i == rlength:
+ c = 1
+ case lhs == nil:
+ c = 1
+ case rhs == nil:
+ c = -1
+ default:
+ c = CompareUsing(a, lhs, rhs)
+ }
+ i++
+ return c == 0
+ })
+ return c
+}
+
+// MapEquals returns true if lhs == rhs, false otherwise. This function
+// acts on generic types and should not be used by callers, but can help
+// implement Map.Equals.
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient.
+func MapEquals(lhs, rhs Map) bool {
+ return MapEqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs,
+// false otherwise. This function acts on generic types and should not be used
+// by callers, but can help implement Map.Equals.
+// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient.
+func MapEqualsUsing(a Allocator, lhs, rhs Map) bool {
+ if lhs == nil && rhs == nil {
+ return true
+ }
+ if lhs == nil || rhs == nil {
+ return false
+ }
+ if lhs.Length() != rhs.Length() {
+ return false
+ }
+ return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool {
+ if lhs == nil || rhs == nil {
+ return false
+ }
+ return EqualsUsing(a, lhs, rhs)
+ })
+}
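
A sketch of the Zip semantics over the union of keys (assumes NewValueInterface
and AsMap from value.go, vendored elsewhere in this diff):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	lhs := value.NewValueInterface(map[string]interface{}{"a": int64(1), "b": int64(2)}).AsMap()
	rhs := value.NewValueInterface(map[string]interface{}{"b": int64(2), "c": int64(3)}).AsMap()

	// Visits keys a, b, c in lexical order; a side missing the key is passed nil.
	value.MapZip(lhs, rhs, value.LexicalKeyOrder, func(key string, l, r value.Value) bool {
		fmt.Println(key, l != nil, r != nil) // a true false; b true true; c false true
		return true // returning false would stop the iteration early
	})
	fmt.Println(value.MapEquals(lhs, rhs)) // false: the key sets differ
}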
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go
new file mode 100644
index 000000000..dc8b8c720
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "reflect"
+)
+
+type mapReflect struct {
+ valueReflect
+}
+
+func (r mapReflect) Length() int {
+ val := r.Value
+ return val.Len()
+}
+
+func (r mapReflect) Empty() bool {
+ val := r.Value
+ return val.Len() == 0
+}
+
+func (r mapReflect) Get(key string) (Value, bool) {
+ return r.GetUsing(HeapAllocator, key)
+}
+
+func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) {
+ k, v, ok := r.get(key)
+ if !ok {
+ return nil, false
+ }
+ return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true
+}
+
+func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) {
+ mapKey := r.toMapKey(k)
+ val := r.Value.MapIndex(mapKey)
+ return mapKey, val, val.IsValid() && val != reflect.Value{}
+}
+
+func (r mapReflect) Has(key string) bool {
+ val := r.Value.MapIndex(r.toMapKey(key))
+ if !val.IsValid() {
+ return false
+ }
+ return val != reflect.Value{}
+}
+
+func (r mapReflect) Set(key string, val Value) {
+ r.Value.SetMapIndex(r.toMapKey(key), reflect.ValueOf(val.Unstructured()))
+}
+
+func (r mapReflect) Delete(key string) {
+ val := r.Value
+ val.SetMapIndex(r.toMapKey(key), reflect.Value{})
+}
+
+// TODO: Do we need to support types that implement json.Marshaler and are used as string keys?
+func (r mapReflect) toMapKey(key string) reflect.Value {
+ val := r.Value
+ return reflect.ValueOf(key).Convert(val.Type().Key())
+}
+
+func (r mapReflect) Iterate(fn func(string, Value) bool) bool {
+ return r.IterateUsing(HeapAllocator, fn)
+}
+
+func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool {
+ if r.Value.Len() == 0 {
+ return true
+ }
+ v := a.allocValueReflect()
+ defer a.Free(v)
+ return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool {
+ return fn(key.String(), v.mustReuse(value, e, &r.Value, &key))
+ })
+}
+
+func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool {
+ iter := val.MapRange()
+ entry := TypeReflectEntryOf(val.Type().Elem())
+ for iter.Next() {
+ next := iter.Value()
+ if !next.IsValid() {
+ continue
+ }
+ if !fn(entry, iter.Key(), next) {
+ return false
+ }
+ }
+ return true
+}
+
+func (r mapReflect) Unstructured() interface{} {
+ result := make(map[string]interface{}, r.Length())
+ r.Iterate(func(s string, value Value) bool {
+ result[s] = value.Unstructured()
+ return true
+ })
+ return result
+}
+
+func (r mapReflect) Equals(m Map) bool {
+ return r.EqualsUsing(HeapAllocator, m)
+}
+
+func (r mapReflect) EqualsUsing(a Allocator, m Map) bool {
+ lhsLength := r.Length()
+ rhsLength := m.Length()
+ if lhsLength != rhsLength {
+ return false
+ }
+ if lhsLength == 0 {
+ return true
+ }
+ vr := a.allocValueReflect()
+ defer a.Free(vr)
+ entry := TypeReflectEntryOf(r.Value.Type().Elem())
+ return m.Iterate(func(key string, value Value) bool {
+ _, lhsVal, ok := r.get(key)
+ if !ok {
+ return false
+ }
+ return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value)
+ })
+}
+
+func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return r.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered {
+ return r.unorderedReflectZip(a, otherMapReflect, fn)
+ }
+ return defaultMapZip(a, &r, other, order, fn)
+}
+
+// unorderedReflectZip provides an optimized unordered zip for mapReflect types.
+func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool {
+ if r.Empty() && (other == nil || other.Empty()) {
+ return true
+ }
+
+ lhs := r.Value
+ lhsEntry := TypeReflectEntryOf(lhs.Type().Elem())
+
+ // map lookup via reflection is expensive enough that it is better to keep track of visited keys
+ visited := map[string]struct{}{}
+
+ vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect()
+ defer a.Free(vlhs)
+ defer a.Free(vrhs)
+
+ if other != nil {
+ rhs := other.Value
+ rhsEntry := TypeReflectEntryOf(rhs.Type().Elem())
+ iter := rhs.MapRange()
+
+ for iter.Next() {
+ key := iter.Key()
+ keyString := key.String()
+ next := iter.Value()
+ if !next.IsValid() {
+ continue
+ }
+ rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key)
+ visited[keyString] = struct{}{}
+ var lhsVal Value
+ if _, v, ok := r.get(keyString); ok {
+ lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key)
+ }
+ if !fn(keyString, lhsVal, rhsVal) {
+ return false
+ }
+ }
+ }
+
+ iter := lhs.MapRange()
+ for iter.Next() {
+ key := iter.Key()
+ if _, ok := visited[key.String()]; ok {
+ continue
+ }
+ next := iter.Value()
+ if !next.IsValid() {
+ continue
+ }
+ if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go
new file mode 100644
index 000000000..d8e208628
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+type mapUnstructuredInterface map[interface{}]interface{}
+
+func (m mapUnstructuredInterface) Set(key string, val Value) {
+ m[key] = val.Unstructured()
+}
+
+func (m mapUnstructuredInterface) Get(key string) (Value, bool) {
+ return m.GetUsing(HeapAllocator, key)
+}
+
+func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) {
+ if v, ok := m[key]; !ok {
+ return nil, false
+ } else {
+ return a.allocValueUnstructured().reuse(v), true
+ }
+}
+
+func (m mapUnstructuredInterface) Has(key string) bool {
+ _, ok := m[key]
+ return ok
+}
+
+func (m mapUnstructuredInterface) Delete(key string) {
+ delete(m, key)
+}
+
+func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool {
+ return m.IterateUsing(HeapAllocator, fn)
+}
+
+func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool {
+ if len(m) == 0 {
+ return true
+ }
+ vv := a.allocValueUnstructured()
+ defer a.Free(vv)
+ for k, v := range m {
+ if ks, ok := k.(string); !ok {
+ continue
+ } else {
+ if !fn(ks, vv.reuse(v)) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (m mapUnstructuredInterface) Length() int {
+ return len(m)
+}
+
+func (m mapUnstructuredInterface) Empty() bool {
+ return len(m) == 0
+}
+
+func (m mapUnstructuredInterface) Equals(other Map) bool {
+ return m.EqualsUsing(HeapAllocator, other)
+}
+
+func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool {
+ lhsLength := m.Length()
+ rhsLength := other.Length()
+ if lhsLength != rhsLength {
+ return false
+ }
+ if lhsLength == 0 {
+ return true
+ }
+ vv := a.allocValueUnstructured()
+ defer a.Free(vv)
+ return other.Iterate(func(key string, value Value) bool {
+ lhsVal, ok := m[key]
+ if !ok {
+ return false
+ }
+ return Equals(vv.reuse(lhsVal), value)
+ })
+}
+
+func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return m.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return defaultMapZip(a, m, other, order, fn)
+}
+
+type mapUnstructuredString map[string]interface{}
+
+func (m mapUnstructuredString) Set(key string, val Value) {
+ m[key] = val.Unstructured()
+}
+
+func (m mapUnstructuredString) Get(key string) (Value, bool) {
+ return m.GetUsing(HeapAllocator, key)
+}
+
+func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) {
+ if v, ok := m[key]; !ok {
+ return nil, false
+ } else {
+ return a.allocValueUnstructured().reuse(v), true
+ }
+}
+
+func (m mapUnstructuredString) Has(key string) bool {
+ _, ok := m[key]
+ return ok
+}
+
+func (m mapUnstructuredString) Delete(key string) {
+ delete(m, key)
+}
+
+func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool {
+ return m.IterateUsing(HeapAllocator, fn)
+}
+
+func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool {
+ if len(m) == 0 {
+ return true
+ }
+ vv := a.allocValueUnstructured()
+ defer a.Free(vv)
+ for k, v := range m {
+ if !fn(k, vv.reuse(v)) {
+ return false
+ }
+ }
+ return true
+}
+
+func (m mapUnstructuredString) Length() int {
+ return len(m)
+}
+
+func (m mapUnstructuredString) Equals(other Map) bool {
+ return m.EqualsUsing(HeapAllocator, other)
+}
+
+func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool {
+ lhsLength := m.Length()
+ rhsLength := other.Length()
+ if lhsLength != rhsLength {
+ return false
+ }
+ if lhsLength == 0 {
+ return true
+ }
+ vv := a.allocValueUnstructured()
+ defer a.Free(vv)
+ return other.Iterate(func(key string, value Value) bool {
+ lhsVal, ok := m[key]
+ if !ok {
+ return false
+ }
+ return Equals(vv.reuse(lhsVal), value)
+ })
+}
+
+func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return m.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return defaultMapZip(a, m, other, order, fn)
+}
+
+func (m mapUnstructuredString) Empty() bool {
+ return len(m) == 0
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
new file mode 100644
index 000000000..49e6dd169
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
@@ -0,0 +1,463 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "sync/atomic"
+)
+
+// UnstructuredConverter defines how a type can be converted directly to unstructured.
+// Types that implement json.Marshaler may optionally implement this interface to provide a more
+// direct and more efficient conversion. Any type that implements this interface must still
+// implement the equivalent conversion via json.Marshaler.
+type UnstructuredConverter interface {
+ json.Marshaler // require that json.Marshaler is implemented
+
+ // ToUnstructured returns the unstructured representation.
+ ToUnstructured() interface{}
+}
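+
+// A minimal sketch (illustrative, not part of the upstream file) of a type
+// opting in to this fast path; "Minutes" is a hypothetical example type:
+//
+//	type Minutes int
+//
+//	func (m Minutes) MarshalJSON() ([]byte, error) { return json.Marshal(int(m)) }
+//	func (m Minutes) ToUnstructured() interface{}  { return int64(m) }
+//
+// The two methods must agree: ToUnstructured must return exactly what decoding
+// the MarshalJSON output would produce (here, an int64).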
+
+// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured.
+type TypeReflectCacheEntry struct {
+ isJsonMarshaler bool
+ ptrIsJsonMarshaler bool
+ isJsonUnmarshaler bool
+ ptrIsJsonUnmarshaler bool
+ isStringConvertable bool
+ ptrIsStringConvertable bool
+
+ structFields map[string]*FieldCacheEntry
+ orderedStructFields []*FieldCacheEntry
+}
+
+// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from
+// unstructured.
+type FieldCacheEntry struct {
+ // JsonName returns the name of the field according to the json tags on the struct field.
+ JsonName string
+ // isOmitEmpty is true if the field has the json 'omitempty' tag.
+ isOmitEmpty bool
+ // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of
+ // a field in a reflect.Value struct. The field indices in the list form a path used
+ // to traverse through intermediary 'inline' fields.
+ fieldPath [][]int
+
+ fieldType reflect.Type
+ TypeEntry *TypeReflectCacheEntry
+}
+
+func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool {
+ return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal))
+}
+
+// GetFrom returns the field identified by this FieldCacheEntry from the provided struct.
+func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value {
+ // field might be nested within 'inline' structs
+ for _, elem := range f.fieldPath {
+ structVal = structVal.FieldByIndex(elem)
+ }
+ return structVal
+}
+
+var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()
+var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem()
+var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem()
+var defaultReflectCache = newReflectCache()
+
+// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type.
+func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry {
+ cm := defaultReflectCache.get()
+ if record, ok := cm[t]; ok {
+ return record
+ }
+ updates := reflectCacheMap{}
+ result := typeReflectEntryOf(cm, t, updates)
+ if len(updates) > 0 {
+ defaultReflectCache.update(updates)
+ }
+ return result
+}
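+
+// Illustrative use (a sketch; myStruct stands for any caller-side struct type):
+//
+//	entry := TypeReflectEntryOf(reflect.TypeOf(myStruct{}))
+//	for _, f := range entry.OrderedFields() {
+//		fmt.Println(f.JsonName)
+//	}
+//
+// Entries are memoized in defaultReflectCache, so repeated lookups for the same
+// type cost a single map read.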
+
+// typeReflectEntryOf returns the TypeReflectCacheEntry for the provided reflect.Type, recording in updates every
+// entry needed to add the type, and the types its fields transitively depend on, to the cache.
+func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry {
+ if record, ok := cm[t]; ok {
+ return record
+ }
+ if record, ok := updates[t]; ok {
+ return record
+ }
+ typeEntry := &TypeReflectCacheEntry{
+ isJsonMarshaler: t.Implements(marshalerType),
+ ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType),
+ isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType),
+ isStringConvertable: t.Implements(unstructuredConvertableType),
+ ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType),
+ }
+ if t.Kind() == reflect.Struct {
+ fieldEntries := map[string]*FieldCacheEntry{}
+ buildStructCacheEntry(t, fieldEntries, nil)
+ typeEntry.structFields = fieldEntries
+ sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries))
+ i := 0
+ for _, entry := range fieldEntries {
+ sortedByJsonName[i] = entry
+ i++
+ }
+ sort.Slice(sortedByJsonName, func(i, j int) bool {
+ return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName
+ })
+ typeEntry.orderedStructFields = sortedByJsonName
+ }
+
+ // cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving
+ // the field.typeEntry references, or creating them if they are not already in the cache
+ updates[t] = typeEntry
+
+ for _, field := range typeEntry.structFields {
+ if field.TypeEntry == nil {
+ field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates)
+ }
+ }
+ return typeEntry
+}
+
+func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) {
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ jsonName, omit, isInline, isOmitempty := lookupJsonTags(field)
+ if omit {
+ continue
+ }
+ if isInline {
+ buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index))
+ continue
+ }
+ info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
+ infos[jsonName] = info
+ }
+}
+
+// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs.
+func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry {
+ return e.structFields
+}
+
+// OrderedFields returns a list of FieldCacheEntry for structs, sorted by JSON field name, or nil for non-structs.
+func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry {
+ return e.orderedStructFields
+}
+
+// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured.
+func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool {
+ return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable
+}
+
+// ToUnstructured converts the provided value to unstructured and returns it.
+func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) {
+ // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505
+ // and is intended to replace it.
+
+ // Check if the object has a custom string converter and use it if available, since it is much more efficient
+ // than round tripping through json.
+ if converter, ok := e.getUnstructuredConverter(sv); ok {
+ return converter.ToUnstructured(), nil
+ }
+ // Check if the object has a custom JSON marshaller/unmarshaller.
+ if marshaler, ok := e.getJsonMarshaler(sv); ok {
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ // We're done - we don't need to store anything.
+ return nil, nil
+ }
+
+ data, err := marshaler.MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+ switch {
+ case len(data) == 0:
+ return nil, fmt.Errorf("error decoding from json: empty value")
+
+ case bytes.Equal(data, nullBytes):
+ // We're done - we don't need to store anything.
+ return nil, nil
+
+ case bytes.Equal(data, trueBytes):
+ return true, nil
+
+ case bytes.Equal(data, falseBytes):
+ return false, nil
+
+ case data[0] == '"':
+ var result string
+ err := unmarshal(data, &result)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding string from json: %v", err)
+ }
+ return result, nil
+
+ case data[0] == '{':
+ result := make(map[string]interface{})
+ err := unmarshal(data, &result)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding object from json: %v", err)
+ }
+ return result, nil
+
+ case data[0] == '[':
+ result := make([]interface{}, 0)
+ err := unmarshal(data, &result)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding array from json: %v", err)
+ }
+ return result, nil
+
+ default:
+ var (
+ resultInt int64
+ resultFloat float64
+ err error
+ )
+ if err = unmarshal(data, &resultInt); err == nil {
+ return resultInt, nil
+ } else if err = unmarshal(data, &resultFloat); err == nil {
+ return resultFloat, nil
+ } else {
+ return nil, fmt.Errorf("error decoding number from json: %v", err)
+ }
+ }
+ }
+
+ return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type())
+}
+
+// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured.
+func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool {
+ return e.isJsonUnmarshaler
+}
+
+// FromUnstructured converts the provided source value from unstructured into the provided destination value.
+func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error {
+ // TODO: this could be made much more efficient using direct conversions like
+ // UnstructuredConverter.ToUnstructured provides.
+ st := dv.Type()
+ data, err := json.Marshal(sv.Interface())
+ if err != nil {
+ return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
+ }
+ if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok {
+ return unmarshaler.UnmarshalJSON(data)
+ }
+ return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type())
+}
+
+var (
+ nullBytes = []byte("null")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+)
+
+func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) {
+ if e.isJsonMarshaler {
+ return v.Interface().(json.Marshaler), true
+ }
+ if e.ptrIsJsonMarshaler {
+ // Check pointer receivers if v is not a pointer
+ if v.Kind() != reflect.Ptr && v.CanAddr() {
+ v = v.Addr()
+ return v.Interface().(json.Marshaler), true
+ }
+ }
+ return nil, false
+}
+
+func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) {
+ if !e.isJsonUnmarshaler {
+ return nil, false
+ }
+ return v.Addr().Interface().(json.Unmarshaler), true
+}
+
+func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) {
+ if e.isStringConvertable {
+ return v.Interface().(UnstructuredConverter), true
+ }
+ if e.ptrIsStringConvertable {
+ // Check pointer receivers if v is not a pointer
+ if v.CanAddr() {
+ v = v.Addr()
+ return v.Interface().(UnstructuredConverter), true
+ }
+ }
+ return nil, false
+}
+
+type typeReflectCache struct {
+ // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any
+ // go program using this cache
+ value atomic.Value
+ // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a
+ // read-lock since the atomic value is always read-only
+ mu sync.Mutex
+}
+
+func newReflectCache() *typeReflectCache {
+ cache := &typeReflectCache{}
+ cache.value.Store(make(reflectCacheMap))
+ return cache
+}
+
+type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry
+
+// get returns the reflectCacheMap.
+func (c *typeReflectCache) get() reflectCacheMap {
+ return c.value.Load().(reflectCacheMap)
+}
+
+// update merges the provided updates into the cache.
+func (c *typeReflectCache) update(updates reflectCacheMap) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ currentCacheMap := c.value.Load().(reflectCacheMap)
+
+ hasNewEntries := false
+ for t := range updates {
+ if _, ok := currentCacheMap[t]; !ok {
+ hasNewEntries = true
+ break
+ }
+ }
+ if !hasNewEntries {
+ // Bail if the updates have been set while waiting for lock acquisition.
+ // This is safe since setting entries is idempotent.
+ return
+ }
+
+ newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates))
+ for k, v := range currentCacheMap {
+ newCacheMap[k] = v
+ }
+ for t, update := range updates {
+ newCacheMap[t] = update
+ }
+ c.value.Store(newCacheMap)
+}
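+
+// The net effect (a sketch; t is a placeholder reflect.Type): readers take a
+// lock-free snapshot that is never mutated after being published.
+//
+//	cm := c.get() // atomic load, no lock held
+//	_ = cm[t]     // safe: update() only ever stores fresh copies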
+
+// The json unmarshal logic below is from k8s.io/apimachinery/pkg/util/json
+// to handle number conversions as expected by Kubernetes
+
+// limit recursive depth to prevent stack overflow errors
+const maxDepth = 10000
+
+// unmarshal unmarshals the given data
+// If v is a *map[string]interface{}, numbers are converted to int64 or float64
+func unmarshal(data []byte, v interface{}) error {
+ switch v := v.(type) {
+ case *map[string]interface{}:
+ // Build a decoder from the given data
+ decoder := json.NewDecoder(bytes.NewBuffer(data))
+ // Preserve numbers, rather than casting to float64 automatically
+ decoder.UseNumber()
+ // Run the decode
+ if err := decoder.Decode(v); err != nil {
+ return err
+ }
+ // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+ return convertMapNumbers(*v, 0)
+
+ case *[]interface{}:
+ // Build a decoder from the given data
+ decoder := json.NewDecoder(bytes.NewBuffer(data))
+ // Preserve numbers, rather than casting to float64 automatically
+ decoder.UseNumber()
+ // Run the decode
+ if err := decoder.Decode(v); err != nil {
+ return err
+ }
+ // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+ return convertSliceNumbers(*v, 0)
+
+ default:
+ return json.Unmarshal(data, v)
+ }
+}
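+
+// For example (a sketch of the intended behavior):
+//
+//	var m map[string]interface{}
+//	_ = unmarshal([]byte(`{"a": 1, "b": 1.5}`), &m)
+//	// m["a"] == int64(1) rather than float64(1); m["b"] == float64(1.5)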
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertMapNumbers(m map[string]interface{}, depth int) error {
+ if depth > maxDepth {
+ return fmt.Errorf("exceeded max depth of %d", maxDepth)
+ }
+
+ var err error
+ for k, v := range m {
+ switch v := v.(type) {
+ case json.Number:
+ m[k], err = convertNumber(v)
+ case map[string]interface{}:
+ err = convertMapNumbers(v, depth+1)
+ case []interface{}:
+ err = convertSliceNumbers(v, depth+1)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertSliceNumbers(s []interface{}, depth int) error {
+ if depth > maxDepth {
+ return fmt.Errorf("exceeded max depth of %d", maxDepth)
+ }
+
+ var err error
+ for i, v := range s {
+ switch v := v.(type) {
+ case json.Number:
+ s[i], err = convertNumber(v)
+ case map[string]interface{}:
+ err = convertMapNumbers(v, depth+1)
+ case []interface{}:
+ err = convertSliceNumbers(v, depth+1)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+ // Attempt to convert to an int64 first
+ if i, err := n.Int64(); err == nil {
+ return i, nil
+ }
+ // Return a float64 (default json.Decode() behavior)
+ // An overflow will return an error
+ return n.Float64()
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go
new file mode 100644
index 000000000..c78a4c18d
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// FloatCompare compares floats. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func FloatCompare(lhs, rhs float64) int {
+ if lhs > rhs {
+ return 1
+ } else if lhs < rhs {
+ return -1
+ }
+ return 0
+}
+
+// IntCompare compares integers. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func IntCompare(lhs, rhs int64) int {
+ if lhs > rhs {
+ return 1
+ } else if lhs < rhs {
+ return -1
+ }
+ return 0
+}
+
+// BoolCompare compares booleans. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs (false sorts before true).
+func BoolCompare(lhs, rhs bool) int {
+ if lhs == rhs {
+ return 0
+	} else if !lhs {
+ return -1
+ }
+ return 1
+}
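+
+// All three comparators follow the same strcmp-style contract, e.g. (a sketch):
+//
+//	IntCompare(1, 2)         // -1
+//	FloatCompare(2.0, 2.0)   //  0
+//	BoolCompare(true, false) // +1 (false sorts before true)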
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go
new file mode 100644
index 000000000..4a7bb5c6e
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type structReflect struct {
+ valueReflect
+}
+
+func (r structReflect) Length() int {
+ i := 0
+ eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool {
+ i++
+ return true
+ })
+ return i
+}
+
+func (r structReflect) Empty() bool {
+ return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool {
+ return false // exit early if the struct is non-empty
+ })
+}
+
+func (r structReflect) Get(key string) (Value, bool) {
+ return r.GetUsing(HeapAllocator, key)
+}
+
+func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) {
+ if val, ok := r.findJsonNameField(key); ok {
+ return a.allocValueReflect().mustReuse(val, nil, nil, nil), true
+ }
+ return nil, false
+}
+
+func (r structReflect) Has(key string) bool {
+ _, ok := r.findJsonNameField(key)
+ return ok
+}
+
+func (r structReflect) Set(key string, val Value) {
+ fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key]
+ if !ok {
+ panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface()))
+ }
+ oldVal := fieldEntry.GetFrom(r.Value)
+ newVal := reflect.ValueOf(val.Unstructured())
+ r.update(fieldEntry, key, oldVal, newVal)
+}
+
+func (r structReflect) Delete(key string) {
+ fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key]
+ if !ok {
+ panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface()))
+ }
+ oldVal := fieldEntry.GetFrom(r.Value)
+ if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty {
+ panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface()))
+ }
+ r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type()))
+}
+
+func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) {
+ if oldVal.CanSet() {
+ oldVal.Set(newVal)
+ return
+ }
+
+	// map items are not addressable, so if a struct is contained in a map, the only way to modify it is
+	// to write a replacement copy of the struct back into the map.
+ if r.ParentMap != nil {
+ if r.ParentMapKey == nil {
+ panic("ParentMapKey must not be nil if ParentMap is not nil")
+ }
+ replacement := reflect.New(r.Value.Type()).Elem()
+ fieldEntry.GetFrom(replacement).Set(newVal)
+ r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement)
+ return
+ }
+
+ // This should never happen since NewValueReflect ensures that the root object reflected on is a pointer and map
+ // item replacement is handled above.
+ panic(fmt.Sprintf("key %s may not be modified on struct: %T: struct is not settable", key, r.Value.Interface()))
+}
+
+func (r structReflect) Iterate(fn func(string, Value) bool) bool {
+ return r.IterateUsing(HeapAllocator, fn)
+}
+
+func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool {
+ vr := a.allocValueReflect()
+ defer a.Free(vr)
+ return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool {
+ return fn(s, vr.mustReuse(value, e, nil, nil))
+ })
+}
+
+func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool {
+ for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() {
+ fieldVal := fieldCacheEntry.GetFrom(structVal)
+ if fieldCacheEntry.CanOmit(fieldVal) {
+ // omit it
+ continue
+ }
+ ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal)
+ if !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (r structReflect) Unstructured() interface{} {
+	// Use the number of struct fields as a cheap way to roughly estimate the map size.
+ result := make(map[string]interface{}, r.Value.NumField())
+ r.Iterate(func(s string, value Value) bool {
+ result[s] = value.Unstructured()
+ return true
+ })
+ return result
+}
+
+func (r structReflect) Equals(m Map) bool {
+ return r.EqualsUsing(HeapAllocator, m)
+}
+
+func (r structReflect) EqualsUsing(a Allocator, m Map) bool {
+ // MapEquals uses zip and is fairly efficient for structReflect
+ return MapEqualsUsing(a, &r, m)
+}
+
+func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) {
+ structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName]
+ if !ok {
+ return reflect.Value{}, false
+ }
+ fieldVal := structCacheEntry.GetFrom(r.Value)
+ return fieldVal, !structCacheEntry.CanOmit(fieldVal)
+}
+
+func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ return r.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+ if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() {
+ lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect()
+ defer a.Free(lhsvr)
+ defer a.Free(rhsvr)
+ return r.structZip(otherStruct, lhsvr, rhsvr, fn)
+ }
+ return defaultMapZip(a, &r, other, order, fn)
+}
+
+// structZip provides an optimized zip for structReflect types. The zip always visits fields in lexical key order,
+// since ordering the zip costs nothing extra for structured types.
+func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool {
+ lhsVal := r.Value
+ rhsVal := other.Value
+
+ for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() {
+ lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal)
+ rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal)
+ lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal)
+ rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal)
+ if lhsOmit && rhsOmit {
+ continue
+ }
+ var lhsVal, rhsVal Value
+ if !lhsOmit {
+ lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
+ }
+ if !rhsOmit {
+ rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
+ }
+ if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go
new file mode 100644
index 000000000..ea79e3a00
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ jsoniter "github.com/json-iterator/go"
+ "gopkg.in/yaml.v2"
+)
+
+var (
+ readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
+ writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
+)
+
+// A Value corresponds to an 'atom' in the schema. It should return true
+// for at least one of the IsXXX methods below, or the value is
+// considered "invalid".
+type Value interface {
+ // IsMap returns true if the Value is a Map, false otherwise.
+ IsMap() bool
+ // IsList returns true if the Value is a List, false otherwise.
+ IsList() bool
+ // IsBool returns true if the Value is a bool, false otherwise.
+ IsBool() bool
+	// IsInt returns true if the Value is an int64, false otherwise.
+ IsInt() bool
+ // IsFloat returns true if the Value is a float64, false
+ // otherwise.
+ IsFloat() bool
+ // IsString returns true if the Value is a string, false
+ // otherwise.
+ IsString() bool
+	// IsNull returns true if the Value is null, false otherwise.
+ IsNull() bool
+
+ // AsMap converts the Value into a Map (or panic if the type
+ // doesn't allow it).
+ AsMap() Map
+ // AsMapUsing uses the provided allocator and converts the Value
+ // into a Map (or panic if the type doesn't allow it).
+ AsMapUsing(Allocator) Map
+ // AsList converts the Value into a List (or panic if the type
+ // doesn't allow it).
+ AsList() List
+ // AsListUsing uses the provided allocator and converts the Value
+ // into a List (or panic if the type doesn't allow it).
+ AsListUsing(Allocator) List
+ // AsBool converts the Value into a bool (or panic if the type
+ // doesn't allow it).
+ AsBool() bool
+ // AsInt converts the Value into an int64 (or panic if the type
+ // doesn't allow it).
+ AsInt() int64
+ // AsFloat converts the Value into a float64 (or panic if the type
+ // doesn't allow it).
+ AsFloat() float64
+ // AsString converts the Value into a string (or panic if the type
+ // doesn't allow it).
+ AsString() string
+
+ // Unstructured converts the Value into an Unstructured interface{}.
+ Unstructured() interface{}
+}
+
+// FromJSON is a helper function for reading a JSON document.
+func FromJSON(input []byte) (Value, error) {
+ return FromJSONFast(input)
+}
+
+// FromJSONFast is a helper function for reading a JSON document.
+func FromJSONFast(input []byte) (Value, error) {
+ iter := readPool.BorrowIterator(input)
+ defer readPool.ReturnIterator(iter)
+ return ReadJSONIter(iter)
+}
+
+// ToJSON is a helper function for producing a JSON document.
+func ToJSON(v Value) ([]byte, error) {
+ buf := bytes.Buffer{}
+ stream := writePool.BorrowStream(&buf)
+ defer writePool.ReturnStream(stream)
+ WriteJSONStream(v, stream)
+ b := stream.Buffer()
+ err := stream.Flush()
+ // Help jsoniter manage its buffers--without this, the next
+ // use of the stream is likely to require an allocation. Look
+ // at the jsoniter stream code to understand why. They were probably
+ // optimizing for folks using the buffer directly.
+ stream.SetBuffer(b[:0])
+ return buf.Bytes(), err
+}
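+
+// Round-trip sketch (illustrative only):
+//
+//	v, _ := FromJSON([]byte(`{"replicas": 3}`))
+//	out, _ := ToJSON(v) // out holds the compact form {"replicas":3}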
+
+// ReadJSONIter reads a Value from a JSON iterator.
+func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) {
+ v := iter.Read()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil, iter.Error
+ }
+ return NewValueInterface(v), nil
+}
+
+// WriteJSONStream writes a value into a JSON stream.
+func WriteJSONStream(v Value, stream *jsoniter.Stream) {
+ stream.WriteVal(v.Unstructured())
+}
+
+// ToYAML marshals a value as YAML.
+func ToYAML(v Value) ([]byte, error) {
+ return yaml.Marshal(v.Unstructured())
+}
+
+// Equals returns true iff the two values are equal.
+func Equals(lhs, rhs Value) bool {
+ return EqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// EqualsUsing uses the provided allocator and returns true iff the two values are equal.
+func EqualsUsing(a Allocator, lhs, rhs Value) bool {
+ if lhs.IsFloat() || rhs.IsFloat() {
+ var lf float64
+ if lhs.IsFloat() {
+ lf = lhs.AsFloat()
+ } else if lhs.IsInt() {
+ lf = float64(lhs.AsInt())
+ } else {
+ return false
+ }
+ var rf float64
+ if rhs.IsFloat() {
+ rf = rhs.AsFloat()
+ } else if rhs.IsInt() {
+ rf = float64(rhs.AsInt())
+ } else {
+ return false
+ }
+ return lf == rf
+ }
+ if lhs.IsInt() {
+ if rhs.IsInt() {
+ return lhs.AsInt() == rhs.AsInt()
+ }
+ return false
+ } else if rhs.IsInt() {
+ return false
+ }
+ if lhs.IsString() {
+ if rhs.IsString() {
+ return lhs.AsString() == rhs.AsString()
+ }
+ return false
+ } else if rhs.IsString() {
+ return false
+ }
+ if lhs.IsBool() {
+ if rhs.IsBool() {
+ return lhs.AsBool() == rhs.AsBool()
+ }
+ return false
+ } else if rhs.IsBool() {
+ return false
+ }
+ if lhs.IsList() {
+ if rhs.IsList() {
+ lhsList := lhs.AsListUsing(a)
+ defer a.Free(lhsList)
+ rhsList := rhs.AsListUsing(a)
+ defer a.Free(rhsList)
+ return lhsList.EqualsUsing(a, rhsList)
+ }
+ return false
+ } else if rhs.IsList() {
+ return false
+ }
+ if lhs.IsMap() {
+ if rhs.IsMap() {
+ lhsList := lhs.AsMapUsing(a)
+ defer a.Free(lhsList)
+ rhsList := rhs.AsMapUsing(a)
+ defer a.Free(rhsList)
+ return lhsList.EqualsUsing(a, rhsList)
+ }
+ return false
+ } else if rhs.IsMap() {
+ return false
+ }
+ if lhs.IsNull() {
+ if rhs.IsNull() {
+ return true
+ }
+ return false
+ } else if rhs.IsNull() {
+ return false
+ }
+	// Neither value has any type set; treat them as equal.
+ return true
+}
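+
+// Note that ints and floats compare numerically across types, e.g. (a sketch):
+//
+//	Equals(NewValueInterface(int64(2)), NewValueInterface(2.0)) // true
+//	Equals(NewValueInterface("2"), NewValueInterface(2.0))      // false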
+
+// ToString returns a human-readable representation of the value.
+func ToString(v Value) string {
+ if v.IsNull() {
+ return "null"
+ }
+ switch {
+ case v.IsFloat():
+ return fmt.Sprintf("%v", v.AsFloat())
+ case v.IsInt():
+ return fmt.Sprintf("%v", v.AsInt())
+ case v.IsString():
+ return fmt.Sprintf("%q", v.AsString())
+ case v.IsBool():
+ return fmt.Sprintf("%v", v.AsBool())
+ case v.IsList():
+ strs := []string{}
+ list := v.AsList()
+ for i := 0; i < list.Length(); i++ {
+ strs = append(strs, ToString(list.At(i)))
+ }
+ return "[" + strings.Join(strs, ",") + "]"
+ case v.IsMap():
+ strs := []string{}
+ v.AsMap().Iterate(func(k string, v Value) bool {
+ strs = append(strs, fmt.Sprintf("%v=%v", k, ToString(v)))
+ return true
+ })
+ return strings.Join(strs, "")
+ }
+	// No type is set on the value; it is invalid.
+ return "{{undefined}}"
+}
+
+// Less provides a total ordering for Value (so that they can be sorted, even
+// if they are of different types).
+func Less(lhs, rhs Value) bool {
+ return Compare(lhs, rhs) == -1
+}
+
+// Compare provides a total ordering for Value (so that they can be
+// sorted, even if they are of different types). The result will be 0 if
+// v==rhs, -1 if v < rhs, and +1 if v > rhs.
+func Compare(lhs, rhs Value) int {
+ return CompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// CompareUsing uses the provided allocator and provides a total
+// ordering for Value (so that they can be sorted, even if they
+// are of different types). The result will be 0 if v==rhs, -1
+// if v < rhs, and +1 if v > rhs.
+func CompareUsing(a Allocator, lhs, rhs Value) int {
+ if lhs.IsFloat() {
+ if !rhs.IsFloat() {
+ // Extra: compare floats and ints numerically.
+ if rhs.IsInt() {
+ return FloatCompare(lhs.AsFloat(), float64(rhs.AsInt()))
+ }
+ return -1
+ }
+ return FloatCompare(lhs.AsFloat(), rhs.AsFloat())
+ } else if rhs.IsFloat() {
+ // Extra: compare floats and ints numerically.
+ if lhs.IsInt() {
+ return FloatCompare(float64(lhs.AsInt()), rhs.AsFloat())
+ }
+ return 1
+ }
+
+ if lhs.IsInt() {
+ if !rhs.IsInt() {
+ return -1
+ }
+ return IntCompare(lhs.AsInt(), rhs.AsInt())
+ } else if rhs.IsInt() {
+ return 1
+ }
+
+ if lhs.IsString() {
+ if !rhs.IsString() {
+ return -1
+ }
+ return strings.Compare(lhs.AsString(), rhs.AsString())
+ } else if rhs.IsString() {
+ return 1
+ }
+
+ if lhs.IsBool() {
+ if !rhs.IsBool() {
+ return -1
+ }
+ return BoolCompare(lhs.AsBool(), rhs.AsBool())
+ } else if rhs.IsBool() {
+ return 1
+ }
+
+ if lhs.IsList() {
+ if !rhs.IsList() {
+ return -1
+ }
+ lhsList := lhs.AsListUsing(a)
+ defer a.Free(lhsList)
+ rhsList := rhs.AsListUsing(a)
+ defer a.Free(rhsList)
+ return ListCompareUsing(a, lhsList, rhsList)
+ } else if rhs.IsList() {
+ return 1
+ }
+ if lhs.IsMap() {
+ if !rhs.IsMap() {
+ return -1
+ }
+ lhsMap := lhs.AsMapUsing(a)
+ defer a.Free(lhsMap)
+ rhsMap := rhs.AsMapUsing(a)
+ defer a.Free(rhsMap)
+ return MapCompareUsing(a, lhsMap, rhsMap)
+ } else if rhs.IsMap() {
+ return 1
+ }
+ if lhs.IsNull() {
+ if !rhs.IsNull() {
+ return -1
+ }
+ return 0
+ } else if rhs.IsNull() {
+ return 1
+ }
+
+ // Invalid Value-- nothing is set.
+ return 0
+}
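+
+// The resulting cross-type order is: numbers, then strings, then bools, then
+// lists, then maps, then null. For example (a sketch):
+//
+//	Compare(NewValueInterface(1.5), NewValueInterface("a"))  // -1
+//	Compare(NewValueInterface(nil), NewValueInterface(true)) // +1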
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go
new file mode 100644
index 000000000..05e70deba
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go
@@ -0,0 +1,294 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "encoding/base64"
+ "fmt"
+ "reflect"
+)
+
+// NewValueReflect creates a Value backed by an "interface{}" type,
+// typically a structured object in the Kubernetes world that uses reflection to expose its fields.
+// The provided "interface{}" value must be a pointer so that the value can be modified via reflection.
+// The provided "interface{}" may contain structs and types that are converted to Values
+// by the json.Marshaler interface.
+func NewValueReflect(value interface{}) (Value, error) {
+ if value == nil {
+ return NewValueInterface(nil), nil
+ }
+ v := reflect.ValueOf(value)
+ if v.Kind() != reflect.Ptr {
+ // The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible.
+ return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer")
+ }
+ return wrapValueReflect(v, nil, nil)
+}
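+
+// For example (a sketch; obj stands for any JSON-tagged struct value):
+//
+//	v, err := NewValueReflect(&obj) // ok: a pointer; fields remain settable
+//	_, err = NewValueReflect(obj)   // returns an error: not a pointer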
+
+// wrapValueReflect wraps the provided reflect.Value as a Value. If the parent in the data tree is a map, parentMap
+// and parentMapKey must be provided so that the returned value may be set and deleted.
+func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) {
+ val := HeapAllocator.allocValueReflect()
+ return val.reuse(value, nil, parentMap, parentMapKey)
+}
+
+// mustWrapValueReflect wraps the provided reflect.Value as a Value, and panics if there is an error. If the parent in
+// the data tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
+func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value {
+ v, err := wrapValueReflect(value, parentMap, parentMapKey)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// the value interface doesn't care about the type for value.IsNull, so we can use a constant
+var nilType = reflect.TypeOf(&struct{}{})
+
+// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey
+// must be provided so that the returned value may be set and deleted.
+func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) {
+ if cacheEntry == nil {
+ cacheEntry = TypeReflectEntryOf(value.Type())
+ }
+ if cacheEntry.CanConvertToUnstructured() {
+ u, err := cacheEntry.ToUnstructured(value)
+ if err != nil {
+ return nil, err
+ }
+ if u == nil {
+ value = reflect.Zero(nilType)
+ } else {
+ value = reflect.ValueOf(u)
+ }
+ }
+ r.Value = dereference(value)
+ r.ParentMap = parentMap
+ r.ParentMapKey = parentMapKey
+ r.kind = kind(r.Value)
+ return r, nil
+}
+
+// mustReuse replaces the value of the valueReflect and panics if there is an error. If parent in the data tree is a
+// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
+func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value {
+ v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+func dereference(val reflect.Value) reflect.Value {
+ kind := val.Kind()
+ if (kind == reflect.Interface || kind == reflect.Ptr) && !safeIsNil(val) {
+ return val.Elem()
+ }
+ return val
+}
+
+type valueReflect struct {
+ ParentMap *reflect.Value
+ ParentMapKey *reflect.Value
+ Value reflect.Value
+ kind reflectType
+}
+
+func (r valueReflect) IsMap() bool {
+ return r.kind == mapType || r.kind == structMapType
+}
+
+func (r valueReflect) IsList() bool {
+ return r.kind == listType
+}
+
+func (r valueReflect) IsBool() bool {
+ return r.kind == boolType
+}
+
+func (r valueReflect) IsInt() bool {
+ return r.kind == intType || r.kind == uintType
+}
+
+func (r valueReflect) IsFloat() bool {
+ return r.kind == floatType
+}
+
+func (r valueReflect) IsString() bool {
+ return r.kind == stringType || r.kind == byteStringType
+}
+
+func (r valueReflect) IsNull() bool {
+ return r.kind == nullType
+}
+
+type reflectType = int
+
+const (
+ mapType = iota
+ structMapType
+ listType
+ intType
+ uintType
+ floatType
+ stringType
+ byteStringType
+ boolType
+ nullType
+)
+
+func kind(v reflect.Value) reflectType {
+ typ := v.Type()
+ rk := typ.Kind()
+ switch rk {
+ case reflect.Map:
+ if v.IsNil() {
+ return nullType
+ }
+ return mapType
+ case reflect.Struct:
+ return structMapType
+ case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
+ return intType
+ case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8:
+ // Uint64 deliberately excluded, see valueUnstructured.Int.
+ return uintType
+ case reflect.Float64, reflect.Float32:
+ return floatType
+ case reflect.String:
+ return stringType
+ case reflect.Bool:
+ return boolType
+ case reflect.Slice:
+ if v.IsNil() {
+ return nullType
+ }
+ elemKind := typ.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return byteStringType
+ }
+ return listType
+ case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface:
+ if v.IsNil() {
+ return nullType
+ }
+ panic(fmt.Sprintf("unsupported type: %v", v.Type()))
+ default:
+ panic(fmt.Sprintf("unsupported type: %v", v.Type()))
+ }
+}
+
+// TODO find a cleaner way to avoid panics from reflect.IsNil()
+func safeIsNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (r valueReflect) AsMap() Map {
+ return r.AsMapUsing(HeapAllocator)
+}
+
+func (r valueReflect) AsMapUsing(a Allocator) Map {
+ switch r.kind {
+ case structMapType:
+ v := a.allocStructReflect()
+ v.valueReflect = r
+ return v
+ case mapType:
+ v := a.allocMapReflect()
+ v.valueReflect = r
+ return v
+ default:
+ panic("value is not a map or struct")
+ }
+}
+
+func (r valueReflect) AsList() List {
+ return r.AsListUsing(HeapAllocator)
+}
+
+func (r valueReflect) AsListUsing(a Allocator) List {
+ if r.IsList() {
+ v := a.allocListReflect()
+ v.Value = r.Value
+ return v
+ }
+ panic("value is not a list")
+}
+
+func (r valueReflect) AsBool() bool {
+ if r.IsBool() {
+ return r.Value.Bool()
+ }
+ panic("value is not a bool")
+}
+
+func (r valueReflect) AsInt() int64 {
+ if r.kind == intType {
+ return r.Value.Int()
+ }
+ if r.kind == uintType {
+ return int64(r.Value.Uint())
+ }
+
+ panic("value is not an int")
+}
+
+func (r valueReflect) AsFloat() float64 {
+ if r.IsFloat() {
+ return r.Value.Float()
+ }
+ panic("value is not a float")
+}
+
+func (r valueReflect) AsString() string {
+ switch r.kind {
+ case stringType:
+ return r.Value.String()
+ case byteStringType:
+ return base64.StdEncoding.EncodeToString(r.Value.Bytes())
+ }
+ panic("value is not a string")
+}
+
+func (r valueReflect) Unstructured() interface{} {
+ val := r.Value
+ switch {
+ case r.IsNull():
+ return nil
+ case val.Kind() == reflect.Struct:
+ return structReflect{r}.Unstructured()
+ case val.Kind() == reflect.Map:
+ return mapReflect{valueReflect: r}.Unstructured()
+ case r.IsList():
+ return listReflect{r.Value}.Unstructured()
+ case r.IsString():
+ return r.AsString()
+ case r.IsInt():
+ return r.AsInt()
+ case r.IsBool():
+ return r.AsBool()
+ case r.IsFloat():
+ return r.AsFloat()
+ default:
+		panic(fmt.Sprintf("value of type %s is not supported by the value reflector", val.Type()))
+ }
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go
new file mode 100644
index 000000000..ac5a92628
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "fmt"
+)
+
+// NewValueInterface creates a Value backed by an "interface{}" type,
+// typically an unstructured object in Kubernetes world.
+// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types,
+// string or boolean. Nested interface{} must also be one of these types.
+func NewValueInterface(v interface{}) Value {
+ return Value(HeapAllocator.allocValueUnstructured().reuse(v))
+}
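+
+// For example (a sketch):
+//
+//	v := NewValueInterface(map[string]interface{}{"a": int64(1)})
+//	v.IsMap()          // true
+//	v.AsMap().Has("a") // true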
+
+type valueUnstructured struct {
+ Value interface{}
+}
+
+// reuse replaces the value of the valueUnstructured.
+func (vi *valueUnstructured) reuse(value interface{}) Value {
+ vi.Value = value
+ return vi
+}
+
+func (v valueUnstructured) IsMap() bool {
+ if _, ok := v.Value.(map[string]interface{}); ok {
+ return true
+ }
+ if _, ok := v.Value.(map[interface{}]interface{}); ok {
+ return true
+ }
+ return false
+}
+
+func (v valueUnstructured) AsMap() Map {
+ return v.AsMapUsing(HeapAllocator)
+}
+
+func (v valueUnstructured) AsMapUsing(_ Allocator) Map {
+ if v.Value == nil {
+ panic("invalid nil")
+ }
+ switch t := v.Value.(type) {
+ case map[string]interface{}:
+ return mapUnstructuredString(t)
+ case map[interface{}]interface{}:
+ return mapUnstructuredInterface(t)
+ }
+ panic(fmt.Errorf("not a map: %#v", v))
+}
+
+func (v valueUnstructured) IsList() bool {
+ if v.Value == nil {
+ return false
+ }
+ _, ok := v.Value.([]interface{})
+ return ok
+}
+
+func (v valueUnstructured) AsList() List {
+ return v.AsListUsing(HeapAllocator)
+}
+
+func (v valueUnstructured) AsListUsing(_ Allocator) List {
+ return listUnstructured(v.Value.([]interface{}))
+}
+
+func (v valueUnstructured) IsFloat() bool {
+ if v.Value == nil {
+ return false
+ } else if _, ok := v.Value.(float64); ok {
+ return true
+ } else if _, ok := v.Value.(float32); ok {
+ return true
+ }
+ return false
+}
+
+func (v valueUnstructured) AsFloat() float64 {
+ if f, ok := v.Value.(float32); ok {
+ return float64(f)
+ }
+ return v.Value.(float64)
+}
+
+func (v valueUnstructured) IsInt() bool {
+ if v.Value == nil {
+ return false
+ } else if _, ok := v.Value.(int); ok {
+ return true
+ } else if _, ok := v.Value.(int8); ok {
+ return true
+ } else if _, ok := v.Value.(int16); ok {
+ return true
+ } else if _, ok := v.Value.(int32); ok {
+ return true
+ } else if _, ok := v.Value.(int64); ok {
+ return true
+ } else if _, ok := v.Value.(uint); ok {
+ return true
+ } else if _, ok := v.Value.(uint8); ok {
+ return true
+ } else if _, ok := v.Value.(uint16); ok {
+ return true
+ } else if _, ok := v.Value.(uint32); ok {
+ return true
+ }
+ return false
+}
+
+func (v valueUnstructured) AsInt() int64 {
+ if i, ok := v.Value.(int); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(int8); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(int16); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(int32); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(uint); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(uint8); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(uint16); ok {
+ return int64(i)
+ } else if i, ok := v.Value.(uint32); ok {
+ return int64(i)
+ }
+ return v.Value.(int64)
+}
+
+func (v valueUnstructured) IsString() bool {
+ if v.Value == nil {
+ return false
+ }
+ _, ok := v.Value.(string)
+ return ok
+}
+
+func (v valueUnstructured) AsString() string {
+ return v.Value.(string)
+}
+
+func (v valueUnstructured) IsBool() bool {
+ if v.Value == nil {
+ return false
+ }
+ _, ok := v.Value.(bool)
+ return ok
+}
+
+func (v valueUnstructured) AsBool() bool {
+ return v.Value.(bool)
+}
+
+func (v valueUnstructured) IsNull() bool {
+ return v.Value == nil
+}
+
+func (v valueUnstructured) Unstructured() interface{} {
+ return v.Value
+}
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
index 03ddc7318..d20e23eff 100644
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -1,14 +1,13 @@
language: go
dist: xenial
go:
- - 1.9.x
- - 1.10.x
- - 1.11.x
+ - 1.12.x
+ - 1.13.x
script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d .)
+ - diff -u <(echo -n) <(gofmt -d *.go)
- diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
- - go tool vet .
- - go test -v -race ./...
+ - GO111MODULE=on go vet .
+ - GO111MODULE=on go test -v -race ./...
+ - git diff --exit-code
install:
- - go get golang.org/x/lint/golint
+ - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
index 11ad7ce1a..325b40b07 100644
--- a/vendor/sigs.k8s.io/yaml/OWNERS
+++ b/vendor/sigs.k8s.io/yaml/OWNERS
@@ -1,3 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
approvers:
- dims
- lavalamp
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
index 0200f75b4..5a651d916 100644
--- a/vendor/sigs.k8s.io/yaml/README.md
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -1,12 +1,14 @@
# YAML marshaling and unmarshaling support for Go
-[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml)
+
+kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml).
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
-In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
@@ -32,13 +34,13 @@ GOOD:
To install, run:
```
-$ go get github.com/ghodss/yaml
+$ go get sigs.k8s.io/yaml
```
And import using:
```
-import "github.com/ghodss/yaml"
+import "sigs.k8s.io/yaml"
```
Usage is very similar to the JSON library:
@@ -49,7 +51,7 @@ package main
import (
"fmt"
- "github.com/ghodss/yaml"
+ "sigs.k8s.io/yaml"
)
type Person struct {
@@ -93,7 +95,7 @@ package main
import (
"fmt"
- "github.com/ghodss/yaml"
+ "sigs.k8s.io/yaml"
)
func main() {
diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod
new file mode 100644
index 000000000..7224f3497
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/go.mod
@@ -0,0 +1,8 @@
+module sigs.k8s.io/yaml
+
+go 1.12
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ gopkg.in/yaml.v2 v2.2.8
+)
diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum
new file mode 100644
index 000000000..76e49483a
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/go.sum
@@ -0,0 +1,9 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
index 024596112..efbc535d4 100644
--- a/vendor/sigs.k8s.io/yaml/yaml.go
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -317,3 +317,64 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
return yamlObj, nil
}
}
+
+// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice,
+// without going through a byte representation. A nil or empty map[string]interface{} input is
+// converted to an empty map, i.e. yaml.MapSlice(nil).
+//
+// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice.
+//
+// int64 and float64 are down-casted following the logic of github.com/go-yaml/yaml:
+// - float64s are down-casted as far as possible without data loss to int, int64, uint64.
+// - int64s are down-casted to int if possible without data loss.
+//
+// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case.
+//
+// string, bool and any other types are unchanged.
+func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice {
+ if len(j) == 0 {
+ return nil
+ }
+ ret := make(yaml.MapSlice, 0, len(j))
+ for k, v := range j {
+ ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)})
+ }
+ return ret
+}
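+
+// For example (a sketch of the down-casting behavior):
+//
+//	JSONObjectToYAMLObject(map[string]interface{}{"n": float64(3)})
+//	// yields yaml.MapSlice{{Key: "n", Value: int(3)}}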
+
+func jsonToYAMLValue(j interface{}) interface{} {
+ switch j := j.(type) {
+ case map[string]interface{}:
+ if j == nil {
+ return interface{}(nil)
+ }
+ return JSONObjectToYAMLObject(j)
+ case []interface{}:
+ if j == nil {
+ return interface{}(nil)
+ }
+ ret := make([]interface{}, len(j))
+ for i := range j {
+ ret[i] = jsonToYAMLValue(j[i])
+ }
+ return ret
+ case float64:
+ // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
+ if i64 := int64(j); j == float64(i64) {
+ if i := int(i64); i64 == int64(i) {
+ return i
+ }
+ return i64
+ }
+ if ui64 := uint64(j); j == float64(ui64) {
+ return ui64
+ }
+ return j
+ case int64:
+ if i := int(j); j == int64(i) {
+ return i
+ }
+ return j
+ }
+ return j
+}