path: root/vendor/k8s.io/apimachinery/pkg
author     baude <bbaude@redhat.com>  2021-02-25 09:25:28 -0600
committer  baude <bbaude@redhat.com>  2021-02-25 10:02:41 -0600
commit     24d9bda7ff8a3e6a9f249401e05e35e73284ae61 (patch)
tree       6777cc2c23306d1a6b87ef40b9fe4eab2764b7dd /vendor/k8s.io/apimachinery/pkg
parent     9ec8106841c55bc085012727748e2d73826be97d (diff)
download   podman-24d9bda7ff8a3e6a9f249401e05e35e73284ae61.tar.gz
           podman-24d9bda7ff8a3e6a9f249401e05e35e73284ae61.tar.bz2
           podman-24d9bda7ff8a3e6a9f249401e05e35e73284ae61.zip
prune remotecommand dependency
prune a dependency that was only being used for a simple struct. Should
correct checksum issue on tarballs

[NO TESTS NEEDED]

Fixes: #9355

Signed-off-by: baude <bbaude@redhat.com>
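The pruned dependency was pulled in only for a small terminal-size struct. A minimal sketch of the kind of local replacement this implies, assuming the upstream struct in question is remotecommand.TerminalSize (width/height in character cells); the package name below is hypothetical and not taken from this diff:

package define

// TerminalSize mirrors the field layout of the upstream
// remotecommand.TerminalSize: a terminal's width and height
// measured in character cells.
type TerminalSize struct {
	Width  uint16
	Height uint16
}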
Diffstat (limited to 'vendor/k8s.io/apimachinery/pkg')
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS | 23
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/doc.go | 18
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/errors.go | 697
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go | 500
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go | 496
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go | 210
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go | 55
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go | 322
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go | 389
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go | 63
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go | 43
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go | 18
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go | 476
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go | 127
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go | 137
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go | 250
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/clock/clock.go | 407
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/framer/framer.go | 170
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go | 19
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go | 157
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go | 187
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go | 369
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go | 120
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go | 53
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go | 379
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/version/doc.go | 20
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/version/helpers.go | 88
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/version/types.go | 37
28 files changed, 0 insertions, 5830 deletions
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
deleted file mode 100644
index d18a17885..000000000
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
+++ /dev/null
@@ -1,23 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- brendandburns
-- derekwaynecarr
-- caesarxuchao
-- mikedanese
-- liggitt
-- nikhiljindal
-- gmarek
-- erictune
-- saad-ali
-- janetkuo
-- tallclair
-- dims
-- hongchaodeng
-- krousey
-- cjcullen
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
deleted file mode 100644
index 167baf680..000000000
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package errors provides detailed error types for api field validation.
-package errors // import "k8s.io/apimachinery/pkg/api/errors"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
deleted file mode 100644
index d3927d817..000000000
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package errors
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "reflect"
- "strings"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/validation/field"
-)
-
-// StatusError is an error intended for consumption by a REST API server; it can also be
-// reconstructed by clients from a REST response. Public to allow easy type switches.
-type StatusError struct {
- ErrStatus metav1.Status
-}
-
-// APIStatus is exposed by errors that can be converted to an api.Status object
-// for finer grained details.
-type APIStatus interface {
- Status() metav1.Status
-}
-
-var _ error = &StatusError{}
-
-// Error implements the Error interface.
-func (e *StatusError) Error() string {
- return e.ErrStatus.Message
-}
-
-// Status allows access to e's status without having to know the detailed workings
-// of StatusError.
-func (e *StatusError) Status() metav1.Status {
- return e.ErrStatus
-}
-
-// DebugError reports extended info about the error to debug output.
-func (e *StatusError) DebugError() (string, []interface{}) {
- if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil {
- return "server response object: %s", []interface{}{string(out)}
- }
- return "server response object: %#v", []interface{}{e.ErrStatus}
-}
-
-// HasStatusCause returns true if the provided error has a details cause
-// with the provided type name.
-func HasStatusCause(err error, name metav1.CauseType) bool {
- _, ok := StatusCause(err, name)
- return ok
-}
-
-// StatusCause returns the named cause from the provided error if it exists and
-// the error is of the type APIStatus. Otherwise it returns false.
-func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) {
- apierr, ok := err.(APIStatus)
- if !ok || apierr == nil || apierr.Status().Details == nil {
- return metav1.StatusCause{}, false
- }
- for _, cause := range apierr.Status().Details.Causes {
- if cause.Type == name {
- return cause, true
- }
- }
- return metav1.StatusCause{}, false
-}
-
-// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
-type UnexpectedObjectError struct {
- Object runtime.Object
-}
-
-// Error returns an error message describing 'u'.
-func (u *UnexpectedObjectError) Error() string {
- return fmt.Sprintf("unexpected object: %v", u.Object)
-}
-
-// FromObject generates a StatusError from a metav1.Status, if that is the type of obj; otherwise,
-// it returns an UnexpectedObjectError.
-func FromObject(obj runtime.Object) error {
- switch t := obj.(type) {
- case *metav1.Status:
- return &StatusError{ErrStatus: *t}
- case runtime.Unstructured:
- var status metav1.Status
- obj := t.UnstructuredContent()
- if !reflect.DeepEqual(obj["kind"], "Status") {
- break
- }
- if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil {
- return err
- }
- if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" {
- break
- }
- return &StatusError{ErrStatus: status}
- }
- return &UnexpectedObjectError{obj}
-}
-
-// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
-func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusNotFound,
- Reason: metav1.StatusReasonNotFound,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name),
- }}
-}
-
-// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
-func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusConflict,
- Reason: metav1.StatusReasonAlreadyExists,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name),
- }}
-}
-
-// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
-// action.
-func NewUnauthorized(reason string) *StatusError {
- message := reason
- if len(message) == 0 {
- message = "not authorized"
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusUnauthorized,
- Reason: metav1.StatusReasonUnauthorized,
- Message: message,
- }}
-}
-
-// NewForbidden returns an error indicating the requested action was forbidden
-func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
- var message string
- if qualifiedResource.Empty() {
- message = fmt.Sprintf("forbidden: %v", err)
- } else if name == "" {
- message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err)
- } else {
- message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err)
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusForbidden,
- Reason: metav1.StatusReasonForbidden,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: message,
- }}
-}
-
-// NewConflict returns an error indicating the item can't be updated as provided.
-func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusConflict,
- Reason: metav1.StatusReasonConflict,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err),
- }}
-}
-
-// NewApplyConflict returns an error including details on the request's apply conflicts.
-func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError {
- return &StatusError{ErrStatus: metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusConflict,
- Reason: metav1.StatusReasonConflict,
- Details: &metav1.StatusDetails{
- // TODO: Get obj details here?
- Causes: causes,
- },
- Message: message,
- }}
-}
-
-// NewGone returns an error indicating the item is no longer available at the server and no forwarding address is known.
-// DEPRECATED: Please use NewResourceExpired instead.
-func NewGone(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusGone,
- Reason: metav1.StatusReasonGone,
- Message: message,
- }}
-}
-
-// NewResourceExpired creates an error that indicates that the requested resource content has expired from
-// the server (usually due to a resourceVersion that is too old).
-func NewResourceExpired(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusGone,
- Reason: metav1.StatusReasonExpired,
- Message: message,
- }}
-}
-
-// NewInvalid returns an error indicating the item is invalid and cannot be processed.
-func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
- causes := make([]metav1.StatusCause, 0, len(errs))
- for i := range errs {
- err := errs[i]
- causes = append(causes, metav1.StatusCause{
- Type: metav1.CauseType(err.Type),
- Message: err.ErrorBody(),
- Field: err.Field,
- })
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusUnprocessableEntity,
- Reason: metav1.StatusReasonInvalid,
- Details: &metav1.StatusDetails{
- Group: qualifiedKind.Group,
- Kind: qualifiedKind.Kind,
- Name: name,
- Causes: causes,
- },
- Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()),
- }}
-}
-
-// NewBadRequest creates an error that indicates that the request is invalid and can not be processed.
-func NewBadRequest(reason string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusBadRequest,
- Reason: metav1.StatusReasonBadRequest,
- Message: reason,
- }}
-}
-
-// NewTooManyRequests creates an error that indicates that the client must try again later because
-// the specified endpoint is not accepting requests. More specific details should be provided
-// if the client should know why the failure was limited.
-func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusTooManyRequests,
- Reason: metav1.StatusReasonTooManyRequests,
- Message: message,
- Details: &metav1.StatusDetails{
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- }}
-}
-
-// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
-func NewServiceUnavailable(reason string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusServiceUnavailable,
- Reason: metav1.StatusReasonServiceUnavailable,
- Message: reason,
- }}
-}
-
-// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
-func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusMethodNotAllowed,
- Reason: metav1.StatusReasonMethodNotAllowed,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- },
- Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()),
- }}
-}
-
-// NewServerTimeout returns an error indicating the requested action could not be completed due to a
-// transient error, and the client should try again.
-func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusInternalServerError,
- Reason: metav1.StatusReasonServerTimeout,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: operation,
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()),
- }}
-}
-
-// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources; the Kind is just what we
-// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this.
-func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError {
- return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
-}
-
-// NewInternalError returns an error indicating the item is invalid and cannot be processed.
-func NewInternalError(err error) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusInternalServerError,
- Reason: metav1.StatusReasonInternalError,
- Details: &metav1.StatusDetails{
- Causes: []metav1.StatusCause{{Message: err.Error()}},
- },
- Message: fmt.Sprintf("Internal error occurred: %v", err),
- }}
-}
-
-// NewTimeoutError returns an error indicating that a timeout occurred before the request
-// could be completed. Clients may retry, but the operation may still complete.
-func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusGatewayTimeout,
- Reason: metav1.StatusReasonTimeout,
- Message: fmt.Sprintf("Timeout: %s", message),
- Details: &metav1.StatusDetails{
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- }}
-}
-
-// NewTooManyRequestsError returns an error indicating that the request was rejected because
-// the server has received too many requests. Client should wait and retry. But if the request
-// is perishable, then the client should not retry the request.
-func NewTooManyRequestsError(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusTooManyRequests,
- Reason: metav1.StatusReasonTooManyRequests,
- Message: fmt.Sprintf("Too many requests: %s", message),
- }}
-}
-
-// NewRequestEntityTooLargeError returns an error indicating that the request
-// entity was too large.
-func NewRequestEntityTooLargeError(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusRequestEntityTooLarge,
- Reason: metav1.StatusReasonRequestEntityTooLarge,
- Message: fmt.Sprintf("Request entity too large: %s", message),
- }}
-}
-
-// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
-func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
- reason := metav1.StatusReasonUnknown
- message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
- switch code {
- case http.StatusConflict:
- if verb == "POST" {
- reason = metav1.StatusReasonAlreadyExists
- } else {
- reason = metav1.StatusReasonConflict
- }
- message = "the server reported a conflict"
- case http.StatusNotFound:
- reason = metav1.StatusReasonNotFound
- message = "the server could not find the requested resource"
- case http.StatusBadRequest:
- reason = metav1.StatusReasonBadRequest
- message = "the server rejected our request for an unknown reason"
- case http.StatusUnauthorized:
- reason = metav1.StatusReasonUnauthorized
- message = "the server has asked for the client to provide credentials"
- case http.StatusForbidden:
- reason = metav1.StatusReasonForbidden
- // the server message has details about who is trying to perform what action. Keep its message.
- message = serverMessage
- case http.StatusNotAcceptable:
- reason = metav1.StatusReasonNotAcceptable
- // the server message has details about what types are acceptable
- if len(serverMessage) == 0 || serverMessage == "unknown" {
- message = "the server was unable to respond with a content type that the client supports"
- } else {
- message = serverMessage
- }
- case http.StatusUnsupportedMediaType:
- reason = metav1.StatusReasonUnsupportedMediaType
- // the server message has details about what types are acceptable
- message = serverMessage
- case http.StatusMethodNotAllowed:
- reason = metav1.StatusReasonMethodNotAllowed
- message = "the server does not allow this method on the requested resource"
- case http.StatusUnprocessableEntity:
- reason = metav1.StatusReasonInvalid
- message = "the server rejected our request due to an error in our request"
- case http.StatusServiceUnavailable:
- reason = metav1.StatusReasonServiceUnavailable
- message = "the server is currently unable to handle the request"
- case http.StatusGatewayTimeout:
- reason = metav1.StatusReasonTimeout
- message = "the server was unable to return a response in the time allotted, but may still be processing the request"
- case http.StatusTooManyRequests:
- reason = metav1.StatusReasonTooManyRequests
- message = "the server has received too many requests and has asked us to try again later"
- default:
- if code >= 500 {
- reason = metav1.StatusReasonInternalError
- message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage)
- }
- }
- switch {
- case !qualifiedResource.Empty() && len(name) > 0:
- message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name)
- case !qualifiedResource.Empty():
- message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String())
- }
- var causes []metav1.StatusCause
- if isUnexpectedResponse {
- causes = []metav1.StatusCause{
- {
- Type: metav1.CauseTypeUnexpectedServerResponse,
- Message: serverMessage,
- },
- }
- } else {
- causes = nil
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: int32(code),
- Reason: reason,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
-
- Causes: causes,
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- Message: message,
- }}
-}
-
-// IsNotFound returns true if the specified error was created by NewNotFound.
-// It supports wrapped errors.
-func IsNotFound(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonNotFound
-}
-
-// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
-// It supports wrapped errors.
-func IsAlreadyExists(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonAlreadyExists
-}
-
-// IsConflict determines if the err is an error which indicates the provided update conflicts.
-// It supports wrapped errors.
-func IsConflict(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonConflict
-}
-
-// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
-// It supports wrapped errors.
-func IsInvalid(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonInvalid
-}
-
-// IsGone is true if the error indicates the requested resource is no longer available.
-// It supports wrapped errors.
-func IsGone(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonGone
-}
-
-// IsResourceExpired is true if the error indicates the resource has expired and the current action is
-// no longer possible.
-// It supports wrapped errors.
-func IsResourceExpired(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonExpired
-}
-
-// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header
-// It supports wrapped errors.
-func IsNotAcceptable(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonNotAcceptable
-}
-
-// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header
-// It supports wrapped errors.
-func IsUnsupportedMediaType(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType
-}
-
-// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
-// be performed because it is not supported by the server.
-// It supports wrapped errors.
-func IsMethodNotSupported(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed
-}
-
-// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
-// It supports wrapped errors.
-func IsServiceUnavailable(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonServiceUnavailable
-}
-
-// IsBadRequest determines if err is an error which indicates that the request is invalid.
-// It supports wrapped errors.
-func IsBadRequest(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonBadRequest
-}
-
-// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
-// requires authentication by the user.
-// It supports wrapped errors.
-func IsUnauthorized(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonUnauthorized
-}
-
-// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
-// be completed as requested.
-// It supports wrapped errors.
-func IsForbidden(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonForbidden
-}
-
-// IsTimeout determines if err is an error which indicates that request times out due to long
-// processing.
-// It supports wrapped errors.
-func IsTimeout(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonTimeout
-}
-
-// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
-// by the client.
-// It supports wrapped errors.
-func IsServerTimeout(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonServerTimeout
-}
-
-// IsInternalError determines if err is an error which indicates an internal server error.
-// It supports wrapped errors.
-func IsInternalError(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonInternalError
-}
-
-// IsTooManyRequests determines if err is an error which indicates that there are too many requests
-// that the server cannot handle.
-// It supports wrapped errors.
-func IsTooManyRequests(err error) bool {
- if ReasonForError(err) == metav1.StatusReasonTooManyRequests {
- return true
- }
- if status := APIStatus(nil); errors.As(err, &status) {
- return status.Status().Code == http.StatusTooManyRequests
- }
- return false
-}
-
-// IsRequestEntityTooLargeError determines if err is an error which indicates
-// the request entity is too large.
-// It supports wrapped errors.
-func IsRequestEntityTooLargeError(err error) bool {
- if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
- return true
- }
- if status := APIStatus(nil); errors.As(err, &status) {
- return status.Status().Code == http.StatusRequestEntityTooLarge
- }
- return false
-}
-
-// IsUnexpectedServerError returns true if the server response was not in the expected API format,
-// and may be the result of another HTTP actor.
-// It supports wrapped errors.
-func IsUnexpectedServerError(err error) bool {
- if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil {
- for _, cause := range status.Status().Details.Causes {
- if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
- return true
- }
- }
- }
- return false
-}
-
-// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
-// It supports wrapped errors.
-func IsUnexpectedObjectError(err error) bool {
- uoe := &UnexpectedObjectError{}
- return err != nil && errors.As(err, &uoe)
-}
-
-// SuggestsClientDelay returns true if this error suggests a client delay as well as the
-// suggested seconds to wait, or false if the error does not imply a wait. It does not
-// address whether the error *should* be retried, since some errors (like a 3xx) may
-// request delay without retry.
-// It supports wrapped errors.
-func SuggestsClientDelay(err error) (int, bool) {
- if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil {
- switch t.Status().Reason {
- // this StatusReason explicitly requests the caller to delay the action
- case metav1.StatusReasonServerTimeout:
- return int(t.Status().Details.RetryAfterSeconds), true
- }
- // If the client requests that we retry after a certain number of seconds
- if t.Status().Details.RetryAfterSeconds > 0 {
- return int(t.Status().Details.RetryAfterSeconds), true
- }
- }
- return 0, false
-}
-
-// ReasonForError returns the status reason for a particular error.
-// It supports wrapped errors.
-func ReasonForError(err error) metav1.StatusReason {
- if status := APIStatus(nil); errors.As(err, &status) {
- return status.Status().Reason
- }
- return metav1.StatusReasonUnknown
-}
-
-// ErrorReporter converts generic errors into runtime.Object errors without
-// requiring the caller to take a dependency on meta/v1 (where Status lives).
-// This prevents circular dependencies in core watch code.
-type ErrorReporter struct {
- code int
- verb string
- reason string
-}
-
-// NewClientErrorReporter will respond with valid v1.Status objects that report
-// unexpected server responses. Primarily used by watch to report errors when
-// we attempt to decode a response from the server and it is not in the form
-// we expect. Because watch is a dependency of the core api, we can't return
-// meta/v1.Status in that package and so must inject this interface to convert a
-// generic error as appropriate. The reason is passed as a unique status cause
-// on the returned status, otherwise the generic "ClientError" is returned.
-func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter {
- return &ErrorReporter{
- code: code,
- verb: verb,
- reason: reason,
- }
-}
-
-// AsObject returns a valid error runtime.Object (a v1.Status) for the given
-// error, using the code and verb of the reporter type. The error is set to
-// indicate that this was an unexpected server response.
-func (r *ErrorReporter) AsObject(err error) runtime.Object {
- status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true)
- if status.ErrStatus.Details == nil {
- status.ErrStatus.Details = &metav1.StatusDetails{}
- }
- reason := r.reason
- if len(reason) == 0 {
- reason = "ClientError"
- }
- status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{
- Type: metav1.CauseType(reason),
- Message: err.Error(),
- })
- return &status.ErrStatus
-}
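For reference, the error helpers deleted above are consumed by API clients roughly as in the sketch below; it targets the upstream k8s.io/apimachinery module, and the resource and name are made up for illustration.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build a NotFound error the way a REST server would report it.
	gr := schema.GroupResource{Group: "", Resource: "pods"}
	var err error = apierrors.NewNotFound(gr, "demo-pod")

	// Inspect it the way a client would.
	fmt.Println(apierrors.IsNotFound(err))     // true
	fmt.Println(apierrors.ReasonForError(err)) // NotFound

	// The underlying metav1.Status is reachable via the APIStatus interface.
	if status, ok := err.(apierrors.APIStatus); ok {
		fmt.Println(status.Status().Code) // 404
	}
}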
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
deleted file mode 100644
index 7b101ea51..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package unstructured
-
-import (
- gojson "encoding/json"
- "fmt"
- "io"
- "strings"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/json"
- "k8s.io/klog/v2"
-)
-
-// NestedFieldCopy returns a deep copy of the value of a nested field.
-// Returns false if the value is missing.
-// No error is returned for a nil field.
-//
-// Note: fields passed to this function are treated as keys within the passed
-// object; no array/slice syntax is supported.
-func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- return runtime.DeepCopyJSONValue(val), true, nil
-}
-
-// NestedFieldNoCopy returns a reference to a nested field.
-// Returns false if value is not found and an error if unable
-// to traverse obj.
-//
-// Note: fields passed to this function are treated as keys within the passed
-// object; no array/slice syntax is supported.
-func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
- var val interface{} = obj
-
- for i, field := range fields {
- if val == nil {
- return nil, false, nil
- }
- if m, ok := val.(map[string]interface{}); ok {
- val, ok = m[field]
- if !ok {
- return nil, false, nil
- }
- } else {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val)
- }
- }
- return val, true, nil
-}
-
-// NestedString returns the string value of a nested field.
-// Returns false if value is not found and an error if not a string.
-func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return "", found, err
- }
- s, ok := val.(string)
- if !ok {
- return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val)
- }
- return s, true, nil
-}
-
-// NestedBool returns the bool value of a nested field.
-// Returns false if value is not found and an error if not a bool.
-func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return false, found, err
- }
- b, ok := val.(bool)
- if !ok {
- return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val)
- }
- return b, true, nil
-}
-
-// NestedFloat64 returns the float64 value of a nested field.
-// Returns false if value is not found and an error if not a float64.
-func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return 0, found, err
- }
- f, ok := val.(float64)
- if !ok {
- return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val)
- }
- return f, true, nil
-}
-
-// NestedInt64 returns the int64 value of a nested field.
-// Returns false if value is not found and an error if not an int64.
-func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return 0, found, err
- }
- i, ok := val.(int64)
- if !ok {
- return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val)
- }
- return i, true, nil
-}
-
-// NestedStringSlice returns a copy of []string value of a nested field.
-// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
-func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- m, ok := val.([]interface{})
- if !ok {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
- }
- strSlice := make([]string, 0, len(m))
- for _, v := range m {
- if str, ok := v.(string); ok {
- strSlice = append(strSlice, str)
- } else {
- return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v)
- }
- }
- return strSlice, true, nil
-}
-
-// NestedSlice returns a deep copy of []interface{} value of a nested field.
-// Returns false if value is not found and an error if not a []interface{}.
-func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- _, ok := val.([]interface{})
- if !ok {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
- }
- return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil
-}
-
-// NestedStringMap returns a copy of map[string]string value of a nested field.
-// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
-func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- strMap := make(map[string]string, len(m))
- for k, v := range m {
- if str, ok := v.(string); ok {
- strMap[k] = str
- } else {
- return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v)
- }
- }
- return strMap, true, nil
-}
-
-// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
-// Returns false if value is not found and an error if not a map[string]interface{}.
-func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- return runtime.DeepCopyJSON(m), true, nil
-}
-
-// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
-// Returns false if value is not found and an error if not a map[string]interface{}.
-func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- m, ok := val.(map[string]interface{})
- if !ok {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
- }
- return m, true, nil
-}
-
-// SetNestedField sets the value of a nested field to a deep copy of the value provided.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error {
- return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...)
-}
-
-func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error {
- m := obj
-
- for i, field := range fields[:len(fields)-1] {
- if val, ok := m[field]; ok {
- if valMap, ok := val.(map[string]interface{}); ok {
- m = valMap
- } else {
- return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1]))
- }
- } else {
- newVal := make(map[string]interface{})
- m[field] = newVal
- m = newVal
- }
- }
- m[fields[len(fields)-1]] = value
- return nil
-}
-
-// SetNestedStringSlice sets the string slice value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error {
- m := make([]interface{}, 0, len(value)) // convert []string into []interface{}
- for _, v := range value {
- m = append(m, v)
- }
- return setNestedFieldNoCopy(obj, m, fields...)
-}
-
-// SetNestedSlice sets the slice value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error {
- return SetNestedField(obj, value, fields...)
-}
-
-// SetNestedStringMap sets the map[string]string value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error {
- m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{}
- for k, v := range value {
- m[k] = v
- }
- return setNestedFieldNoCopy(obj, m, fields...)
-}
-
-// SetNestedMap sets the map[string]interface{} value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error {
- return SetNestedField(obj, value, fields...)
-}
-
-// RemoveNestedField removes the nested field from the obj.
-func RemoveNestedField(obj map[string]interface{}, fields ...string) {
- m := obj
- for _, field := range fields[:len(fields)-1] {
- if x, ok := m[field].(map[string]interface{}); ok {
- m = x
- } else {
- return
- }
- }
- delete(m, fields[len(fields)-1])
-}
-
-func getNestedString(obj map[string]interface{}, fields ...string) string {
- val, found, err := NestedString(obj, fields...)
- if !found || err != nil {
- return ""
- }
- return val
-}
-
-func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 {
- val, found, err := NestedInt64(obj, fields...)
- if !found || err != nil {
- return nil
- }
- return &val
-}
-
-func jsonPath(fields []string) string {
- return "." + strings.Join(fields, ".")
-}
-
-func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference {
-	// though this field is a *bool, when decoded from JSON it's
-	// unmarshalled as a bool.
- var controllerPtr *bool
- if controller, found, err := NestedBool(v, "controller"); err == nil && found {
- controllerPtr = &controller
- }
- var blockOwnerDeletionPtr *bool
- if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found {
- blockOwnerDeletionPtr = &blockOwnerDeletion
- }
- return metav1.OwnerReference{
- Kind: getNestedString(v, "kind"),
- Name: getNestedString(v, "name"),
- APIVersion: getNestedString(v, "apiVersion"),
- UID: types.UID(getNestedString(v, "uid")),
- Controller: controllerPtr,
- BlockOwnerDeletion: blockOwnerDeletionPtr,
- }
-}
-
-// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
-// type, which can be used for generic access to objects without a predefined scheme.
-// TODO: move into serializer/json.
-var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{}
-
-type unstructuredJSONScheme struct{}
-
-const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON"
-
-func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- var err error
- if obj != nil {
- err = s.decodeInto(data, obj)
- } else {
- obj, err = s.decode(data)
- }
-
- if err != nil {
- return nil, nil, err
- }
-
- gvk := obj.GetObjectKind().GroupVersionKind()
- if len(gvk.Kind) == 0 {
- return nil, &gvk, runtime.NewMissingKindErr(string(data))
- }
-
- return obj, &gvk, nil
-}
-
-func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error {
- switch t := obj.(type) {
- case *Unstructured:
- return json.NewEncoder(w).Encode(t.Object)
- case *UnstructuredList:
- items := make([]interface{}, 0, len(t.Items))
- for _, i := range t.Items {
- items = append(items, i.Object)
- }
- listObj := make(map[string]interface{}, len(t.Object)+1)
- for k, v := range t.Object { // Make a shallow copy
- listObj[k] = v
- }
- listObj["items"] = items
- return json.NewEncoder(w).Encode(listObj)
- case *runtime.Unknown:
- // TODO: Unstructured needs to deal with ContentType.
- _, err := w.Write(t.Raw)
- return err
- default:
- return json.NewEncoder(w).Encode(t)
- }
-}
-
-// Identifier implements runtime.Encoder interface.
-func (unstructuredJSONScheme) Identifier() runtime.Identifier {
- return unstructuredJSONSchemeIdentifier
-}
-
-func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) {
- type detector struct {
- Items gojson.RawMessage
- }
- var det detector
- if err := json.Unmarshal(data, &det); err != nil {
- return nil, err
- }
-
- if det.Items != nil {
- list := &UnstructuredList{}
- err := s.decodeToList(data, list)
- return list, err
- }
-
- // No Items field, so it wasn't a list.
- unstruct := &Unstructured{}
- err := s.decodeToUnstructured(data, unstruct)
- return unstruct, err
-}
-
-func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error {
- switch x := obj.(type) {
- case *Unstructured:
- return s.decodeToUnstructured(data, x)
- case *UnstructuredList:
- return s.decodeToList(data, x)
- default:
- return json.Unmarshal(data, x)
- }
-}
-
-func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
- m := make(map[string]interface{})
- if err := json.Unmarshal(data, &m); err != nil {
- return err
- }
-
- unstruct.Object = m
-
- return nil
-}
-
-func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
- type decodeList struct {
- Items []gojson.RawMessage
- }
-
- var dList decodeList
- if err := json.Unmarshal(data, &dList); err != nil {
- return err
- }
-
- if err := json.Unmarshal(data, &list.Object); err != nil {
- return err
- }
-
- // For typed lists, e.g., a PodList, API server doesn't set each item's
- // APIVersion and Kind. We need to set it.
- listAPIVersion := list.GetAPIVersion()
- listKind := list.GetKind()
- itemKind := strings.TrimSuffix(listKind, "List")
-
- delete(list.Object, "items")
- list.Items = make([]Unstructured, 0, len(dList.Items))
- for _, i := range dList.Items {
- unstruct := &Unstructured{}
- if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
- return err
- }
- // This is hacky. Set the item's Kind and APIVersion to those inferred
- // from the List.
- if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
- unstruct.SetKind(itemKind)
- unstruct.SetAPIVersion(listAPIVersion)
- }
- list.Items = append(list.Items, *unstruct)
- }
- return nil
-}
-
-type jsonFallbackEncoder struct {
- encoder runtime.Encoder
- identifier runtime.Identifier
-}
-
-func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder {
- result := map[string]string{
- "name": "fallback",
- "base": string(encoder.Identifier()),
- }
- identifier, err := gojson.Marshal(result)
- if err != nil {
- klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err)
- }
- return &jsonFallbackEncoder{
- encoder: encoder,
- identifier: runtime.Identifier(identifier),
- }
-}
-
-func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
- // There is no need to handle runtime.CacheableObject, as we only
- // fallback to other encoders here.
- err := c.encoder.Encode(obj, w)
- if runtime.IsNotRegisteredError(err) {
- switch obj.(type) {
- case *Unstructured, *UnstructuredList:
- return UnstructuredJSONScheme.Encode(obj, w)
- }
- }
- return err
-}
-
-// Identifier implements runtime.Encoder interface.
-func (c *jsonFallbackEncoder) Identifier() runtime.Identifier {
- return c.identifier
-}
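For reference, the nested-field helpers deleted above are used to read and write fields of an untyped, JSON-compatible object; a minimal sketch against the upstream package, with the object contents made up for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A hand-built JSON-compatible object, the shape these helpers expect.
	obj := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata": map[string]interface{}{
			"name": "demo",
		},
	}

	// Read a nested field; the bool reports whether the path was found.
	name, found, err := unstructured.NestedString(obj, "metadata", "name")
	fmt.Println(name, found, err) // demo true <nil>

	// Write a nested field; missing intermediate maps are created as needed.
	if err := unstructured.SetNestedField(obj, "default", "metadata", "namespace"); err != nil {
		fmt.Println(err)
	}
}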
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
deleted file mode 100644
index d1903394d..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package unstructured
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// Unstructured allows objects that do not have Golang structs registered to be manipulated
-// generically. This can be used to deal with the API objects from a plug-in. Unstructured
-// objects still have functioning TypeMeta features-- kind, version, etc.
-//
-// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this
-// type if you are dealing with objects that are not in the server meta v1 schema.
-//
-// TODO: make the serialization part of this type distinct from the field accessors.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:deepcopy-gen=true
-type Unstructured struct {
- // Object is a JSON compatible map with string, float, int, bool, []interface{}, or
- // map[string]interface{}
- // children.
- Object map[string]interface{}
-}
-
-var _ metav1.Object = &Unstructured{}
-var _ runtime.Unstructured = &Unstructured{}
-var _ metav1.ListInterface = &Unstructured{}
-
-func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj }
-
-func (obj *Unstructured) IsList() bool {
- field, ok := obj.Object["items"]
- if !ok {
- return false
- }
- _, ok = field.([]interface{})
- return ok
-}
-func (obj *Unstructured) ToList() (*UnstructuredList, error) {
- if !obj.IsList() {
- // return an empty list back
- return &UnstructuredList{Object: obj.Object}, nil
- }
-
- ret := &UnstructuredList{}
- ret.Object = obj.Object
-
- err := obj.EachListItem(func(item runtime.Object) error {
- castItem := item.(*Unstructured)
- ret.Items = append(ret.Items, *castItem)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return ret, nil
-}
-
-func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error {
- field, ok := obj.Object["items"]
- if !ok {
- return errors.New("content is not a list")
- }
- items, ok := field.([]interface{})
- if !ok {
- return fmt.Errorf("content is not a list: %T", field)
- }
- for _, item := range items {
- child, ok := item.(map[string]interface{})
- if !ok {
- return fmt.Errorf("items member is not an object: %T", child)
- }
- if err := fn(&Unstructured{Object: child}); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (obj *Unstructured) UnstructuredContent() map[string]interface{} {
- if obj.Object == nil {
- return make(map[string]interface{})
- }
- return obj.Object
-}
-
-func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) {
- obj.Object = content
-}
-
-// MarshalJSON ensures that the unstructured object produces proper
-// JSON when passed to Go's standard JSON library.
-func (u *Unstructured) MarshalJSON() ([]byte, error) {
- var buf bytes.Buffer
- err := UnstructuredJSONScheme.Encode(u, &buf)
- return buf.Bytes(), err
-}
-
-// UnmarshalJSON ensures that the unstructured object properly decodes
-// JSON when passed to Go's standard JSON library.
-func (u *Unstructured) UnmarshalJSON(b []byte) error {
- _, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
- return err
-}
-
-// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
-// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
-func (in *Unstructured) NewEmptyInstance() runtime.Unstructured {
- out := new(Unstructured)
- if in != nil {
- out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind())
- }
- return out
-}
-
-func (in *Unstructured) DeepCopy() *Unstructured {
- if in == nil {
- return nil
- }
- out := new(Unstructured)
- *out = *in
- out.Object = runtime.DeepCopyJSON(in.Object)
- return out
-}
-
-func (u *Unstructured) setNestedField(value interface{}, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedField(u.Object, value, fields...)
-}
-
-func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedStringSlice(u.Object, value, fields...)
-}
-
-func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedSlice(u.Object, value, fields...)
-}
-
-func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedStringMap(u.Object, value, fields...)
-}
-
-func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference {
- field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences")
- if !found || err != nil {
- return nil
- }
- original, ok := field.([]interface{})
- if !ok {
- return nil
- }
- ret := make([]metav1.OwnerReference, 0, len(original))
- for _, obj := range original {
- o, ok := obj.(map[string]interface{})
- if !ok {
- // expected map[string]interface{}, got something else
- return nil
- }
- ret = append(ret, extractOwnerReference(o))
- }
- return ret
-}
-
-func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) {
- if references == nil {
- RemoveNestedField(u.Object, "metadata", "ownerReferences")
- return
- }
-
- newReferences := make([]interface{}, 0, len(references))
- for _, reference := range references {
- out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err))
- continue
- }
- newReferences = append(newReferences, out)
- }
- u.setNestedField(newReferences, "metadata", "ownerReferences")
-}
-
-func (u *Unstructured) GetAPIVersion() string {
- return getNestedString(u.Object, "apiVersion")
-}
-
-func (u *Unstructured) SetAPIVersion(version string) {
- u.setNestedField(version, "apiVersion")
-}
-
-func (u *Unstructured) GetKind() string {
- return getNestedString(u.Object, "kind")
-}
-
-func (u *Unstructured) SetKind(kind string) {
- u.setNestedField(kind, "kind")
-}
-
-func (u *Unstructured) GetNamespace() string {
- return getNestedString(u.Object, "metadata", "namespace")
-}
-
-func (u *Unstructured) SetNamespace(namespace string) {
- if len(namespace) == 0 {
- RemoveNestedField(u.Object, "metadata", "namespace")
- return
- }
- u.setNestedField(namespace, "metadata", "namespace")
-}
-
-func (u *Unstructured) GetName() string {
- return getNestedString(u.Object, "metadata", "name")
-}
-
-func (u *Unstructured) SetName(name string) {
- if len(name) == 0 {
- RemoveNestedField(u.Object, "metadata", "name")
- return
- }
- u.setNestedField(name, "metadata", "name")
-}
-
-func (u *Unstructured) GetGenerateName() string {
- return getNestedString(u.Object, "metadata", "generateName")
-}
-
-func (u *Unstructured) SetGenerateName(generateName string) {
- if len(generateName) == 0 {
- RemoveNestedField(u.Object, "metadata", "generateName")
- return
- }
- u.setNestedField(generateName, "metadata", "generateName")
-}
-
-func (u *Unstructured) GetUID() types.UID {
- return types.UID(getNestedString(u.Object, "metadata", "uid"))
-}
-
-func (u *Unstructured) SetUID(uid types.UID) {
- if len(string(uid)) == 0 {
- RemoveNestedField(u.Object, "metadata", "uid")
- return
- }
- u.setNestedField(string(uid), "metadata", "uid")
-}
-
-func (u *Unstructured) GetResourceVersion() string {
- return getNestedString(u.Object, "metadata", "resourceVersion")
-}
-
-func (u *Unstructured) SetResourceVersion(resourceVersion string) {
- if len(resourceVersion) == 0 {
- RemoveNestedField(u.Object, "metadata", "resourceVersion")
- return
- }
- u.setNestedField(resourceVersion, "metadata", "resourceVersion")
-}
-
-func (u *Unstructured) GetGeneration() int64 {
- val, found, err := NestedInt64(u.Object, "metadata", "generation")
- if !found || err != nil {
- return 0
- }
- return val
-}
-
-func (u *Unstructured) SetGeneration(generation int64) {
- if generation == 0 {
- RemoveNestedField(u.Object, "metadata", "generation")
- return
- }
- u.setNestedField(generation, "metadata", "generation")
-}
-
-func (u *Unstructured) GetSelfLink() string {
- return getNestedString(u.Object, "metadata", "selfLink")
-}
-
-func (u *Unstructured) SetSelfLink(selfLink string) {
- if len(selfLink) == 0 {
- RemoveNestedField(u.Object, "metadata", "selfLink")
- return
- }
- u.setNestedField(selfLink, "metadata", "selfLink")
-}
-
-func (u *Unstructured) GetContinue() string {
- return getNestedString(u.Object, "metadata", "continue")
-}
-
-func (u *Unstructured) SetContinue(c string) {
- if len(c) == 0 {
- RemoveNestedField(u.Object, "metadata", "continue")
- return
- }
- u.setNestedField(c, "metadata", "continue")
-}
-
-func (u *Unstructured) GetRemainingItemCount() *int64 {
- return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
-}
-
-func (u *Unstructured) SetRemainingItemCount(c *int64) {
- if c == nil {
- RemoveNestedField(u.Object, "metadata", "remainingItemCount")
- } else {
- u.setNestedField(*c, "metadata", "remainingItemCount")
- }
-}
-
-func (u *Unstructured) GetCreationTimestamp() metav1.Time {
- var timestamp metav1.Time
- timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
- return timestamp
-}
-
-func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) {
- ts, _ := timestamp.MarshalQueryParameter()
- if len(ts) == 0 || timestamp.Time.IsZero() {
- RemoveNestedField(u.Object, "metadata", "creationTimestamp")
- return
- }
- u.setNestedField(ts, "metadata", "creationTimestamp")
-}
-
-func (u *Unstructured) GetDeletionTimestamp() *metav1.Time {
- var timestamp metav1.Time
- timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp"))
- if timestamp.IsZero() {
- return nil
- }
- return &timestamp
-}
-
-func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) {
- if timestamp == nil {
- RemoveNestedField(u.Object, "metadata", "deletionTimestamp")
- return
- }
- ts, _ := timestamp.MarshalQueryParameter()
- u.setNestedField(ts, "metadata", "deletionTimestamp")
-}
-
-func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 {
- val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds")
- if !found || err != nil {
- return nil
- }
- return &val
-}
-
-func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
- if deletionGracePeriodSeconds == nil {
- RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds")
- return
- }
- u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds")
-}
-
-func (u *Unstructured) GetLabels() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
- return m
-}
-
-func (u *Unstructured) SetLabels(labels map[string]string) {
- if labels == nil {
- RemoveNestedField(u.Object, "metadata", "labels")
- return
- }
- u.setNestedMap(labels, "metadata", "labels")
-}
-
-func (u *Unstructured) GetAnnotations() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
- return m
-}
-
-func (u *Unstructured) SetAnnotations(annotations map[string]string) {
- if annotations == nil {
- RemoveNestedField(u.Object, "metadata", "annotations")
- return
- }
- u.setNestedMap(annotations, "metadata", "annotations")
-}
-
-func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) {
- u.SetAPIVersion(gvk.GroupVersion().String())
- u.SetKind(gvk.Kind)
-}
-
-func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind {
- gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
- if err != nil {
- return schema.GroupVersionKind{}
- }
- gvk := gv.WithKind(u.GetKind())
- return gvk
-}
-
-func (u *Unstructured) GetFinalizers() []string {
- val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers")
- return val
-}
-
-func (u *Unstructured) SetFinalizers(finalizers []string) {
- if finalizers == nil {
- RemoveNestedField(u.Object, "metadata", "finalizers")
- return
- }
- u.setNestedStringSlice(finalizers, "metadata", "finalizers")
-}
-
-func (u *Unstructured) GetClusterName() string {
- return getNestedString(u.Object, "metadata", "clusterName")
-}
-
-func (u *Unstructured) SetClusterName(clusterName string) {
- if len(clusterName) == 0 {
- RemoveNestedField(u.Object, "metadata", "clusterName")
- return
- }
- u.setNestedField(clusterName, "metadata", "clusterName")
-}
-
-func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
- items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
- if !found || err != nil {
- return nil
- }
- managedFields := []metav1.ManagedFieldsEntry{}
- for _, item := range items {
- m, ok := item.(map[string]interface{})
- if !ok {
- utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item))
- return nil
- }
- out := metav1.ManagedFieldsEntry{}
- if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err))
- return nil
- }
- managedFields = append(managedFields, out)
- }
- return managedFields
-}
-
-func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) {
- if managedFields == nil {
- RemoveNestedField(u.Object, "metadata", "managedFields")
- return
- }
- items := []interface{}{}
- for _, managedFieldsEntry := range managedFields {
- out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err))
- return
- }
- items = append(items, out)
- }
- u.setNestedSlice(items, "metadata", "managedFields")
-}
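
The accessors above all funnel through the package's nested-field helpers and lazily allocate u.Object on first write. As a minimal, self-contained sketch of how a caller exercises them (the Pod kind, name, and label values below are illustrative assumptions, not taken from this repository):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    func main() {
        // Start from a zero value; the setters allocate u.Object as needed.
        u := &unstructured.Unstructured{}
        u.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
        u.SetName("demo")
        u.SetNamespace("default")
        u.SetLabels(map[string]string{"app": "demo"})

        // The getters read back through the same nested-field helpers.
        name, found, err := unstructured.NestedString(u.Object, "metadata", "name")
        fmt.Println(u.GetAPIVersion(), u.GetKind(), name, found, err)
    }
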
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
deleted file mode 100644
index 5028f5fb5..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package unstructured
-
-import (
- "bytes"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-var _ runtime.Unstructured = &UnstructuredList{}
-var _ metav1.ListInterface = &UnstructuredList{}
-
-// UnstructuredList allows lists that do not have Golang structs
-// registered to be manipulated generically. This can be used to deal
-// with the API lists from a plug-in.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:deepcopy-gen=true
-type UnstructuredList struct {
- Object map[string]interface{}
-
- // Items is a list of unstructured objects.
- Items []Unstructured `json:"items"`
-}
-
-func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u }
-
-func (u *UnstructuredList) IsList() bool { return true }
-
-func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error {
- for i := range u.Items {
- if err := fn(&u.Items[i]); err != nil {
- return err
- }
- }
- return nil
-}
-
-// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
-// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
-func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured {
- out := new(UnstructuredList)
- if u != nil {
- out.SetGroupVersionKind(u.GroupVersionKind())
- }
- return out
-}
-
-// UnstructuredContent returns a map containing an overlay of the Items field onto
-// the Object field. The Items field always overwrites the overlay.
-func (u *UnstructuredList) UnstructuredContent() map[string]interface{} {
- out := make(map[string]interface{}, len(u.Object)+1)
-
- // shallow copy every property
- for k, v := range u.Object {
- out[k] = v
- }
-
- items := make([]interface{}, len(u.Items))
- for i, item := range u.Items {
- items[i] = item.UnstructuredContent()
- }
- out["items"] = items
- return out
-}
-
-// SetUnstructuredContent obeys the conventions of List and keeps Items and the items
-// array in sync. If items is not an array of objects in the incoming map, then any
-// mismatched item will be removed.
-func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) {
- obj.Object = content
- if content == nil {
- obj.Items = nil
- return
- }
- items, ok := obj.Object["items"].([]interface{})
- if !ok || items == nil {
- items = []interface{}{}
- }
- unstructuredItems := make([]Unstructured, 0, len(items))
- newItems := make([]interface{}, 0, len(items))
- for _, item := range items {
- o, ok := item.(map[string]interface{})
- if !ok {
- continue
- }
- unstructuredItems = append(unstructuredItems, Unstructured{Object: o})
- newItems = append(newItems, o)
- }
- obj.Items = unstructuredItems
- obj.Object["items"] = newItems
-}
-
-func (u *UnstructuredList) DeepCopy() *UnstructuredList {
- if u == nil {
- return nil
- }
- out := new(UnstructuredList)
- *out = *u
- out.Object = runtime.DeepCopyJSON(u.Object)
- out.Items = make([]Unstructured, len(u.Items))
- for i := range u.Items {
- u.Items[i].DeepCopyInto(&out.Items[i])
- }
- return out
-}
-
-// MarshalJSON ensures that the unstructured list object produces proper
-// JSON when passed to Go's standard JSON library.
-func (u *UnstructuredList) MarshalJSON() ([]byte, error) {
- var buf bytes.Buffer
- err := UnstructuredJSONScheme.Encode(u, &buf)
- return buf.Bytes(), err
-}
-
-// UnmarshalJSON ensures that the unstructured list object properly
-// decodes JSON when passed to Go's standard JSON library.
-func (u *UnstructuredList) UnmarshalJSON(b []byte) error {
- _, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
- return err
-}
-
-func (u *UnstructuredList) GetAPIVersion() string {
- return getNestedString(u.Object, "apiVersion")
-}
-
-func (u *UnstructuredList) SetAPIVersion(version string) {
- u.setNestedField(version, "apiVersion")
-}
-
-func (u *UnstructuredList) GetKind() string {
- return getNestedString(u.Object, "kind")
-}
-
-func (u *UnstructuredList) SetKind(kind string) {
- u.setNestedField(kind, "kind")
-}
-
-func (u *UnstructuredList) GetResourceVersion() string {
- return getNestedString(u.Object, "metadata", "resourceVersion")
-}
-
-func (u *UnstructuredList) SetResourceVersion(version string) {
- u.setNestedField(version, "metadata", "resourceVersion")
-}
-
-func (u *UnstructuredList) GetSelfLink() string {
- return getNestedString(u.Object, "metadata", "selfLink")
-}
-
-func (u *UnstructuredList) SetSelfLink(selfLink string) {
- u.setNestedField(selfLink, "metadata", "selfLink")
-}
-
-func (u *UnstructuredList) GetContinue() string {
- return getNestedString(u.Object, "metadata", "continue")
-}
-
-func (u *UnstructuredList) SetContinue(c string) {
- u.setNestedField(c, "metadata", "continue")
-}
-
-func (u *UnstructuredList) GetRemainingItemCount() *int64 {
- return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
-}
-
-func (u *UnstructuredList) SetRemainingItemCount(c *int64) {
- if c == nil {
- RemoveNestedField(u.Object, "metadata", "remainingItemCount")
- } else {
- u.setNestedField(*c, "metadata", "remainingItemCount")
- }
-}
-
-func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
- u.SetAPIVersion(gvk.GroupVersion().String())
- u.SetKind(gvk.Kind)
-}
-
-func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind {
- gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
- if err != nil {
- return schema.GroupVersionKind{}
- }
- gvk := gv.WithKind(u.GetKind())
- return gvk
-}
-
-func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedField(u.Object, value, fields...)
-}
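
For reference, a minimal sketch of the two list behaviours defined above: SetUnstructuredContent keeping Object["items"] and Items in sync, and EachListItem visiting each item as a runtime.Object. The PodList payload is an arbitrary example, not data from this repository.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
        list := &unstructured.UnstructuredList{}
        // Non-map entries in "items" would be dropped while syncing.
        list.SetUnstructuredContent(map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "PodList",
            "items": []interface{}{
                map[string]interface{}{"metadata": map[string]interface{}{"name": "a"}},
                map[string]interface{}{"metadata": map[string]interface{}{"name": "b"}},
            },
        })

        _ = list.EachListItem(func(obj runtime.Object) error {
            fmt.Println(obj.(*unstructured.Unstructured).GetName())
            return nil
        })
    }
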
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
deleted file mode 100644
index 9a9f25e8f..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package unstructured
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Unstructured) DeepCopyInto(out *Unstructured) {
- clone := in.DeepCopy()
- *out = *clone
- return
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Unstructured) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) {
- clone := in.DeepCopy()
- *out = *clone
- return
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *UnstructuredList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
deleted file mode 100644
index e55ab94d1..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package serializer
-
-import (
- "mime"
- "strings"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/json"
- "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
- "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
- "k8s.io/apimachinery/pkg/runtime/serializer/versioning"
-)
-
-// serializerExtensions are for serializers that are conditionally compiled in
-var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
-
-type serializerType struct {
- AcceptContentTypes []string
- ContentType string
- FileExtensions []string
- // EncodesAsText should be true if this content type can be represented safely in UTF-8
- EncodesAsText bool
-
- Serializer runtime.Serializer
- PrettySerializer runtime.Serializer
-
- AcceptStreamContentTypes []string
- StreamContentType string
-
- Framer runtime.Framer
- StreamSerializer runtime.Serializer
-}
-
-func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
- jsonSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
- )
- jsonSerializerType := serializerType{
- AcceptContentTypes: []string{runtime.ContentTypeJSON},
- ContentType: runtime.ContentTypeJSON,
- FileExtensions: []string{"json"},
- EncodesAsText: true,
- Serializer: jsonSerializer,
-
- Framer: json.Framer,
- StreamSerializer: jsonSerializer,
- }
- if options.Pretty {
- jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict},
- )
- }
-
- yamlSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
- )
- protoSerializer := protobuf.NewSerializer(scheme, scheme)
- protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
-
- serializers := []serializerType{
- jsonSerializerType,
- {
- AcceptContentTypes: []string{runtime.ContentTypeYAML},
- ContentType: runtime.ContentTypeYAML,
- FileExtensions: []string{"yaml"},
- EncodesAsText: true,
- Serializer: yamlSerializer,
- },
- {
- AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
- ContentType: runtime.ContentTypeProtobuf,
- FileExtensions: []string{"pb"},
- Serializer: protoSerializer,
-
- Framer: protobuf.LengthDelimitedFramer,
- StreamSerializer: protoRawSerializer,
- },
- }
-
- for _, fn := range serializerExtensions {
- if serializer, ok := fn(scheme); ok {
- serializers = append(serializers, serializer)
- }
- }
- return serializers
-}
-
-// CodecFactory provides methods for retrieving codecs and serializers for specific
-// versions and content types.
-type CodecFactory struct {
- scheme *runtime.Scheme
- universal runtime.Decoder
- accepts []runtime.SerializerInfo
-
- legacySerializer runtime.Serializer
-}
-
-// CodecFactoryOptions holds the options for configuring CodecFactory behavior
-type CodecFactoryOptions struct {
- // Strict configures all serializers in strict mode
- Strict bool
- // Pretty includes a pretty serializer along with the non-pretty one
- Pretty bool
-}
-
-// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
-// Functions implementing this type can be passed to the NewCodecFactory() constructor.
-type CodecFactoryOptionsMutator func(*CodecFactoryOptions)
-
-// EnablePretty enables including a pretty serializer along with the non-pretty one
-func EnablePretty(options *CodecFactoryOptions) {
- options.Pretty = true
-}
-
-// DisablePretty disables including a pretty serializer along with the non-pretty one
-func DisablePretty(options *CodecFactoryOptions) {
- options.Pretty = false
-}
-
-// EnableStrict enables configuring all serializers in strict mode
-func EnableStrict(options *CodecFactoryOptions) {
- options.Strict = true
-}
-
-// DisableStrict disables configuring all serializers in strict mode
-func DisableStrict(options *CodecFactoryOptions) {
- options.Strict = false
-}
-
-// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
-// and conversion wrappers to define preferred internal and external versions. In the future,
-// as the internal version is used less, callers may instead use a defaulting serializer and
-// only convert objects which are shared internally (Status, common API machinery).
-//
-// Mutators can be passed to change the CodecFactoryOptions before construction of the factory.
-// It is recommended to explicitly pass mutators instead of relying on defaults.
-// By default, Pretty is enabled -- this is conformant with previously supported behavior.
-//
-// TODO: allow other codecs to be compiled in?
-// TODO: accept a scheme interface
-func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory {
- options := CodecFactoryOptions{Pretty: true}
- for _, fn := range mutators {
- fn(&options)
- }
-
- serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options)
- return newCodecFactory(scheme, serializers)
-}
-
-// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
- decoders := make([]runtime.Decoder, 0, len(serializers))
- var accepts []runtime.SerializerInfo
- alreadyAccepted := make(map[string]struct{})
-
- var legacySerializer runtime.Serializer
- for _, d := range serializers {
- decoders = append(decoders, d.Serializer)
- for _, mediaType := range d.AcceptContentTypes {
- if _, ok := alreadyAccepted[mediaType]; ok {
- continue
- }
- alreadyAccepted[mediaType] = struct{}{}
- info := runtime.SerializerInfo{
- MediaType: d.ContentType,
- EncodesAsText: d.EncodesAsText,
- Serializer: d.Serializer,
- PrettySerializer: d.PrettySerializer,
- }
-
- mediaType, _, err := mime.ParseMediaType(info.MediaType)
- if err != nil {
- panic(err)
- }
- parts := strings.SplitN(mediaType, "/", 2)
- info.MediaTypeType = parts[0]
- info.MediaTypeSubType = parts[1]
-
- if d.StreamSerializer != nil {
- info.StreamSerializer = &runtime.StreamSerializerInfo{
- Serializer: d.StreamSerializer,
- EncodesAsText: d.EncodesAsText,
- Framer: d.Framer,
- }
- }
- accepts = append(accepts, info)
- if mediaType == runtime.ContentTypeJSON {
- legacySerializer = d.Serializer
- }
- }
- }
- if legacySerializer == nil {
- legacySerializer = serializers[0].Serializer
- }
-
- return CodecFactory{
- scheme: scheme,
- universal: recognizer.NewDecoder(decoders...),
-
- accepts: accepts,
-
- legacySerializer: legacySerializer,
- }
-}
-
-// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the
-// caller requests it.
-func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer {
- return WithoutConversionCodecFactory{f}
-}
-
-// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
-func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
- return f.accepts
-}
-
-// LegacyCodec encodes output to the given API versions, and decodes output into the internal form from
-// any recognized source. The returned codec will always encode output to JSON. If a type is not
-// found in the list of versions an error will be returned.
-//
-// This method is deprecated - clients and servers should negotiate a serializer by mime-type and
-// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
-//
-// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
-// All other callers will be forced to request a Codec directly.
-func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
- return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
-}
-
-// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies
-// runtime.Object. It does not perform conversion. It does not perform defaulting.
-func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
- return f.universal
-}
-
-// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used
-// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes
-// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate
-// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified,
-// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs
-// defaulting.
-//
-// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
-// TODO: only accept a group versioner
-func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder {
- var versioner runtime.GroupVersioner
- if len(versions) == 0 {
- versioner = runtime.InternalGroupVersioner
- } else {
- versioner = schema.GroupVersions(versions)
- }
- return f.CodecForVersions(nil, f.universal, nil, versioner)
-}
-
-// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list,
-// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not
-// converted. If encode or decode are nil, no conversion is performed.
-func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec {
- // TODO: these are for backcompat, remove them in the future
- if encode == nil {
- encode = runtime.DisabledGroupVersioner
- }
- if decode == nil {
- decode = runtime.InternalGroupVersioner
- }
- return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode)
-}
-
-// DecoderToVersion returns a decoder that targets the provided group version.
-func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
- return f.CodecForVersions(nil, decoder, nil, gv)
-}
-
-// EncoderForVersion returns an encoder that targets the provided group version.
-func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
- return f.CodecForVersions(encoder, nil, gv, nil)
-}
-
-// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion.
-// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future
-// will be unnecessary when we change the signature of NegotiatedSerializer.
-type WithoutConversionCodecFactory struct {
- CodecFactory
-}
-
-// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object
-// when serialized.
-func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
- return runtime.WithVersionEncoder{
- Version: version,
- Encoder: serializer,
- ObjectTyper: f.CodecFactory.scheme,
- }
-}
-
-// DecoderToVersion returns a decoder that does not do conversion.
-func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
- return runtime.WithoutVersionDecoder{
- Decoder: serializer,
- }
-}
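
The factory above is normally consumed through its universal decoder. Below is a minimal sketch assuming an empty scheme, which is enough when decoding into unstructured objects; real callers register their typed APIs first, and the ConfigMap payload is an arbitrary example.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer"
    )

    func main() {
        scheme := runtime.NewScheme()
        codecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict)

        data := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"}}`)
        obj, gvk, err := codecs.UniversalDeserializer().Decode(data, nil, &unstructured.Unstructured{})
        if err != nil {
            panic(err)
        }
        fmt.Println(gvk.Kind, obj.(*unstructured.Unstructured).GetName())
    }

Because the universal decoder wraps the JSON, YAML, and protobuf serializers behind a recognizer, the same Decode call accepts any of the three wire formats.
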
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
deleted file mode 100644
index 83b2e1393..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package json
-
-import (
- "encoding/json"
- "io"
- "strconv"
- "unsafe"
-
- jsoniter "github.com/json-iterator/go"
- "github.com/modern-go/reflect2"
- "sigs.k8s.io/yaml"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
- "k8s.io/apimachinery/pkg/util/framer"
- utilyaml "k8s.io/apimachinery/pkg/util/yaml"
- "k8s.io/klog/v2"
-)
-
-// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
-// is not nil, the object has the group, version, and kind fields set.
-// Deprecated: use NewSerializerWithOptions instead.
-func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
-}
-
-// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
-// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
-// matches JSON, and will error if constructs are used that do not serialize to JSON.
-// Deprecated: use NewSerializerWithOptions instead.
-func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
-}
-
-// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
-// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer
-// and are immutable.
-func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
- return &Serializer{
- meta: meta,
- creater: creater,
- typer: typer,
- options: options,
- identifier: identifier(options),
- }
-}
-
-// identifier computes Identifier of Encoder based on the given options.
-func identifier(options SerializerOptions) runtime.Identifier {
- result := map[string]string{
- "name": "json",
- "yaml": strconv.FormatBool(options.Yaml),
- "pretty": strconv.FormatBool(options.Pretty),
- }
- identifier, err := json.Marshal(result)
- if err != nil {
- klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err)
- }
- return runtime.Identifier(identifier)
-}
-
-// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
-// example:
-// (1) To configure a JSON serializer, set `Yaml` to `false`.
-// (2) To configure a YAML serializer, set `Yaml` to `true`.
-// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`.
-type SerializerOptions struct {
- // Yaml: configures the Serializer to work with JSON(false) or YAML(true).
- // When `Yaml` is enabled, this serializer only supports the subset of YAML that
- // matches JSON, and will error if constructs are used that do not serialize to JSON.
- Yaml bool
-
- // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output.
- // This option is silently ignored when `Yaml` is `true`.
- Pretty bool
-
- // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML.
- // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
- Strict bool
-}
-
-// Serializer handles encoding versioned objects into the proper JSON form
-type Serializer struct {
- meta MetaFactory
- options SerializerOptions
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
-
- identifier runtime.Identifier
-}
-
-// Serializer implements Serializer
-var _ runtime.Serializer = &Serializer{}
-var _ recognizer.RecognizingDecoder = &Serializer{}
-
-type customNumberExtension struct {
- jsoniter.DummyExtension
-}
-
-func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
- if typ.String() == "interface {}" {
- return customNumberDecoder{}
- }
- return nil
-}
-
-type customNumberDecoder struct {
-}
-
-func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- var number jsoniter.Number
- iter.ReadVal(&number)
- i64, err := strconv.ParseInt(string(number), 10, 64)
- if err == nil {
- *(*interface{})(ptr) = i64
- return
- }
- f64, err := strconv.ParseFloat(string(number), 64)
- if err == nil {
- *(*interface{})(ptr) = f64
- return
- }
- iter.ReportError("DecodeNumber", err.Error())
- default:
- *(*interface{})(ptr) = iter.Read()
- }
-}
-
-// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
-// case-sensitive when unmarshalling, and otherwise compatible with
-// the encoding/json standard library.
-func CaseSensitiveJSONIterator() jsoniter.API {
- config := jsoniter.Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- CaseSensitive: true,
- }.Froze()
- // Force jsoniter to decode number to interface{} via int64/float64, if possible.
- config.RegisterExtension(&customNumberExtension{})
- return config
-}
-
-// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
-// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
-// the encoding/json standard library.
-func StrictCaseSensitiveJSONIterator() jsoniter.API {
- config := jsoniter.Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- CaseSensitive: true,
- DisallowUnknownFields: true,
- }.Froze()
- // Force jsoniter to decode number to interface{} via int64/float64, if possible.
- config.RegisterExtension(&customNumberExtension{})
- return config
-}
-
-// Private copies of jsoniter to try to shield against possible mutations
-// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
-// in some other library will mess with every usage of the jsoniter library in the whole program.
-// See https://github.com/json-iterator/go/issues/265
-var caseSensitiveJSONIterator = CaseSensitiveJSONIterator()
-var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator()
-
-// gvkWithDefaults returns group kind and version defaulting from provided default
-func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
- if len(actual.Kind) == 0 {
- actual.Kind = defaultGVK.Kind
- }
- if len(actual.Version) == 0 && len(actual.Group) == 0 {
- actual.Group = defaultGVK.Group
- actual.Version = defaultGVK.Version
- }
- if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
- actual.Version = defaultGVK.Version
- }
- return actual
-}
-
-// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
-// load that data into an object matching the desired schema kind or the provided into.
-// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed.
-// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling.
-// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk.
-// If into is nil or the data's gvk differs from into's gvk, a new Object will be generated with ObjectCreater.New(gvk).
-// On success or most errors, the method will return the calculated schema kind.
-// The gvk calculation priority is originalData > default gvk > into.
-func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- data := originalData
- if s.options.Yaml {
- altered, err := yaml.YAMLToJSON(data)
- if err != nil {
- return nil, nil, err
- }
- data = altered
- }
-
- actual, err := s.meta.Interpret(data)
- if err != nil {
- return nil, nil, err
- }
-
- if gvk != nil {
- *actual = gvkWithDefaults(*actual, *gvk)
- }
-
- if unk, ok := into.(*runtime.Unknown); ok && unk != nil {
- unk.Raw = originalData
- unk.ContentType = runtime.ContentTypeJSON
- unk.GetObjectKind().SetGroupVersionKind(*actual)
- return unk, actual, nil
- }
-
- if into != nil {
- _, isUnstructured := into.(runtime.Unstructured)
- types, _, err := s.typer.ObjectKinds(into)
- switch {
- case runtime.IsNotRegisteredError(err), isUnstructured:
- if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil {
- return nil, actual, err
- }
- return into, actual, nil
- case err != nil:
- return nil, actual, err
- default:
- *actual = gvkWithDefaults(*actual, types[0])
- }
- }
-
- if len(actual.Kind) == 0 {
- return nil, actual, runtime.NewMissingKindErr(string(originalData))
- }
- if len(actual.Version) == 0 {
- return nil, actual, runtime.NewMissingVersionErr(string(originalData))
- }
-
- // use the target if necessary
- obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
- if err != nil {
- return nil, actual, err
- }
-
- if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil {
- return nil, actual, err
- }
-
- // If the deserializer is non-strict, return successfully here.
- if !s.options.Strict {
- return obj, actual, nil
- }
-
-	// In strict mode pass the data through the YAMLToJSONStrict converter.
-	// This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
-	// the output would equal the input, unless there is a parsing error such as duplicate fields.
-	// As we know this was successful in the non-strict case, the only error that may be returned here
-	// is because of the newly-added strictness. Hence we know we can return the typed strictDecodingError:
-	// the actual error is that the object contains duplicate fields.
- altered, err := yaml.YAMLToJSONStrict(originalData)
- if err != nil {
- return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
- }
-	// As performance is not an issue for now for the strict deserializer (one has to do the
-	// unmarshal twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
-	// fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is
-	// due to a field in the data that does not exist in the object. Hence we can return a typed strictDecodingError;
-	// the actual error is that the object contains an unknown field.
- strictObj := obj.DeepCopyObject()
- if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil {
- return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
- }
- // Always return the same object as the non-strict serializer to avoid any deviations.
- return obj, actual, nil
-}
-
-// Encode serializes the provided object to the given writer.
-func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
- if s.options.Yaml {
- json, err := caseSensitiveJSONIterator.Marshal(obj)
- if err != nil {
- return err
- }
- data, err := yaml.JSONToYAML(json)
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- return err
- }
-
- if s.options.Pretty {
- data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ")
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- return err
- }
- encoder := json.NewEncoder(w)
- return encoder.Encode(obj)
-}
-
-// Identifier implements runtime.Encoder interface.
-func (s *Serializer) Identifier() runtime.Identifier {
- return s.identifier
-}
-
-// RecognizesData implements the RecognizingDecoder interface.
-func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
- if s.options.Yaml {
- // we could potentially look for '---'
- return false, true, nil
- }
- _, _, ok = utilyaml.GuessJSONStream(peek, 2048)
- return ok, false, nil
-}
-
-// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
-var Framer = jsonFramer{}
-
-type jsonFramer struct{}
-
-// NewFrameWriter implements stream framing for this serializer
-func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {
- // we can write JSON objects directly to the writer, because they are self-framing
- return w
-}
-
-// NewFrameReader implements stream framing for this serializer
-func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
- // we need to extract the JSON chunks of data to pass to Decode()
- return framer.NewJSONFramedReader(r)
-}
-
-// YAMLFramer is the default YAML framing behavior, with the `---` document separator delimiting individual objects.
-var YAMLFramer = yamlFramer{}
-
-type yamlFramer struct{}
-
-// NewFrameWriter implements stream framing for this serializer
-func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer {
- return yamlFrameWriter{w}
-}
-
-// NewFrameReader implements stream framing for this serializer
-func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
- // extract the YAML document chunks directly
- return utilyaml.NewDocumentDecoder(r)
-}
-
-type yamlFrameWriter struct {
- w io.Writer
-}
-
-// Write separates each document with the YAML document separator (`---` followed by line
-// break). Writers must write well formed YAML documents (include a final line break).
-func (w yamlFrameWriter) Write(data []byte) (n int, err error) {
- if _, err := w.w.Write([]byte("---\n")); err != nil {
- return 0, err
- }
- return w.w.Write(data)
-}
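
As a small sketch of the options described above, the same constructor yields either a JSON or a YAML encoder depending on SerializerOptions.Yaml; an empty scheme serves as both ObjectCreater and ObjectTyper, and the ConfigMap object is an arbitrary example.

    package main

    import (
        "os"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer/json"
    )

    func main() {
        scheme := runtime.NewScheme()
        yamlSerializer := json.NewSerializerWithOptions(
            json.DefaultMetaFactory, scheme, scheme,
            json.SerializerOptions{Yaml: true, Pretty: false, Strict: false},
        )

        u := &unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "ConfigMap",
            "metadata":   map[string]interface{}{"name": "demo"},
        }}

        // With Yaml enabled, Encode marshals to JSON first and then converts to YAML.
        if err := yamlSerializer.Encode(u, os.Stdout); err != nil {
            panic(err)
        }
    }
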
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
deleted file mode 100644
index df3f5f989..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package json
-
-import (
- "encoding/json"
- "fmt"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// MetaFactory is used to store and retrieve the version and kind
-// information for JSON objects in a serializer.
-type MetaFactory interface {
- // Interpret should return the version and kind of the wire-format of
- // the object.
- Interpret(data []byte) (*schema.GroupVersionKind, error)
-}
-
-// DefaultMetaFactory is a default factory for versioning objects in JSON. The object
-// in memory and in the default JSON serialization will use the "kind" and "apiVersion"
-// fields.
-var DefaultMetaFactory = SimpleMetaFactory{}
-
-// SimpleMetaFactory provides default methods for retrieving the type and version of objects
-// that are identified with an "apiVersion" and "kind" fields in their JSON
-// serialization. It may be parameterized with the names of the fields in memory, or an
-// optional list of base structs to search for those fields in memory.
-type SimpleMetaFactory struct {
-}
-
-// Interpret will return the APIVersion and Kind of the JSON wire-format
-// encoding of an object, or an error.
-func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
- findKind := struct {
- // +optional
- APIVersion string `json:"apiVersion,omitempty"`
- // +optional
- Kind string `json:"kind,omitempty"`
- }{}
- if err := json.Unmarshal(data, &findKind); err != nil {
- return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err)
- }
- gv, err := schema.ParseGroupVersion(findKind.APIVersion)
- if err != nil {
- return nil, err
- }
- return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil
-}
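
A minimal sketch of the meta factory in isolation: Interpret only peeks at the "apiVersion" and "kind" fields, so it works on any JSON document without needing a registered type. The Deployment payload is an arbitrary example.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/serializer/json"
    )

    func main() {
        raw := []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demo"}}`)
        gvk, err := json.DefaultMetaFactory.Interpret(raw)
        if err != nil {
            panic(err)
        }
        fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // apps v1 Deployment
    }
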
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
deleted file mode 100644
index a42b4a41a..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package serializer
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// TODO: We should split negotiated serializers that we can change versions on from those we can change
-// serialization formats on
-type negotiatedSerializerWrapper struct {
- info runtime.SerializerInfo
-}
-
-func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer {
- return &negotiatedSerializerWrapper{info}
-}
-
-func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo {
- return []runtime.SerializerInfo{n.info}
-}
-
-func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder {
- return e
-}
-
-func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder {
- return d
-}
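
A brief sketch of the wrapper above, assuming a JSON serializer built from an empty scheme: it exposes exactly one SerializerInfo and performs no version conversion on encode or decode.

    package main

    import (
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer"
        "k8s.io/apimachinery/pkg/runtime/serializer/json"
    )

    func main() {
        scheme := runtime.NewScheme()
        jsonSerializer := json.NewSerializerWithOptions(
            json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{},
        )

        info := runtime.SerializerInfo{
            MediaType:     runtime.ContentTypeJSON,
            EncodesAsText: true,
            Serializer:    jsonSerializer,
        }
        var ns runtime.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(info)
        _ = ns.SupportedMediaTypes() // single entry: the wrapped info
    }
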
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
deleted file mode 100644
index 72d0ac79b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package protobuf provides a Kubernetes serializer for the protobuf format.
-package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
deleted file mode 100644
index 404fb1b7e..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package protobuf
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "reflect"
-
- "github.com/gogo/protobuf/proto"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
- "k8s.io/apimachinery/pkg/util/framer"
-)
-
-var (
- // protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All
- // proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth
- // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that
- // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2).
- //
- // See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message.
- //
- // This encoding scheme is experimental, and is subject to change at any time.
- protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
-)
-
-type errNotMarshalable struct {
- t reflect.Type
-}
-
-func (e errNotMarshalable) Error() string {
- return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
-}
-
-func (e errNotMarshalable) Status() metav1.Status {
- return metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusNotAcceptable,
- Reason: metav1.StatusReason("NotAcceptable"),
- Message: e.Error(),
- }
-}
-
-// IsNotMarshalable returns true if the error is non-nil and indicates that the object is not marshalable; it returns false otherwise.
-func IsNotMarshalable(err error) bool {
- _, ok := err.(errNotMarshalable)
- return err != nil && ok
-}
-
-// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
-// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
-// as-is (any type info passed with the object will be used).
-func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
- return &Serializer{
- prefix: protoEncodingPrefix,
- creater: creater,
- typer: typer,
- }
-}
-
-// Serializer handles encoding versioned objects into the proper wire form
-type Serializer struct {
- prefix []byte
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
-}
-
-var _ runtime.Serializer = &Serializer{}
-var _ recognizer.RecognizingDecoder = &Serializer{}
-
-const serializerIdentifier runtime.Identifier = "protobuf"
-
-// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
-// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
-// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
-// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
-// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
-// errors, the method will return the calculated schema kind.
-func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- prefixLen := len(s.prefix)
- switch {
- case len(originalData) == 0:
- // TODO: treat like decoding {} from JSON with defaulting
- return nil, nil, fmt.Errorf("empty data")
- case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]):
- return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix)
- case len(originalData) == prefixLen:
- // TODO: treat like decoding {} from JSON with defaulting
- return nil, nil, fmt.Errorf("empty body")
- }
-
- data := originalData[prefixLen:]
- unk := runtime.Unknown{}
- if err := unk.Unmarshal(data); err != nil {
- return nil, nil, err
- }
-
- actual := unk.GroupVersionKind()
- copyKindDefaults(&actual, gvk)
-
- if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
- *intoUnknown = unk
- if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
- intoUnknown.ContentType = runtime.ContentTypeProtobuf
- }
- return intoUnknown, &actual, nil
- }
-
- if into != nil {
- types, _, err := s.typer.ObjectKinds(into)
- switch {
- case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
- if !ok {
- return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
- }
- if err := proto.Unmarshal(unk.Raw, pb); err != nil {
- return nil, &actual, err
- }
- return into, &actual, nil
- case err != nil:
- return nil, &actual, err
- default:
- copyKindDefaults(&actual, &types[0])
- // if the result of defaulting did not set a version or group, ensure that at least group is set
- // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
- // of into is set if there is no better information from the caller or object.
- if len(actual.Version) == 0 && len(actual.Group) == 0 {
- actual.Group = types[0].Group
- }
- }
- }
-
- if len(actual.Kind) == 0 {
- return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta))
- }
- if len(actual.Version) == 0 {
- return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta))
- }
-
- return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
-}
-
-// Encode serializes the provided object to the given writer.
-func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
- prefixSize := uint64(len(s.prefix))
-
- var unk runtime.Unknown
- switch t := obj.(type) {
- case *runtime.Unknown:
- estimatedSize := prefixSize + uint64(t.Size())
- data := make([]byte, estimatedSize)
- i, err := t.MarshalTo(data[prefixSize:])
- if err != nil {
- return err
- }
- copy(data, s.prefix)
- _, err = w.Write(data[:prefixSize+uint64(i)])
- return err
- default:
- kind := obj.GetObjectKind().GroupVersionKind()
- unk = runtime.Unknown{
- TypeMeta: runtime.TypeMeta{
- Kind: kind.Kind,
- APIVersion: kind.GroupVersion().String(),
- },
- }
- }
-
- switch t := obj.(type) {
- case bufferedMarshaller:
- // this path performs a single allocation during write but requires the caller to implement
- // the more efficient Size and MarshalToSizedBuffer methods
- encodedSize := uint64(t.Size())
- estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
- data := make([]byte, estimatedSize)
-
- i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
- if err != nil {
- return err
- }
-
- copy(data, s.prefix)
-
- _, err = w.Write(data[:prefixSize+uint64(i)])
- return err
-
- case proto.Marshaler:
- // this path performs extra allocations
- data, err := t.Marshal()
- if err != nil {
- return err
- }
- unk.Raw = data
-
- estimatedSize := prefixSize + uint64(unk.Size())
- data = make([]byte, estimatedSize)
-
- i, err := unk.MarshalTo(data[prefixSize:])
- if err != nil {
- return err
- }
-
- copy(data, s.prefix)
-
- _, err = w.Write(data[:prefixSize+uint64(i)])
- return err
-
- default:
- // TODO: marshal with a different content type and serializer (JSON for third party objects)
- return errNotMarshalable{reflect.TypeOf(obj)}
- }
-}
-
-// Identifier implements runtime.Encoder interface.
-func (s *Serializer) Identifier() runtime.Identifier {
- return serializerIdentifier
-}
-
-// RecognizesData implements the RecognizingDecoder interface.
-func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
- prefix := make([]byte, 4)
- n, err := peek.Read(prefix)
- if err != nil {
- if err == io.EOF {
- return false, false, nil
- }
- return false, false, err
- }
- if n != 4 {
- return false, false, nil
- }
- return bytes.Equal(s.prefix, prefix), false, nil
-}
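
To make the prefix handling concrete, here is a minimal sketch that encodes an object and then re-checks the stream with RecognizesData; metav1.Status is used only because it ships generated protobuf marshalling, and the empty scheme is sufficient for this illustration.

    package main

    import (
        "bytes"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
    )

    func main() {
        scheme := runtime.NewScheme()
        s := protobuf.NewSerializer(scheme, scheme)

        var buf bytes.Buffer
        if err := s.Encode(&metav1.Status{Status: metav1.StatusSuccess}, &buf); err != nil {
            panic(err)
        }

        // The stream starts with the magic prefix 0x6b 0x38 0x73 0x00,
        // which is exactly what RecognizesData looks for.
        ok, unknown, err := s.RecognizesData(bytes.NewReader(buf.Bytes()))
        fmt.Println(ok, unknown, err) // true false <nil>
    }
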
-
-// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
-func copyKindDefaults(dst, src *schema.GroupVersionKind) {
- if src == nil {
- return
- }
- // apply kind and version defaulting from provided default
- if len(dst.Kind) == 0 {
- dst.Kind = src.Kind
- }
- if len(dst.Version) == 0 && len(src.Version) > 0 {
- dst.Group = src.Group
- dst.Version = src.Version
- }
-}
-
-// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
-// byte buffers by pre-calculating the size of the final buffer needed.
-type bufferedMarshaller interface {
- proto.Sizer
- runtime.ProtobufMarshaller
-}
-
-// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently.
-type bufferedReverseMarshaller interface {
- proto.Sizer
- runtime.ProtobufReverseMarshaller
-}
-
-// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
-// object with a nil RawJSON struct and the expected size of the provided buffer. The
-// returned size will not be correct if RawJSON is set on unk.
-func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
- size := uint64(unk.Size())
- // protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here),
- // and the size of the array.
- size += 1 + 8 + byteSize
- return size
-}
-
-// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
-// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
-// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
-//
-// This encoding scheme is experimental, and is subject to change at any time.
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer {
- return &RawSerializer{
- creater: creater,
- typer: typer,
- }
-}
-
-// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
-// type).
-type RawSerializer struct {
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
-}
-
-var _ runtime.Serializer = &RawSerializer{}
-
-const rawSerializerIdentifier runtime.Identifier = "raw-protobuf"
-
-// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
-// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
-// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
-// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
-// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
-// errors, the method will return the calculated schema kind.
-func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- if into == nil {
- return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
- }
-
- if len(originalData) == 0 {
- // TODO: treat like decoding {} from JSON with defaulting
- return nil, nil, fmt.Errorf("empty data")
- }
- data := originalData
-
- actual := &schema.GroupVersionKind{}
- copyKindDefaults(actual, gvk)
-
- if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
- intoUnknown.Raw = data
- intoUnknown.ContentEncoding = ""
- intoUnknown.ContentType = runtime.ContentTypeProtobuf
- intoUnknown.SetGroupVersionKind(*actual)
- return intoUnknown, actual, nil
- }
-
- types, _, err := s.typer.ObjectKinds(into)
- switch {
- case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
- if !ok {
- return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
- }
- if err := proto.Unmarshal(data, pb); err != nil {
- return nil, actual, err
- }
- return into, actual, nil
- case err != nil:
- return nil, actual, err
- default:
- copyKindDefaults(actual, &types[0])
- // if the result of defaulting did not set a version or group, ensure that at least group is set
- // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
- // of into is set if there is no better information from the caller or object.
- if len(actual.Version) == 0 && len(actual.Group) == 0 {
- actual.Group = types[0].Group
- }
- }
-
- if len(actual.Kind) == 0 {
- return nil, actual, runtime.NewMissingKindErr("<protobuf encoded body - must provide default type>")
- }
- if len(actual.Version) == 0 {
- return nil, actual, runtime.NewMissingVersionErr("<protobuf encoded body - must provide default type>")
- }
-
- return unmarshalToObject(s.typer, s.creater, actual, into, data)
-}
-
-// unmarshalToObject is the decoding code shared by the raw and normal serializers.
-func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) {
- // use the target if necessary
- obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into)
- if err != nil {
- return nil, actual, err
- }
-
- pb, ok := obj.(proto.Message)
- if !ok {
- return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
- }
- if err := proto.Unmarshal(data, pb); err != nil {
- return nil, actual, err
- }
- if actual != nil {
- obj.GetObjectKind().SetGroupVersionKind(*actual)
- }
- return obj, actual, nil
-}
-
-// Encode serializes the provided object to the given writer. Overrides is ignored.
-func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
- switch t := obj.(type) {
- case bufferedReverseMarshaller:
- // this path performs a single allocation during write but requires the caller to implement
- // the more efficient Size and MarshalToSizedBuffer methods
- encodedSize := uint64(t.Size())
- data := make([]byte, encodedSize)
-
- n, err := t.MarshalToSizedBuffer(data)
- if err != nil {
- return err
- }
- _, err = w.Write(data[:n])
- return err
-
- case bufferedMarshaller:
- // this path performs a single allocation during write but requires the caller to implement
- // the more efficient Size and MarshalTo methods
- encodedSize := uint64(t.Size())
- data := make([]byte, encodedSize)
-
- n, err := t.MarshalTo(data)
- if err != nil {
- return err
- }
- _, err = w.Write(data[:n])
- return err
-
- case proto.Marshaler:
- // this path performs extra allocations
- data, err := t.Marshal()
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- return err
-
- default:
- return errNotMarshalable{reflect.TypeOf(obj)}
- }
-}
-
-// Identifier implements runtime.Encoder interface.
-func (s *RawSerializer) Identifier() runtime.Identifier {
- return rawSerializerIdentifier
-}
-
-// LengthDelimitedFramer is an exported variable of type lengthDelimitedFramer.
-var LengthDelimitedFramer = lengthDelimitedFramer{}
-
-// lengthDelimitedFramer provides length-delimited frame reader and writer methods.
-type lengthDelimitedFramer struct{}
-
-// NewFrameWriter implements stream framing for this serializer
-func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer {
- return framer.NewLengthDelimitedFrameWriter(w)
-}
-
-// NewFrameReader implements stream framing for this serializer
-func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
- return framer.NewLengthDelimitedFrameReader(r)
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
deleted file mode 100644
index 38497ab53..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package recognizer
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-type RecognizingDecoder interface {
- runtime.Decoder
- // RecognizesData should return true if the input provided in the provided reader
- // belongs to this decoder, or an error if the data could not be read or is ambiguous.
- // Unknown is true if the data could not be determined to match the decoder type.
- // Decoders should assume that they can read as much of peek as they need (as the caller
-	// provides) and may return unknown if the data provided is not sufficient to make
- // a determination. When peek returns EOF that may mean the end of the input or the
- // end of buffered input - recognizers should return the best guess at that time.
- RecognizesData(peek io.Reader) (ok, unknown bool, err error)
-}
-
-// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
-// by:
-//
-// 1. The decoder implements RecognizingDecoder and identifies the data
-// 2. All other decoders, and any decoder that returned true for unknown.
-//
-// The order passed to the constructor is preserved within those priorities.
-func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder {
- return &decoder{
- decoders: decoders,
- }
-}
-
-type decoder struct {
- decoders []runtime.Decoder
-}
-
-var _ RecognizingDecoder = &decoder{}
-
-func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) {
- var (
- lastErr error
- anyUnknown bool
- )
- data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024)
- for _, r := range d.decoders {
- switch t := r.(type) {
- case RecognizingDecoder:
- ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data))
- if err != nil {
- lastErr = err
- continue
- }
- anyUnknown = anyUnknown || unknown
- if !ok {
- continue
- }
- return true, false, nil
- }
- }
- return false, anyUnknown, lastErr
-}
-
-func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- var (
- lastErr error
- skipped []runtime.Decoder
- )
-
- // try recognizers, record any decoders we need to give a chance later
- for _, r := range d.decoders {
- switch t := r.(type) {
- case RecognizingDecoder:
- buf := bytes.NewBuffer(data)
- ok, unknown, err := t.RecognizesData(buf)
- if err != nil {
- lastErr = err
- continue
- }
- if unknown {
- skipped = append(skipped, t)
- continue
- }
- if !ok {
- continue
- }
- return r.Decode(data, gvk, into)
- default:
- skipped = append(skipped, t)
- }
- }
-
- // try recognizers that returned unknown or didn't recognize their data
- for _, r := range skipped {
- out, actual, err := r.Decode(data, gvk, into)
- if err != nil {
- lastErr = err
- continue
- }
- return out, actual, nil
- }
-
- if lastErr == nil {
- lastErr = fmt.Errorf("no serialization format matched the provided data")
- }
- return nil, nil, lastErr
-}
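
For context on the priority order documented in NewDecoder above, a minimal sketch of how callers typically compose recognizing decoders; protoSerializer and jsonSerializer are assumed runtime.Decoder values (for example the protobuf and JSON serializers vendored alongside this package) and are not defined here:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
)

// buildFallbackDecoder is a hedged sketch: decoders that implement
// RecognizingDecoder and positively identify the payload are tried first;
// any decoder that reports "unknown" (or does not implement the interface)
// is retried afterwards in the order given here.
func buildFallbackDecoder(protoSerializer, jsonSerializer runtime.Decoder) runtime.Decoder {
	return recognizer.NewDecoder(protoSerializer, jsonSerializer)
}
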
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
deleted file mode 100644
index a60a7c041..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package streaming implements encoder and decoder for streams
-// of runtime.Objects over io.Writer/Readers.
-package streaming
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// Encoder is a runtime.Encoder on a stream.
-type Encoder interface {
- // Encode will write the provided object to the stream or return an error. It obeys the same
- // contract as runtime.VersionedEncoder.
- Encode(obj runtime.Object) error
-}
-
-// Decoder is a runtime.Decoder from a stream.
-type Decoder interface {
- // Decode will return io.EOF when no more objects are available.
- Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
- // Close closes the underlying stream.
- Close() error
-}
-
-// Serializer is a factory for creating encoders and decoders that work over streams.
-type Serializer interface {
- NewEncoder(w io.Writer) Encoder
- NewDecoder(r io.ReadCloser) Decoder
-}
-
-type decoder struct {
- reader io.ReadCloser
- decoder runtime.Decoder
- buf []byte
- maxBytes int
- resetRead bool
-}
-
-// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
-// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to read
-// an entire object.
-func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
- return &decoder{
- reader: r,
- decoder: d,
- buf: make([]byte, 1024),
- maxBytes: 16 * 1024 * 1024,
- }
-}
-
-var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
-
-// Decode reads the next object from the stream and decodes it.
-func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- base := 0
- for {
- n, err := d.reader.Read(d.buf[base:])
- if err == io.ErrShortBuffer {
- if n == 0 {
- return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
- }
- if d.resetRead {
- continue
- }
- // double the buffer size up to maxBytes
- if len(d.buf) < d.maxBytes {
- base += n
- d.buf = append(d.buf, make([]byte, len(d.buf))...)
- continue
- }
- // must read the rest of the frame (until we stop getting ErrShortBuffer)
- d.resetRead = true
- base = 0
- return nil, nil, ErrObjectTooLarge
- }
- if err != nil {
- return nil, nil, err
- }
- if d.resetRead {
- // now that we have drained the large read, continue
- d.resetRead = false
- continue
- }
- base += n
- break
- }
- return d.decoder.Decode(d.buf[:base], defaults, into)
-}
-
-func (d *decoder) Close() error {
- return d.reader.Close()
-}
-
-type encoder struct {
- writer io.Writer
- encoder runtime.Encoder
- buf *bytes.Buffer
-}
-
-// NewEncoder returns a new streaming encoder.
-func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
- return &encoder{
- writer: w,
- encoder: e,
- buf: &bytes.Buffer{},
- }
-}
-
-// Encode writes the provided object to the nested writer.
-func (e *encoder) Encode(obj runtime.Object) error {
- if err := e.encoder.Encode(obj, e.buf); err != nil {
- return err
- }
- _, err := e.writer.Write(e.buf.Bytes())
- e.buf.Reset()
- return err
-}
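
A minimal sketch of how the streaming decoder above is typically paired with a length-delimited frame reader; rc, d, and handle are assumed inputs supplied by the caller, and the import paths refer to the vendored packages removed in this commit:

package example

import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
	"k8s.io/apimachinery/pkg/util/framer"
)

// drainObjects reads length-delimited frames from rc, decodes each frame with
// d, and passes every decoded object to handle until the stream ends.
func drainObjects(rc io.ReadCloser, d runtime.Decoder, handle func(runtime.Object)) error {
	dec := streaming.NewDecoder(framer.NewLengthDelimitedFrameReader(rc), d)
	defer dec.Close()
	for {
		obj, _, err := dec.Decode(nil, nil)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		handle(obj)
	}
}
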
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
deleted file mode 100644
index 718c5dfb7..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package versioning
-
-import (
- "encoding/json"
- "io"
- "reflect"
- "sync"
-
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog/v2"
-)
-
-// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
-func NewDefaultingCodecForScheme(
- // TODO: I should be a scheme interface?
- scheme *runtime.Scheme,
- encoder runtime.Encoder,
- decoder runtime.Decoder,
- encodeVersion runtime.GroupVersioner,
- decodeVersion runtime.GroupVersioner,
-) runtime.Codec {
- return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name())
-}
-
-// NewCodec takes objects in their internal versions and converts them to external versions before
-// serializing them. It assumes the serializer provided to it only deals with external versions.
-// This class is also a serializer, but is generally used with a specific version.
-func NewCodec(
- encoder runtime.Encoder,
- decoder runtime.Decoder,
- convertor runtime.ObjectConvertor,
- creater runtime.ObjectCreater,
- typer runtime.ObjectTyper,
- defaulter runtime.ObjectDefaulter,
- encodeVersion runtime.GroupVersioner,
- decodeVersion runtime.GroupVersioner,
- originalSchemeName string,
-) runtime.Codec {
- internal := &codec{
- encoder: encoder,
- decoder: decoder,
- convertor: convertor,
- creater: creater,
- typer: typer,
- defaulter: defaulter,
-
- encodeVersion: encodeVersion,
- decodeVersion: decodeVersion,
-
- identifier: identifier(encodeVersion, encoder),
-
- originalSchemeName: originalSchemeName,
- }
- return internal
-}
-
-type codec struct {
- encoder runtime.Encoder
- decoder runtime.Decoder
- convertor runtime.ObjectConvertor
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
- defaulter runtime.ObjectDefaulter
-
- encodeVersion runtime.GroupVersioner
- decodeVersion runtime.GroupVersioner
-
- identifier runtime.Identifier
-
- // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
- originalSchemeName string
-}
-
-var identifiersMap sync.Map
-
-type codecIdentifier struct {
- EncodeGV string `json:"encodeGV,omitempty"`
- Encoder string `json:"encoder,omitempty"`
- Name string `json:"name,omitempty"`
-}
-
-// identifier computes Identifier of Encoder based on codec parameters.
-func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier {
- result := codecIdentifier{
- Name: "versioning",
- }
-
- if encodeGV != nil {
- result.EncodeGV = encodeGV.Identifier()
- }
- if encoder != nil {
- result.Encoder = string(encoder.Identifier())
- }
- if id, ok := identifiersMap.Load(result); ok {
- return id.(runtime.Identifier)
- }
- identifier, err := json.Marshal(result)
- if err != nil {
- klog.Fatalf("Failed marshaling identifier for codec: %v", err)
- }
- identifiersMap.Store(result, runtime.Identifier(identifier))
- return runtime.Identifier(identifier)
-}
-
-// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
-// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
-// into that matches the serialized version.
-func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- // If the into object is unstructured and expresses an opinion about its group/version,
- // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
- decodeInto := into
- if into != nil {
- if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() {
- decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object)
- }
- }
-
- obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
- if err != nil {
- return nil, gvk, err
- }
-
- if d, ok := obj.(runtime.NestedObjectDecoder); ok {
- if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
- return nil, gvk, err
- }
- }
-
- // if we specify a target, use generic conversion.
- if into != nil {
- // perform defaulting if requested
- if c.defaulter != nil {
- c.defaulter.Default(obj)
- }
-
- // Short-circuit conversion if the into object is same object
- if into == obj {
- return into, gvk, nil
- }
-
- if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
- return nil, gvk, err
- }
-
- return into, gvk, nil
- }
-
- // perform defaulting if requested
- if c.defaulter != nil {
- c.defaulter.Default(obj)
- }
-
- out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion)
- if err != nil {
- return nil, gvk, err
- }
- return out, gvk, nil
-}
-
-// Encode ensures the provided object is output in the appropriate group and version, invoking
-// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
-func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(c.Identifier(), c.doEncode, w)
- }
- return c.doEncode(obj, w)
-}
-
-func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
- switch obj := obj.(type) {
- case *runtime.Unknown:
- return c.encoder.Encode(obj, w)
- case runtime.Unstructured:
- // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just
- // because the top-level type matches our desired destination type. actually send the object to the converter
- // to give it a chance to convert the list items if needed.
- if _, ok := obj.(*unstructured.UnstructuredList); !ok {
-			// avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but it preserves the old behaviour that kubectl relies on)
- objGVK := obj.GetObjectKind().GroupVersionKind()
- if len(objGVK.Version) == 0 {
- return c.encoder.Encode(obj, w)
- }
- targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
- if !ok {
- return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
- }
- if targetGVK == objGVK {
- return c.encoder.Encode(obj, w)
- }
- }
- }
-
- gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
- if err != nil {
- return err
- }
-
- objectKind := obj.GetObjectKind()
- old := objectKind.GroupVersionKind()
- // restore the old GVK after encoding
- defer objectKind.SetGroupVersionKind(old)
-
- if c.encodeVersion == nil || isUnversioned {
- if e, ok := obj.(runtime.NestedObjectEncoder); ok {
- if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
- return err
- }
- }
- objectKind.SetGroupVersionKind(gvks[0])
- return c.encoder.Encode(obj, w)
- }
-
- // Perform a conversion if necessary
- out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
- if err != nil {
- return err
- }
-
- if e, ok := out.(runtime.NestedObjectEncoder); ok {
- if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
- return err
- }
- }
-
- // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
- return c.encoder.Encode(out, w)
-}
-
-// Identifier implements runtime.Encoder interface.
-func (c *codec) Identifier() runtime.Identifier {
- return c.identifier
-}
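
A minimal sketch of how the versioning codec above is usually constructed, assuming the caller already has a *runtime.Scheme, a plain runtime.Serializer (for example the JSON or protobuf serializer), and a target schema.GroupVersion; runtime.InternalGroupVersioner is used here for the decode side:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// newVersionedCodec is a hedged sketch: it wraps ser so that objects are
// converted to gv before encoding and converted back to the scheme's
// internal form after decoding.
func newVersionedCodec(scheme *runtime.Scheme, ser runtime.Serializer, gv schema.GroupVersion) runtime.Codec {
	return versioning.NewDefaultingCodecForScheme(scheme, ser, ser, gv, runtime.InternalGroupVersioner)
}
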
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
deleted file mode 100644
index 3e1e2517b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clock
-
-import (
- "sync"
- "time"
-)
-
-// PassiveClock allows for injecting fake or real clocks into code
-// that needs to read the current time but does not support scheduling
-// activity in the future.
-type PassiveClock interface {
- Now() time.Time
- Since(time.Time) time.Duration
-}
-
-// Clock allows for injecting fake or real clocks into code that
-// needs to do arbitrary things based on time.
-type Clock interface {
- PassiveClock
- After(time.Duration) <-chan time.Time
- NewTimer(time.Duration) Timer
- Sleep(time.Duration)
- NewTicker(time.Duration) Ticker
-}
-
-// RealClock really calls time.Now()
-type RealClock struct{}
-
-// Now returns the current time.
-func (RealClock) Now() time.Time {
- return time.Now()
-}
-
-// Since returns time since the specified timestamp.
-func (RealClock) Since(ts time.Time) time.Duration {
- return time.Since(ts)
-}
-
-// After is the same as time.After(d).
-func (RealClock) After(d time.Duration) <-chan time.Time {
- return time.After(d)
-}
-
-// NewTimer returns a new Timer.
-func (RealClock) NewTimer(d time.Duration) Timer {
- return &realTimer{
- timer: time.NewTimer(d),
- }
-}
-
-// NewTicker returns a new Ticker.
-func (RealClock) NewTicker(d time.Duration) Ticker {
- return &realTicker{
- ticker: time.NewTicker(d),
- }
-}
-
-// Sleep pauses the RealClock for duration d.
-func (RealClock) Sleep(d time.Duration) {
- time.Sleep(d)
-}
-
-// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
-type FakePassiveClock struct {
- lock sync.RWMutex
- time time.Time
-}
-
-// FakeClock implements Clock, but returns an arbitrary time.
-type FakeClock struct {
- FakePassiveClock
-
- // waiters are waiting for the fake time to pass their specified time
- waiters []fakeClockWaiter
-}
-
-type fakeClockWaiter struct {
- targetTime time.Time
- stepInterval time.Duration
- skipIfBlocked bool
- destChan chan time.Time
-}
-
-// NewFakePassiveClock returns a new FakePassiveClock.
-func NewFakePassiveClock(t time.Time) *FakePassiveClock {
- return &FakePassiveClock{
- time: t,
- }
-}
-
-// NewFakeClock returns a new FakeClock
-func NewFakeClock(t time.Time) *FakeClock {
- return &FakeClock{
- FakePassiveClock: *NewFakePassiveClock(t),
- }
-}
-
-// Now returns f's time.
-func (f *FakePassiveClock) Now() time.Time {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time
-}
-
-// Since returns time since the time in f.
-func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time.Sub(ts)
-}
-
-// SetTime sets the time on the FakePassiveClock.
-func (f *FakePassiveClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.time = t
-}
-
-// After is the Fake version of time.After(d).
-func (f *FakeClock) After(d time.Duration) <-chan time.Time {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- f.waiters = append(f.waiters, fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- })
- return ch
-}
-
-// NewTimer is the Fake version of time.NewTimer(d).
-func (f *FakeClock) NewTimer(d time.Duration) Timer {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- timer := &fakeTimer{
- fakeClock: f,
- waiter: fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- },
- }
- f.waiters = append(f.waiters, timer.waiter)
- return timer
-}
-
-// NewTicker returns a new Ticker.
-func (f *FakeClock) NewTicker(d time.Duration) Ticker {
- f.lock.Lock()
- defer f.lock.Unlock()
- tickTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // hold one tick
- f.waiters = append(f.waiters, fakeClockWaiter{
- targetTime: tickTime,
- stepInterval: d,
- skipIfBlocked: true,
- destChan: ch,
- })
-
- return &fakeTicker{
- c: ch,
- }
-}
-
-// Step moves the clock forward by d and notifies anyone that has called After, NewTimer, or NewTicker.
-func (f *FakeClock) Step(d time.Duration) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(f.time.Add(d))
-}
-
-// SetTime sets the time on a FakeClock.
-func (f *FakeClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(t)
-}
-
-// Actually changes the time and checks any waiters. f must be write-locked.
-func (f *FakeClock) setTimeLocked(t time.Time) {
- f.time = t
- newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
- for i := range f.waiters {
- w := &f.waiters[i]
- if !w.targetTime.After(t) {
-
- if w.skipIfBlocked {
- select {
- case w.destChan <- t:
- default:
- }
- } else {
- w.destChan <- t
- }
-
- if w.stepInterval > 0 {
- for !w.targetTime.After(t) {
- w.targetTime = w.targetTime.Add(w.stepInterval)
- }
- newWaiters = append(newWaiters, *w)
- }
-
- } else {
- newWaiters = append(newWaiters, f.waiters[i])
- }
- }
- f.waiters = newWaiters
-}
-
-// HasWaiters returns true if After has been called on f but not yet satisfied (so you can
-// write race-free tests).
-func (f *FakeClock) HasWaiters() bool {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return len(f.waiters) > 0
-}
-
-// Sleep pauses the FakeClock for duration d.
-func (f *FakeClock) Sleep(d time.Duration) {
- f.Step(d)
-}
-
-// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
-type IntervalClock struct {
- Time time.Time
- Duration time.Duration
-}
-
-// Now returns i's time.
-func (i *IntervalClock) Now() time.Time {
- i.Time = i.Time.Add(i.Duration)
- return i.Time
-}
-
-// Since returns time since the time in i.
-func (i *IntervalClock) Since(ts time.Time) time.Duration {
- return i.Time.Sub(ts)
-}
-
-// After is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) After(d time.Duration) <-chan time.Time {
- panic("IntervalClock doesn't implement After")
-}
-
-// NewTimer is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTimer(d time.Duration) Timer {
- panic("IntervalClock doesn't implement NewTimer")
-}
-
-// NewTicker is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTicker(d time.Duration) Ticker {
- panic("IntervalClock doesn't implement NewTicker")
-}
-
-// Sleep is currently unimplemented; will panic.
-func (*IntervalClock) Sleep(d time.Duration) {
- panic("IntervalClock doesn't implement Sleep")
-}
-
-// Timer allows for injecting fake or real timers into code that
-// needs to do arbitrary things based on time.
-type Timer interface {
- C() <-chan time.Time
- Stop() bool
- Reset(d time.Duration) bool
-}
-
-// realTimer is backed by an actual time.Timer.
-type realTimer struct {
- timer *time.Timer
-}
-
-// C returns the underlying timer's channel.
-func (r *realTimer) C() <-chan time.Time {
- return r.timer.C
-}
-
-// Stop calls Stop() on the underlying timer.
-func (r *realTimer) Stop() bool {
- return r.timer.Stop()
-}
-
-// Reset calls Reset() on the underlying timer.
-func (r *realTimer) Reset(d time.Duration) bool {
- return r.timer.Reset(d)
-}
-
-// fakeTimer implements Timer based on a FakeClock.
-type fakeTimer struct {
- fakeClock *FakeClock
- waiter fakeClockWaiter
-}
-
-// C returns the channel that notifies when this timer has fired.
-func (f *fakeTimer) C() <-chan time.Time {
- return f.waiter.destChan
-}
-
-// Stop conditionally stops the timer. If the timer has neither fired
-// nor been stopped then this call stops the timer and returns true,
-// otherwise this call returns false. This is like time.Timer::Stop.
-func (f *fakeTimer) Stop() bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
- // The timer has already fired or been stopped, unless it is found
- // among the clock's waiters.
- stopped := false
- oldWaiters := f.fakeClock.waiters
- newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
- seekChan := f.waiter.destChan
- for i := range oldWaiters {
- // Identify the timer's fakeClockWaiter by the identity of the
- // destination channel, nothing else is necessarily unique and
- // constant since the timer's creation.
- if oldWaiters[i].destChan == seekChan {
- stopped = true
- } else {
- newWaiters = append(newWaiters, oldWaiters[i])
- }
- }
-
- f.fakeClock.waiters = newWaiters
-
- return stopped
-}
-
-// Reset conditionally updates the firing time of the timer. If the
-// timer has neither fired nor been stopped then this call resets the
-// timer to the fake clock's "now" + d and returns true, otherwise
-// it creates a new waiter, adds it to the clock, and returns true.
-//
-// It is not possible to return false, because a fake timer can be reset
-// from any state (waiting to fire, already fired, and stopped).
-//
-// See the GoDoc for time.Timer::Reset for more context on why
-// the return value of Reset() is not useful.
-func (f *fakeTimer) Reset(d time.Duration) bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
- waiters := f.fakeClock.waiters
- seekChan := f.waiter.destChan
- for i := range waiters {
- if waiters[i].destChan == seekChan {
- waiters[i].targetTime = f.fakeClock.time.Add(d)
- return true
- }
- }
- // No existing waiter, timer has already fired or been reset.
- // We should still enable Reset() to succeed by creating a
- // new waiter and adding it to the clock's waiters.
- newWaiter := fakeClockWaiter{
- targetTime: f.fakeClock.time.Add(d),
- destChan: seekChan,
- }
- f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter)
- return true
-}
-
-// Ticker defines the Ticker interface
-type Ticker interface {
- C() <-chan time.Time
- Stop()
-}
-
-type realTicker struct {
- ticker *time.Ticker
-}
-
-func (t *realTicker) C() <-chan time.Time {
- return t.ticker.C
-}
-
-func (t *realTicker) Stop() {
- t.ticker.Stop()
-}
-
-type fakeTicker struct {
- c <-chan time.Time
-}
-
-func (t *fakeTicker) C() <-chan time.Time {
- return t.c
-}
-
-func (t *fakeTicker) Stop() {
-}
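
A minimal, self-contained sketch of the FakeClock semantics above: waiters registered through After only fire once the fake time is stepped past their target.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	start := time.Date(2021, 2, 25, 0, 0, 0, 0, time.UTC)
	fake := clock.NewFakeClock(start)

	// After registers a waiter but never fires on its own.
	ch := fake.After(5 * time.Second)
	fmt.Println("has waiters:", fake.HasWaiters()) // true

	// Step advances the fake time and delivers to waiters whose target has passed.
	fake.Step(10 * time.Second)
	fmt.Println("fired after:", (<-ch).Sub(start)) // 10s
}
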
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
deleted file mode 100644
index 45aa74bf5..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package framer implements simple frame decoding techniques for an io.ReadCloser
-package framer
-
-import (
- "encoding/binary"
- "encoding/json"
- "io"
-)
-
-type lengthDelimitedFrameWriter struct {
- w io.Writer
- h [4]byte
-}
-
-func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
- return &lengthDelimitedFrameWriter{w: w}
-}
-
-// Write writes a single frame to the nested writer, prepending it with the length
-// in bytes of data (as a 4-byte, big-endian uint32).
-func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
- binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
- n, err := w.w.Write(w.h[:])
- if err != nil {
- return 0, err
- }
- if n != len(w.h) {
- return 0, io.ErrShortWrite
- }
- return w.w.Write(data)
-}
-
-type lengthDelimitedFrameReader struct {
- r io.ReadCloser
- remaining int
-}
-
-// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
-// frames off of a stream.
-//
-// The protocol is:
-//
-// stream: message ...
-// message: prefix body
-// prefix: 4 byte uint32 in BigEndian order, denotes length of body
-// body: bytes (0..prefix)
-//
-// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
-// will be returned along with the number of bytes read.
-func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
- return &lengthDelimitedFrameReader{r: r}
-}
-
-// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
-// is returned and subsequent calls will attempt to read the last frame. A frame is complete when
-// err is nil.
-func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
- if r.remaining <= 0 {
- header := [4]byte{}
- n, err := io.ReadAtLeast(r.r, header[:4], 4)
- if err != nil {
- return 0, err
- }
- if n != 4 {
- return 0, io.ErrUnexpectedEOF
- }
- frameLength := int(binary.BigEndian.Uint32(header[:]))
- r.remaining = frameLength
- }
-
- expect := r.remaining
- max := expect
- if max > len(data) {
- max = len(data)
- }
- n, err := io.ReadAtLeast(r.r, data[:max], int(max))
- r.remaining -= n
- if err == io.ErrShortBuffer || r.remaining > 0 {
- return n, io.ErrShortBuffer
- }
- if err != nil {
- return n, err
- }
- if n != expect {
- return n, io.ErrUnexpectedEOF
- }
-
- return n, nil
-}
-
-func (r *lengthDelimitedFrameReader) Close() error {
- return r.r.Close()
-}
-
-type jsonFrameReader struct {
- r io.ReadCloser
- decoder *json.Decoder
- remaining []byte
-}
-
-// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
-// of a wire.
-//
-// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
-// the read.
-func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
- return &jsonFrameReader{
- r: r,
- decoder: json.NewDecoder(r),
- }
-}
-
-// Read decodes the next JSON object in the stream, or returns an error. The returned
-// byte slice will be modified the next time ReadFrame is invoked and should not be altered.
-func (r *jsonFrameReader) Read(data []byte) (int, error) {
- // Return whatever remaining data exists from an in progress frame
- if n := len(r.remaining); n > 0 {
- if n <= len(data) {
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], r.remaining...)
- r.remaining = nil
- return n, nil
- }
-
- n = len(data)
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], r.remaining[:n]...)
- r.remaining = r.remaining[n:]
- return n, io.ErrShortBuffer
- }
-
- // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
- // data written to data, or be larger than data and a different array.
- n := len(data)
- m := json.RawMessage(data[:0])
- if err := r.decoder.Decode(&m); err != nil {
- return 0, err
- }
-
- // If capacity of data is less than length of the message, decoder will allocate a new slice
- // and set m to it, which means we need to copy the partial result back into data and preserve
- // the remaining result for subsequent reads.
- if len(m) > n {
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], m[:n]...)
- r.remaining = m[n:]
- return n, io.ErrShortBuffer
- }
- return len(m), nil
-}
-
-func (r *jsonFrameReader) Close() error {
- return r.r.Close()
-}
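
A minimal, self-contained sketch of the length-delimited framing protocol described above (a 4-byte big-endian length prefix followed by the payload), writing one frame into a buffer and reading it back:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	var buf bytes.Buffer

	// Each Write emits one frame: the length prefix followed by the payload.
	w := framer.NewLengthDelimitedFrameWriter(&buf)
	if _, err := w.Write([]byte(`{"kind":"Status"}`)); err != nil {
		panic(err)
	}

	// The reader strips the prefix and returns at most one frame per Read.
	r := framer.NewLengthDelimitedFrameReader(ioutil.NopCloser(&buf))
	frame := make([]byte, 64)
	n, err := r.Read(frame)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded frame: %s\n", frame[:n])
}
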
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
deleted file mode 100644
index 5893df5bd..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package httpstream adds multiplexed streaming support to HTTP requests and
-// responses via connection upgrades.
-package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
deleted file mode 100644
index 00ce5f785..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package httpstream
-
-import (
- "fmt"
- "io"
- "net/http"
- "strings"
- "time"
-)
-
-const (
- HeaderConnection = "Connection"
- HeaderUpgrade = "Upgrade"
- HeaderProtocolVersion = "X-Stream-Protocol-Version"
- HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
-)
-
-// NewStreamHandler defines a function that is called when a new Stream is
-// received. If no error is returned, the Stream is accepted; otherwise,
-// the stream is rejected. After the reply frame has been sent, replySent is closed.
-type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
-
-// NoOpNewStreamHandler is a stream handler that accepts a new stream and
-// performs no other logic.
-func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
-
-// Dialer knows how to open a streaming connection to a server.
-type Dialer interface {
-
- // Dial opens a streaming connection to a server using one of the protocols
- // specified (in order of most preferred to least preferred).
- Dial(protocols ...string) (Connection, string, error)
-}
-
-// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
-// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
-// is invoked, if the upgrade is successful, clients may retrieve the upgraded
-// connection by calling UpgradeRoundTripper.Connection().
-type UpgradeRoundTripper interface {
- http.RoundTripper
- // NewConnection validates the response and creates a new Connection.
- NewConnection(resp *http.Response) (Connection, error)
-}
-
-// ResponseUpgrader knows how to upgrade HTTP requests and responses to
-// add streaming support to them.
-type ResponseUpgrader interface {
- // UpgradeResponse upgrades an HTTP response to one that supports multiplexed
- // streams. newStreamHandler will be called asynchronously whenever the
- // other end of the upgraded connection creates a new stream.
- UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
-}
-
-// Connection represents an upgraded HTTP connection.
-type Connection interface {
- // CreateStream creates a new Stream with the supplied headers.
- CreateStream(headers http.Header) (Stream, error)
- // Close resets all streams and closes the connection.
- Close() error
- // CloseChan returns a channel that is closed when the underlying connection is closed.
- CloseChan() <-chan bool
- // SetIdleTimeout sets the amount of time the connection may remain idle before
- // it is automatically closed.
- SetIdleTimeout(timeout time.Duration)
-}
-
-// Stream represents a bidirectional communications channel that is part of an
-// upgraded connection.
-type Stream interface {
- io.ReadWriteCloser
- // Reset closes both directions of the stream, indicating that neither client
-	// nor server can use it any more.
- Reset() error
- // Headers returns the headers used to create the stream.
- Headers() http.Header
- // Identifier returns the stream's ID.
- Identifier() uint32
-}
-
-// IsUpgradeRequest returns true if the given request is a connection upgrade request
-func IsUpgradeRequest(req *http.Request) bool {
- for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
- if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
- return true
- }
- }
- return false
-}
-
-func negotiateProtocol(clientProtocols, serverProtocols []string) string {
- for i := range clientProtocols {
- for j := range serverProtocols {
- if clientProtocols[i] == serverProtocols[j] {
- return clientProtocols[i]
- }
- }
- }
- return ""
-}
-
-func commaSeparatedHeaderValues(header []string) []string {
- var parsedClientProtocols []string
- for i := range header {
- for _, clientProtocol := range strings.Split(header[i], ",") {
- if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
- parsedClientProtocols = append(parsedClientProtocols, proto)
- }
- }
- }
- return parsedClientProtocols
-}
-
-// Handshake performs subprotocol negotiation. If the client requested a
-// subprotocol, Handshake selects the first client-preferred protocol that is
-// also present in serverProtocols. If a match is found, Handshake adds a
-// response header indicating the chosen subprotocol. If no match is found, an
-// HTTP 403 Forbidden response is written, along with a response header
-// containing the list of protocols the server can accept.
-func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
- clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
- if len(clientProtocols) == 0 {
- return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
- }
-
- if len(serverProtocols) == 0 {
- panic(fmt.Errorf("unable to upgrade: serverProtocols is required"))
- }
-
- negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
- if len(negotiatedProtocol) == 0 {
- for i := range serverProtocols {
- w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
- }
- err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
- http.Error(w, err.Error(), http.StatusForbidden)
- return "", err
- }
-
- w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
- return negotiatedProtocol, nil
-}
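
A minimal sketch of the Handshake negotiation above using net/http/httptest; the protocol names are hypothetical placeholders, since negotiation treats them as opaque strings:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.invalid/stream", nil)
	// The client advertises its supported versions, most preferred first.
	req.Header.Add(httpstream.HeaderProtocolVersion, "example.proto.v2, example.proto.v1")

	w := httptest.NewRecorder()
	// The server only supports v1, so negotiation falls through to it.
	proto, err := httpstream.Handshake(req, w, []string{"example.proto.v1"})
	if err != nil {
		panic(err)
	}
	fmt.Println("negotiated:", proto)
	fmt.Println("echoed header:", w.Header().Get(httpstream.HeaderProtocolVersion))
}
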
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
deleted file mode 100644
index 336b4908b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream"
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/klog/v2"
-)
-
-// connection maintains state about a spdystream.Connection and its associated
-// streams.
-type connection struct {
- conn *spdystream.Connection
- streams []httpstream.Stream
- streamLock sync.Mutex
- newStreamHandler httpstream.NewStreamHandler
- ping func() (time.Duration, error)
-}
-
-// NewClientConnection creates a new SPDY client connection.
-func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
- return NewClientConnectionWithPings(conn, 0)
-}
-
-// NewClientConnectionWithPings creates a new SPDY client connection.
-//
-// If pingPeriod is non-zero, a background goroutine will send periodic Ping
-// frames to the server. Use this to keep idle connections through certain load
-// balancers alive longer.
-func NewClientConnectionWithPings(conn net.Conn, pingPeriod time.Duration) (httpstream.Connection, error) {
- spdyConn, err := spdystream.NewConnection(conn, false)
- if err != nil {
- defer conn.Close()
- return nil, err
- }
-
- return newConnection(spdyConn, httpstream.NoOpNewStreamHandler, pingPeriod, spdyConn.Ping), nil
-}
-
-// NewServerConnection creates a new SPDY server connection. newStreamHandler
-// will be invoked when the server receives a newly created stream from the
-// client.
-func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
- return NewServerConnectionWithPings(conn, newStreamHandler, 0)
-}
-
-// NewServerConnectionWithPings creates a new SPDY server connection.
-// newStreamHandler will be invoked when the server receives a newly created
-// stream from the client.
-//
-// If pingPeriod is non-zero, a background goroutine will send periodic Ping
-// frames to the client. Use this to keep idle connections through certain load
-// balancers alive longer.
-func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration) (httpstream.Connection, error) {
- spdyConn, err := spdystream.NewConnection(conn, true)
- if err != nil {
- defer conn.Close()
- return nil, err
- }
-
- return newConnection(spdyConn, newStreamHandler, pingPeriod, spdyConn.Ping), nil
-}
-
-// newConnection returns a new connection wrapping conn. newStreamHandler
-// will be invoked when the server receives a newly created stream from the
-// client.
-func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection {
- c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn}
- go conn.Serve(c.newSpdyStream)
- if pingPeriod > 0 && pingFn != nil {
- go c.sendPings(pingPeriod)
- }
- return c
-}
-
-// createStreamResponseTimeout indicates how long to wait for the other side to
-// acknowledge the new stream before timing out.
-const createStreamResponseTimeout = 30 * time.Second
-
-// Close first sends a reset for all of the connection's streams, and then
-// closes the underlying spdystream.Connection.
-func (c *connection) Close() error {
- c.streamLock.Lock()
- for _, s := range c.streams {
- // calling Reset instead of Close ensures that all streams are fully torn down
- s.Reset()
- }
- c.streams = make([]httpstream.Stream, 0)
- c.streamLock.Unlock()
-
- // now that all streams are fully torn down, it's safe to call close on the underlying connection,
- // which should be able to terminate immediately at this point, instead of waiting for any
- // remaining graceful stream termination.
- return c.conn.Close()
-}
-
-// CreateStream creates a new stream with the specified headers and registers
-// it with the connection.
-func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
- stream, err := c.conn.CreateStream(headers, nil, false)
- if err != nil {
- return nil, err
- }
- if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
- return nil, err
- }
-
- c.registerStream(stream)
- return stream, nil
-}
-
-// registerStream adds the stream s to the connection's list of streams that
-// it owns.
-func (c *connection) registerStream(s httpstream.Stream) {
- c.streamLock.Lock()
- c.streams = append(c.streams, s)
- c.streamLock.Unlock()
-}
-
-// CloseChan returns a channel that, when closed, indicates that the underlying
-// spdystream.Connection has been closed.
-func (c *connection) CloseChan() <-chan bool {
- return c.conn.CloseChan()
-}
-
-// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
-// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
-// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
-// stream is accepted and registered with the connection.
-func (c *connection) newSpdyStream(stream *spdystream.Stream) {
- replySent := make(chan struct{})
- err := c.newStreamHandler(stream, replySent)
- rejectStream := (err != nil)
- if rejectStream {
- klog.Warningf("Stream rejected: %v", err)
- stream.Reset()
- return
- }
-
- c.registerStream(stream)
- stream.SendReply(http.Header{}, rejectStream)
- close(replySent)
-}
-
-// SetIdleTimeout sets the amount of time the connection may remain idle before
-// it is automatically closed.
-func (c *connection) SetIdleTimeout(timeout time.Duration) {
- c.conn.SetIdleTimeout(timeout)
-}
-
-func (c *connection) sendPings(period time.Duration) {
- t := time.NewTicker(period)
- defer t.Stop()
- for {
- select {
- case <-c.conn.CloseChan():
- return
- case <-t.C:
- }
- if _, err := c.ping(); err != nil {
- klog.V(3).Infof("SPDY Ping failed: %v", err)
- // Continue, in case this is a transient failure.
- // c.conn.CloseChan above will tell us when the connection is
- // actually closed.
- }
- }
-}
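
A minimal sketch of wiring both ends of an in-memory SPDY session with net.Pipe, as is commonly done in tests; the server end accepts every stream via NoOpNewStreamHandler, and error handling beyond construction is omitted:

package example

import (
	"net"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

// pipePair wires both ends of an in-memory SPDY session for tests.
func pipePair() (client, server httpstream.Connection, err error) {
	clientConn, serverConn := net.Pipe()

	// The server end accepts every stream the client opens.
	server, err = spdy.NewServerConnection(serverConn, httpstream.NoOpNewStreamHandler)
	if err != nil {
		return nil, nil, err
	}
	client, err = spdy.NewClientConnection(clientConn)
	if err != nil {
		server.Close()
		return nil, nil, err
	}
	return client, server, nil
}
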
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
deleted file mode 100644
index 4cb1cfadc..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "strings"
- "time"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer"
- "k8s.io/apimachinery/pkg/util/httpstream"
- utilnet "k8s.io/apimachinery/pkg/util/net"
- "k8s.io/apimachinery/third_party/forked/golang/netutil"
-)
-
-// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
-// multiplexed streams. After RoundTrip() is invoked, Conn will be set
-// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
-type SpdyRoundTripper struct {
- //tlsConfig holds the TLS configuration settings to use when connecting
- //to the remote server.
- tlsConfig *tls.Config
-
- /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
- must be safe for use by multiple concurrent goroutines. If this is absolutely
- necessary, we could keep a map from http.Request to net.Conn. In practice,
-	   a client will create an http.Client, set the transport to a new instance of
- SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
- */
- // conn is the underlying network connection to the remote server.
- conn net.Conn
-
- // Dialer is the dialer used to connect. Used if non-nil.
- Dialer *net.Dialer
-
- // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
- // Used primarily for mocking the proxy discovery in tests.
- proxier func(req *http.Request) (*url.URL, error)
-
- // followRedirects indicates if the round tripper should examine responses for redirects and
- // follow them.
- followRedirects bool
- // requireSameHostRedirects restricts redirect following to only follow redirects to the same host
- // as the original request.
- requireSameHostRedirects bool
- // pingPeriod is a period for sending Ping frames over established
- // connections.
- pingPeriod time.Duration
-}
-
-var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
-var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
-var _ utilnet.Dialer = &SpdyRoundTripper{}
-
-// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
-// tlsConfig.
-func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
- return NewRoundTripperWithConfig(RoundTripperConfig{
- TLS: tlsConfig,
- FollowRedirects: followRedirects,
- RequireSameHostRedirects: requireSameHostRedirects,
- })
-}
-
-// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
-// specified tlsConfig and proxy func.
-func NewRoundTripperWithProxy(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
- return NewRoundTripperWithConfig(RoundTripperConfig{
- TLS: tlsConfig,
- FollowRedirects: followRedirects,
- RequireSameHostRedirects: requireSameHostRedirects,
- Proxier: proxier,
- })
-}
-
-// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified
-// configuration.
-func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {
- if cfg.Proxier == nil {
- cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
- }
- return &SpdyRoundTripper{
- tlsConfig: cfg.TLS,
- followRedirects: cfg.FollowRedirects,
- requireSameHostRedirects: cfg.RequireSameHostRedirects,
- proxier: cfg.Proxier,
- pingPeriod: cfg.PingPeriod,
- }
-}
-
-// RoundTripperConfig is a set of options for an SpdyRoundTripper.
-type RoundTripperConfig struct {
- // TLS configuration used by the round tripper.
- TLS *tls.Config
- // Proxier is a proxy function invoked on each request. Optional.
- Proxier func(*http.Request) (*url.URL, error)
- // PingPeriod is a period for sending SPDY Pings on the connection.
- // Optional.
- PingPeriod time.Duration
-
- FollowRedirects bool
- RequireSameHostRedirects bool
-}
-
-// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
-// proxying with a spdy roundtripper.
-func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
- return s.tlsConfig
-}
-
-// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
-func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
- conn, err := s.dial(req)
- if err != nil {
- return nil, err
- }
-
- if err := req.Write(conn); err != nil {
- conn.Close()
- return nil, err
- }
-
- return conn, nil
-}
-
-// dial dials the host specified by req, using TLS if appropriate, optionally
-// using a proxy server if one is configured via environment variables.
-func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
- proxyURL, err := s.proxier(req)
- if err != nil {
- return nil, err
- }
-
- if proxyURL == nil {
- return s.dialWithoutProxy(req.Context(), req.URL)
- }
-
- // ensure we use a canonical host with proxyReq
- targetHost := netutil.CanonicalAddr(req.URL)
-
- // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
- proxyReq := http.Request{
- Method: "CONNECT",
- URL: &url.URL{},
- Host: targetHost,
- }
-
- if pa := s.proxyAuth(proxyURL); pa != "" {
- proxyReq.Header = http.Header{}
- proxyReq.Header.Set("Proxy-Authorization", pa)
- }
-
- proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
- if err != nil {
- return nil, err
- }
-
- proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
- _, err = proxyClientConn.Do(&proxyReq)
- if err != nil && err != httputil.ErrPersistEOF {
- return nil, err
- }
-
- rwc, _ := proxyClientConn.Hijack()
-
- if req.URL.Scheme != "https" {
- return rwc, nil
- }
-
- host, _, err := net.SplitHostPort(targetHost)
- if err != nil {
- return nil, err
- }
-
- tlsConfig := s.tlsConfig
- switch {
- case tlsConfig == nil:
- tlsConfig = &tls.Config{ServerName: host}
- case len(tlsConfig.ServerName) == 0:
- tlsConfig = tlsConfig.Clone()
- tlsConfig.ServerName = host
- }
-
- tlsConn := tls.Client(rwc, tlsConfig)
-
- // need to manually call Handshake() so we can call VerifyHostname() below
- if err := tlsConn.Handshake(); err != nil {
- return nil, err
- }
-
- // Return if we were configured to skip validation
- if tlsConfig.InsecureSkipVerify {
- return tlsConn, nil
- }
-
- if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
- return nil, err
- }
-
- return tlsConn, nil
-}
-
-// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
-func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
- dialAddr := netutil.CanonicalAddr(url)
-
- if url.Scheme == "http" {
- if s.Dialer == nil {
- var d net.Dialer
- return d.DialContext(ctx, "tcp", dialAddr)
- } else {
- return s.Dialer.DialContext(ctx, "tcp", dialAddr)
- }
- }
-
- // TODO validate the TLSClientConfig is set up?
- var conn *tls.Conn
- var err error
- if s.Dialer == nil {
- conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
- } else {
- conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
- }
- if err != nil {
- return nil, err
- }
-
- // Return if we were configured to skip validation
- if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
- return conn, nil
- }
-
- host, _, err := net.SplitHostPort(dialAddr)
- if err != nil {
- return nil, err
- }
- if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
- host = s.tlsConfig.ServerName
- }
- err = conn.VerifyHostname(host)
- if err != nil {
- return nil, err
- }
-
- return conn, nil
-}
-
-// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
-func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
- if proxyURL == nil || proxyURL.User == nil {
- return ""
- }
- credentials := proxyURL.User.String()
- encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
- return fmt.Sprintf("Basic %s", encodedAuth)
-}
-
-// RoundTrip executes the Request and upgrades it. After a successful upgrade,
-// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
-// connection.
-func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- header := utilnet.CloneHeader(req.Header)
- header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
- header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
-
- var (
- conn net.Conn
- rawResponse []byte
- err error
- )
-
- if s.followRedirects {
- conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
- } else {
- clone := utilnet.CloneRequest(req)
- clone.Header = header
- conn, err = s.Dial(clone)
- }
- if err != nil {
- return nil, err
- }
-
- responseReader := bufio.NewReader(
- io.MultiReader(
- bytes.NewBuffer(rawResponse),
- conn,
- ),
- )
-
- resp, err := http.ReadResponse(responseReader, nil)
- if err != nil {
- if conn != nil {
- conn.Close()
- }
- return nil, err
- }
-
- s.conn = conn
-
- return resp, nil
-}
-
-// NewConnection validates the upgrade response, creating and returning a new
-// httpstream.Connection if there were no errors.
-func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
- connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
- upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
- if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- defer resp.Body.Close()
- responseError := ""
- responseErrorBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- responseError = "unable to read error from server response"
- } else {
- // TODO: I don't belong here, I should be abstracted from this class
- if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
- if status, ok := obj.(*metav1.Status); ok {
- return nil, &apierrors.StatusError{ErrStatus: *status}
- }
- }
- responseError = string(responseErrorBytes)
- responseError = strings.TrimSpace(responseError)
- }
-
- return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
- }
-
- return NewClientConnectionWithPings(s.conn, s.pingPeriod)
-}
-
-// statusScheme is a private scheme used for decoding here until someone fixes the TODO in NewConnection
-var statusScheme = runtime.NewScheme()
-
-// statusCodecs knows how to decode the metav1.Status objects embedded in error responses.
-var statusCodecs = serializer.NewCodecFactory(statusScheme)
-
-func init() {
- statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
- &metav1.Status{},
- )
-}
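
The round tripper deleted above is normally used in three steps: build a request, let RoundTrip perform the Upgrade handshake, then call NewConnection on the response. Below is a rough client-side sketch under the vendored import paths shown in this diff; the endpoint URL is hypothetical and error handling is trimmed.

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

// dialSPDY upgrades a plain HTTP request into a multiplexed SPDY connection.
func dialSPDY(endpoint string) (httpstream.Connection, error) {
	// A nil TLS config is acceptable for plain http endpoints.
	rt := spdy.NewRoundTripper(nil, false, false)
	req, err := http.NewRequest(http.MethodPost, endpoint, nil)
	if err != nil {
		return nil, err
	}
	// RoundTrip performs the Upgrade handshake; NewConnection validates the
	// 101 Switching Protocols response and wraps the hijacked connection.
	resp, err := rt.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	return rt.NewConnection(resp)
}

func main() {
	if conn, err := dialSPDY("http://127.0.0.1:8080/exec"); err == nil {
		conn.Close()
	}
}
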
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
deleted file mode 100644
index f17eb09e9..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "bufio"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "sync/atomic"
- "time"
-
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-const HeaderSpdy31 = "SPDY/3.1"
-
-// responseUpgrader knows how to upgrade HTTP responses. It
-// implements the httpstream.ResponseUpgrader interface.
-type responseUpgrader struct {
- pingPeriod time.Duration
-}
-
-// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
-// calls will be handled directly by the underlying net.Conn with the exception
-// of Read and Close calls, which will consider data in the bufio.Reader. This
-// ensures that data already inside the used bufio.Reader instance is also
-// read.
-type connWrapper struct {
- net.Conn
- closed int32
- bufReader *bufio.Reader
-}
-
-func (w *connWrapper) Read(b []byte) (n int, err error) {
- if atomic.LoadInt32(&w.closed) == 1 {
- return 0, io.EOF
- }
- return w.bufReader.Read(b)
-}
-
-func (w *connWrapper) Close() error {
- err := w.Conn.Close()
- atomic.StoreInt32(&w.closed, 1)
- return err
-}
-
-// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
-// capable of upgrading HTTP responses using SPDY/3.1 via the
-// spdystream package.
-func NewResponseUpgrader() httpstream.ResponseUpgrader {
- return NewResponseUpgraderWithPings(0)
-}
-
-// NewResponseUpgraderWithPings returns a new httpstream.ResponseUpgrader that
-// is capable of upgrading HTTP responses using SPDY/3.1 via the spdystream
-// package.
-//
-// If pingPeriod is non-zero, for each incoming connection a background
-// goroutine will send periodic Ping frames to the remote peer. Use this to keep
-// idle connections through certain load balancers alive longer.
-func NewResponseUpgraderWithPings(pingPeriod time.Duration) httpstream.ResponseUpgrader {
- return responseUpgrader{pingPeriod: pingPeriod}
-}
-
-// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
-// streams. newStreamHandler will be called synchronously whenever the
-// other end of the upgraded connection creates a new stream.
-func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
- connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
- upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
- if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
- http.Error(w, errorMsg, http.StatusBadRequest)
- return nil
- }
-
- hijacker, ok := w.(http.Hijacker)
- if !ok {
- errorMsg := "unable to upgrade: unable to hijack response"
- http.Error(w, errorMsg, http.StatusInternalServerError)
- return nil
- }
-
- w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
- w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
- w.WriteHeader(http.StatusSwitchingProtocols)
-
- conn, bufrw, err := hijacker.Hijack()
- if err != nil {
- runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
- return nil
- }
-
- connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
- spdyConn, err := NewServerConnectionWithPings(connWithBuf, newStreamHandler, u.pingPeriod)
- if err != nil {
- runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
- return nil
- }
-
- return spdyConn
-}
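
On the server side, UpgradeResponse is typically called from an http.Handler; it hijacks the response, answers with 101 Switching Protocols, and hands new streams to the supplied callback. A sketch of that wiring, assuming the httpstream.NewStreamHandler signature func(httpstream.Stream, <-chan struct{}) error; the handler body is illustrative only.

package main

import (
	"log"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func handleUpgrade(w http.ResponseWriter, req *http.Request) {
	streams := make(chan httpstream.Stream, 4)
	upgrader := spdy.NewResponseUpgrader()
	conn := upgrader.UpgradeResponse(w, req, func(s httpstream.Stream, replySent <-chan struct{}) error {
		streams <- s
		return nil
	})
	if conn == nil {
		return // UpgradeResponse already wrote an HTTP error
	}
	defer conn.Close()
	// Block until the client opens its first stream, then tear down.
	<-streams
}

func main() {
	log.Fatal(http.ListenAndServe(":8080", http.HandlerFunc(handleUpgrade)))
}
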
diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
deleted file mode 100644
index acfeb827c..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "time"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- DefaultStreamCreationTimeout = 30 * time.Second
-
- // The SPDY subprotocol "channel.k8s.io" is used for remote command
- // attachment/execution. This represents the initial unversioned subprotocol,
- // which has the known bugs http://issues.k8s.io/13394 and
- // http://issues.k8s.io/13395.
- StreamProtocolV1Name = "channel.k8s.io"
-
- // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command
- // attachment/execution. It is the second version of the subprotocol and
- // resolves the issues present in the first version.
- StreamProtocolV2Name = "v2.channel.k8s.io"
-
- // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command
- // attachment/execution. It is the third version of the subprotocol and
- // adds support for resizing container terminals.
- StreamProtocolV3Name = "v3.channel.k8s.io"
-
- // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command
- // attachment/execution. It is the 4th version of the subprotocol and
- // adds support for exit codes.
- StreamProtocolV4Name = "v4.channel.k8s.io"
-
- NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
- ExitCodeCauseType = metav1.CauseType("ExitCode")
-)
-
-var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}
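
SupportedStreamingProtocols above is ordered newest-first, so a server can walk it against the protocols a client offers and settle on the most recent subprotocol both sides understand. A small sketch of that idea; negotiate is a hypothetical helper, not part of the deleted file.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/remotecommand"
)

// negotiate returns the newest subprotocol supported by both sides.
func negotiate(clientProtocols []string) (string, bool) {
	for _, server := range remotecommand.SupportedStreamingProtocols {
		for _, client := range clientProtocols {
			if server == client {
				return server, true
			}
		}
	}
	return "", false
}

func main() {
	chosen, ok := negotiate([]string{remotecommand.StreamProtocolV2Name, remotecommand.StreamProtocolV4Name})
	fmt.Println(chosen, ok) // v4.channel.k8s.io true
}
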
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
deleted file mode 100644
index 7fe706467..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "unicode"
-
- jsonutil "k8s.io/apimachinery/pkg/util/json"
-
- "k8s.io/klog/v2"
- "sigs.k8s.io/yaml"
-)
-
-// Unmarshal unmarshals the given data.
-// If v is a *map[string]interface{}, *[]interface{}, or *interface{}, numbers
-// are converted to int64 or float64.
-func Unmarshal(data []byte, v interface{}) error {
- preserveIntFloat := func(d *json.Decoder) *json.Decoder {
- d.UseNumber()
- return d
- }
- switch v := v.(type) {
- case *map[string]interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertMapNumbers(*v, 0)
- case *[]interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertSliceNumbers(*v, 0)
- case *interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertInterfaceNumbers(v, 0)
- default:
- return yaml.Unmarshal(data, v)
- }
-}
-
-// ToJSON converts a single YAML document into a JSON document
-// or returns an error. If the document appears to be JSON the
-// YAML decoding path is not used (so that error messages are
-// JSON specific).
-func ToJSON(data []byte) ([]byte, error) {
- if hasJSONPrefix(data) {
- return data, nil
- }
- return yaml.YAMLToJSON(data)
-}
-
-// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
-// separating individual documents. It first converts the YAML
-// body to JSON, then unmarshals the JSON.
-type YAMLToJSONDecoder struct {
- reader Reader
-}
-
-// NewYAMLToJSONDecoder decodes YAML documents from the provided
-// stream in chunks by converting each document (as defined by
-// the YAML spec) into its own chunk, converting it to JSON via
-// yaml.YAMLToJSON, and then passing it to json.Decoder.
-func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
- reader := bufio.NewReader(r)
- return &YAMLToJSONDecoder{
- reader: NewYAMLReader(reader),
- }
-}
-
-// Decode reads a YAML document as JSON from the stream or returns
-// an error. The decoding rules match json.Unmarshal, not
-// yaml.Unmarshal.
-func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
- bytes, err := d.reader.Read()
- if err != nil && err != io.EOF {
- return err
- }
-
- if len(bytes) != 0 {
- err := yaml.Unmarshal(bytes, into)
- if err != nil {
- return YAMLSyntaxError{err}
- }
- }
- return err
-}
-
-// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
-// the data is not sufficient.
-type YAMLDecoder struct {
- r io.ReadCloser
- scanner *bufio.Scanner
- remaining []byte
-}
-
-// NewDocumentDecoder decodes YAML documents from the provided
-// stream in chunks by converting each document (as defined by
-// the YAML spec) into its own chunk. io.ErrShortBuffer will be
-// returned if the entire buffer could not be read to assist
-// the caller in framing the chunk.
-func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
- scanner := bufio.NewScanner(r)
- // initial buffer allocation: 4 KB
- buf := make([]byte, 4*1024)
- // maximum size used to buffer a single token: 5 MB
- scanner.Buffer(buf, 5*1024*1024)
- scanner.Split(splitYAMLDocument)
- return &YAMLDecoder{
- r: r,
- scanner: scanner,
- }
-}
-
-// Read reads the previous slice into the buffer, or attempts to read
-// the next chunk.
-// TODO: switch to readline approach.
-func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
- left := len(d.remaining)
- if left == 0 {
- // return the next chunk from the stream
- if !d.scanner.Scan() {
- err := d.scanner.Err()
- if err == nil {
- err = io.EOF
- }
- return 0, err
- }
- out := d.scanner.Bytes()
- d.remaining = out
- left = len(out)
- }
-
- // fits within data
- if left <= len(data) {
- copy(data, d.remaining)
- d.remaining = nil
- return left, nil
- }
-
- // caller will need to reread
- copy(data, d.remaining[:len(data)])
- d.remaining = d.remaining[len(data):]
- return len(data), io.ErrShortBuffer
-}
-
-func (d *YAMLDecoder) Close() error {
- return d.r.Close()
-}
-
-const yamlSeparator = "\n---"
-const separator = "---"
-
-// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
-func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
- sep := len([]byte(yamlSeparator))
- if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
- // We have a potential document terminator
- i += sep
- after := data[i:]
- if len(after) == 0 {
- // we can't read any more characters
- if atEOF {
- return len(data), data[:len(data)-sep], nil
- }
- return 0, nil, nil
- }
- if j := bytes.IndexByte(after, '\n'); j >= 0 {
- return i + j + 1, data[0 : i-sep], nil
- }
- return 0, nil, nil
- }
- // If we're at EOF, we have a final, non-terminated line. Return it.
- if atEOF {
- return len(data), data, nil
- }
- // Request more data.
- return 0, nil, nil
-}
-
-// decoder is a convenience interface for Decode.
-type decoder interface {
- Decode(into interface{}) error
-}
-
-// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
-// YAML documents by sniffing for a leading { character.
-type YAMLOrJSONDecoder struct {
- r io.Reader
- bufferSize int
-
- decoder decoder
- rawData []byte
-}
-
-type JSONSyntaxError struct {
- Line int
- Err error
-}
-
-func (e JSONSyntaxError) Error() string {
- return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error())
-}
-
-type YAMLSyntaxError struct {
- err error
-}
-
-func (e YAMLSyntaxError) Error() string {
- return e.err.Error()
-}
-
-// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
-// or JSON documents from the given reader as a stream. bufferSize determines
-// how far into the stream the decoder will look to figure out whether this
-// is a JSON stream (has whitespace followed by an open brace).
-func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
- return &YAMLOrJSONDecoder{
- r: r,
- bufferSize: bufferSize,
- }
-}
-
-// Decode unmarshals the next object from the underlying stream into the
-// provided object, or returns an error.
-func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
- if d.decoder == nil {
- buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
- if isJSON {
- d.decoder = json.NewDecoder(buffer)
- d.rawData = origData
- } else {
- d.decoder = NewYAMLToJSONDecoder(buffer)
- }
- }
- err := d.decoder.Decode(into)
- if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
- if syntax, ok := err.(*json.SyntaxError); ok {
- data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
- if readErr != nil {
- klog.V(4).Infof("reading stream failed: %v", readErr)
- }
- js := string(data)
-
- // if contents from io.Reader are not complete,
- // use the original raw data to prevent panic
- if int64(len(js)) <= syntax.Offset {
- js = string(d.rawData)
- }
-
- start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
- line := strings.Count(js[:start], "\n")
- return JSONSyntaxError{
- Line: line,
- Err: fmt.Errorf(syntax.Error()),
- }
- }
- }
- return err
-}
-
-type Reader interface {
- Read() ([]byte, error)
-}
-
-type YAMLReader struct {
- reader Reader
-}
-
-func NewYAMLReader(r *bufio.Reader) *YAMLReader {
- return &YAMLReader{
- reader: &LineReader{reader: r},
- }
-}
-
-// Read returns a full YAML document.
-func (r *YAMLReader) Read() ([]byte, error) {
- var buffer bytes.Buffer
- for {
- line, err := r.reader.Read()
- if err != nil && err != io.EOF {
- return nil, err
- }
-
- sep := len([]byte(separator))
- if i := bytes.Index(line, []byte(separator)); i == 0 {
- // We have a potential document terminator
- i += sep
- after := line[i:]
- if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
- if buffer.Len() != 0 {
- return buffer.Bytes(), nil
- }
- if err == io.EOF {
- return nil, err
- }
- }
- }
- if err == io.EOF {
- if buffer.Len() != 0 {
- // If we're at EOF, we have a final, non-terminated line. Return it.
- return buffer.Bytes(), nil
- }
- return nil, err
- }
- buffer.Write(line)
- }
-}
-
-type LineReader struct {
- reader *bufio.Reader
-}
-
-// Read returns a single line (terminated with '\n') from the underlying reader.
-// An error is returned iff there is an error with the underlying reader.
-func (r *LineReader) Read() ([]byte, error) {
- var (
- isPrefix bool = true
- err error = nil
- line []byte
- buffer bytes.Buffer
- )
-
- for isPrefix && err == nil {
- line, isPrefix, err = r.reader.ReadLine()
- buffer.Write(line)
- }
- buffer.WriteByte('\n')
- return buffer.Bytes(), err
-}
-
-// GuessJSONStream scans the provided reader up to size, looking
-// for an open brace indicating this is JSON. It will return the
-// bufio.Reader it creates for the consumer.
-func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
- buffer := bufio.NewReaderSize(r, size)
- b, _ := buffer.Peek(size)
- return buffer, b, hasJSONPrefix(b)
-}
-
-var jsonPrefix = []byte("{")
-
-// hasJSONPrefix returns true if the provided buffer appears to start with
-// a JSON open brace.
-func hasJSONPrefix(buf []byte) bool {
- return hasPrefix(buf, jsonPrefix)
-}
-
-// hasPrefix returns true if the first non-whitespace bytes in buf
-// are prefix.
-func hasPrefix(buf []byte, prefix []byte) bool {
- trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
- return bytes.HasPrefix(trim, prefix)
-}
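
Typical use of the decoder deleted above is a loop that keeps calling Decode until io.EOF, which handles both multi-document YAML and single JSON documents. A short, self-contained sketch:

package main

import (
	"fmt"
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	input := "a: 1\n---\nb: 2\n"
	dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(input), 4096)
	for {
		var doc map[string]interface{}
		if err := dec.Decode(&doc); err != nil {
			if err == io.EOF {
				break
			}
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println(doc) // map[a:1], then map[b:2]
	}
}
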
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
deleted file mode 100644
index 29574fd6d..000000000
--- a/vendor/k8s.io/apimachinery/pkg/version/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:openapi-gen=true
-
-// Package version supplies the type for version information collected at build time.
-package version // import "k8s.io/apimachinery/pkg/version"
diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
deleted file mode 100644
index 5e041d6f3..000000000
--- a/vendor/k8s.io/apimachinery/pkg/version/helpers.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package version
-
-import (
- "regexp"
- "strconv"
- "strings"
-)
-
-type versionType int
-
-const (
- // The bigger the version type number, the higher its priority.
- versionTypeAlpha versionType = iota
- versionTypeBeta
- versionTypeGA
-)
-
-var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$")
-
-func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) {
- var err error
- submatches := kubeVersionRegex.FindStringSubmatch(v)
- if len(submatches) != 4 {
- return 0, 0, 0, false
- }
- switch submatches[2] {
- case "alpha":
- vType = versionTypeAlpha
- case "beta":
- vType = versionTypeBeta
- case "":
- vType = versionTypeGA
- default:
- return 0, 0, 0, false
- }
- if majorVersion, err = strconv.Atoi(submatches[1]); err != nil {
- return 0, 0, 0, false
- }
- if vType != versionTypeGA {
- if minorVersion, err = strconv.Atoi(submatches[3]); err != nil {
- return 0, 0, 0, false
- }
- }
- return majorVersion, vType, minorVersion, true
-}
-
-// CompareKubeAwareVersionStrings compares two kube-like version strings.
-// Kube-like version strings start with a v, followed by a major version, optional "alpha" or "beta" strings
-// followed by a minor version (e.g. v1, v2beta1). Versions will be sorted based on GA/alpha/beta first and then major
-// and minor versions. e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
-func CompareKubeAwareVersionStrings(v1, v2 string) int {
- if v1 == v2 {
- return 0
- }
- v1major, v1type, v1minor, ok1 := parseKubeVersion(v1)
- v2major, v2type, v2minor, ok2 := parseKubeVersion(v2)
- switch {
- case !ok1 && !ok2:
- return strings.Compare(v2, v1)
- case !ok1 && ok2:
- return -1
- case ok1 && !ok2:
- return 1
- }
- if v1type != v2type {
- return int(v1type) - int(v2type)
- }
- if v1major != v2major {
- return v1major - v2major
- }
- return v1minor - v2minor
-}
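
The comparison above sorts GA releases ahead of beta and beta ahead of alpha, then by major and minor number. A quick sketch of sorting with it, matching the ordering example in the doc comment:

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	vs := []string{"v1alpha1", "v2", "v1beta1", "v1", "v1beta2"}
	// Sort descending: most stable and newest versions first.
	sort.Slice(vs, func(i, j int) bool {
		return version.CompareKubeAwareVersionStrings(vs[i], vs[j]) > 0
	})
	fmt.Println(vs) // [v2 v1 v1beta2 v1beta1 v1alpha1]
}
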
diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go
deleted file mode 100644
index 72727b503..000000000
--- a/vendor/k8s.io/apimachinery/pkg/version/types.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package version
-
-// Info contains versioning information.
-// TODO: Add []string of api versions supported? It's still unclear
-// how we'll want to distribute that information.
-type Info struct {
- Major string `json:"major"`
- Minor string `json:"minor"`
- GitVersion string `json:"gitVersion"`
- GitCommit string `json:"gitCommit"`
- GitTreeState string `json:"gitTreeState"`
- BuildDate string `json:"buildDate"`
- GoVersion string `json:"goVersion"`
- Compiler string `json:"compiler"`
- Platform string `json:"platform"`
-}
-
-// String returns info as a human-friendly version string.
-func (info Info) String() string {
- return info.GitVersion
-}