Diffstat (limited to 'vendor/k8s.io/kubernetes/pkg/api/validation')
-rw-r--r--  vendor/k8s.io/kubernetes/pkg/api/validation/doc.go          19
-rw-r--r--  vendor/k8s.io/kubernetes/pkg/api/validation/events.go       80
-rw-r--r--  vendor/k8s.io/kubernetes/pkg/api/validation/schema.go      435
-rw-r--r--  vendor/k8s.io/kubernetes/pkg/api/validation/validation.go 4241
4 files changed, 4775 insertions, 0 deletions
diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go
new file mode 100644
index 000000000..30f541de3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package validation has functions for validating the correctness of api
+// objects and explaining what is wrong with them when they aren't valid.
+package validation // import "k8s.io/kubernetes/pkg/api/validation"
diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go
new file mode 100644
index 000000000..a255f58e2
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/kubernetes/pkg/api"
+ apiutil "k8s.io/kubernetes/pkg/api/util"
+)
+
+// ValidateEvent makes sure that the event makes sense.
+func ValidateEvent(event *api.Event) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ // Make sure event.Namespace and the involvedObject.Namespace agree
+ if len(event.InvolvedObject.Namespace) == 0 {
+ // event.Namespace must also be empty (or "default", for compatibility with old clients)
+ if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
+ }
+ } else {
+ // event namespace must match
+ if event.Namespace != event.InvolvedObject.Namespace {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
+ }
+ }
+
+ // For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds
+ if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil {
+ if namespaced && len(event.InvolvedObject.Namespace) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind)))
+ }
+ if !namespaced && len(event.InvolvedObject.Namespace) > 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind)))
+ }
+ }
+
+ for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
+ }
+ return allErrs
+}
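+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+//
+//	ev := &api.Event{}
+//	ev.Namespace = "kube-system"
+//	ev.InvolvedObject = api.ObjectReference{Kind: "Pod", Namespace: "default", APIVersion: "v1"}
+//	errs := ValidateEvent(ev) // non-empty: event.namespace does not match involvedObject.namespace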
+
+// isNamespacedKind checks whether the kind in groupVersion is namespaced, as opposed to scoped at the root of the api hierarchy
+func isNamespacedKind(kind, groupVersion string) (bool, error) {
+ group := apiutil.GetGroup(groupVersion)
+ g, err := api.Registry.Group(group)
+ if err != nil {
+ return false, err
+ }
+ restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, apiutil.GetVersion(groupVersion))
+ if err != nil {
+ return false, err
+ }
+ scopeName := restMapping.Scope.Name()
+ if scopeName == meta.RESTScopeNameNamespace {
+ return true, nil
+ }
+ return false, nil
+}
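+
+// For example (editor's note, not part of the vendored source): with the
+// default registry, isNamespacedKind("Pod", "v1") reports true, while
+// isNamespacedKind("Node", "v1") reports false, since nodes are root-scoped.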
diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go b/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go
new file mode 100644
index 000000000..3f98345c3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go
@@ -0,0 +1,435 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/emicklei/go-restful-swagger12"
+ ejson "github.com/exponent-io/jsonpath"
+ "github.com/golang/glog"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ apiutil "k8s.io/kubernetes/pkg/api/util"
+)
+
+type InvalidTypeError struct {
+ ExpectedKind reflect.Kind
+ ObservedKind reflect.Kind
+ FieldName string
+}
+
+func (i *InvalidTypeError) Error() string {
+ return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String())
+}
+
+func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error {
+ return &InvalidTypeError{expected, observed, fieldName}
+}
+
+// TypeNotFoundError is returned when the specified type
+// cannot be found in the schema
+type TypeNotFoundError string
+
+func (tnfe TypeNotFoundError) Error() string {
+ return fmt.Sprintf("couldn't find type: %s", string(tnfe))
+}
+
+// Schema is an interface that knows how to validate an API object serialized to a byte array.
+type Schema interface {
+ ValidateBytes(data []byte) error
+}
+
+type NullSchema struct{}
+
+func (NullSchema) ValidateBytes(data []byte) error { return nil }
+
+type NoDoubleKeySchema struct{}
+
+func (NoDoubleKeySchema) ValidateBytes(data []byte) error {
+ var list []error = nil
+ if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil {
+ list = append(list, err)
+ }
+ if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil {
+ list = append(list, err)
+ }
+ return utilerrors.NewAggregate(list)
+}
+
+func validateNoDuplicateKeys(data []byte, path ...string) error {
+ r := ejson.NewDecoder(bytes.NewReader(data))
+ // This is Go being unfriendly. The 'path ...string' comes in as a
+ // []string, and SeekTo takes ...interface{}, so we can't just pass
+ // the path straight in, we have to copy it. *sigh*
+ ifacePath := []interface{}{}
+ for ix := range path {
+ ifacePath = append(ifacePath, path[ix])
+ }
+ found, err := r.SeekTo(ifacePath...)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+ seen := map[string]bool{}
+ for {
+ tok, err := r.Token()
+ if err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case json.Delim:
+ if t.String() == "}" {
+ return nil
+ }
+ case ejson.KeyString:
+ if seen[string(t)] {
+ return fmt.Errorf("duplicate key: %s", string(t))
+ } else {
+ seen[string(t)] = true
+ }
+ }
+ }
+}
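+
+// Illustrative input (editor's sketch, not part of the vendored source):
+// validateNoDuplicateKeys(data, "metadata", "labels") returns an error for
+//
+//	{"metadata": {"labels": {"app": "a", "app": "b"}}}
+//
+// because the key "app" appears twice within metadata.labels.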
+
+type ConjunctiveSchema []Schema
+
+func (c ConjunctiveSchema) ValidateBytes(data []byte) error {
+ var list []error = nil
+ schemas := []Schema(c)
+ for ix := range schemas {
+ if err := schemas[ix].ValidateBytes(data); err != nil {
+ list = append(list, err)
+ }
+ }
+ return utilerrors.NewAggregate(list)
+}
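+
+// A ConjunctiveSchema aggregates the errors of all its member schemas, so a
+// caller can combine checks (editor's sketch; swaggerSchema is assumed to be
+// a previously constructed Schema):
+//
+//	var s Schema = ConjunctiveSchema{swaggerSchema, NoDoubleKeySchema{}}
+//	err := s.ValidateBytes(data)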
+
+type SwaggerSchema struct {
+ api swagger.ApiDeclaration
+ delegate Schema // For delegating to other api groups
+}
+
+func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) {
+ schema := &SwaggerSchema{}
+ err := json.Unmarshal(data, &schema.api)
+ if err != nil {
+ return nil, err
+ }
+ schema.delegate = factory
+ return schema, nil
+}
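+
+// Typical construction (editor's sketch, not part of the vendored source;
+// the file name is hypothetical):
+//
+//	data, _ := ioutil.ReadFile("swagger.json") // a swagger 1.2 api declaration
+//	schema, err := NewSwaggerSchemaFromBytes(data, nil) // nil delegate: no cross-group delegation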
+
+// validateList unpacks a list and validates every item in the list.
+// It returns nil if every item is ok.
+// Otherwise it returns an error list containing the errors of every item.
+func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error {
+ items, exists := obj["items"]
+ if !exists {
+ return []error{fmt.Errorf("no items field in %#v", obj)}
+ }
+ return s.validateItems(items)
+}
+
+func (s *SwaggerSchema) validateItems(items interface{}) []error {
+ allErrs := []error{}
+ itemList, ok := items.([]interface{})
+ if !ok {
+ return append(allErrs, fmt.Errorf("items isn't a slice"))
+ }
+ for i, item := range itemList {
+ fields, ok := item.(map[string]interface{})
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i))
+ continue
+ }
+ groupVersion := fields["apiVersion"]
+ if groupVersion == nil {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i))
+ continue
+ }
+ itemVersion, ok := groupVersion.(string)
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i))
+ continue
+ }
+ if len(itemVersion) == 0 {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i))
+ }
+ kind := fields["kind"]
+ if kind == nil {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i))
+ continue
+ }
+ itemKind, ok := kind.(string)
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i))
+ continue
+ }
+ if len(itemKind) == 0 {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i))
+ }
+ version := apiutil.GetVersion(itemVersion)
+ errs := s.ValidateObject(item, "", version+"."+itemKind)
+ if len(errs) >= 1 {
+ allErrs = append(allErrs, errs...)
+ }
+ }
+
+ return allErrs
+}
+
+func (s *SwaggerSchema) ValidateBytes(data []byte) error {
+ var obj interface{}
+ out, err := yaml.ToJSON(data)
+ if err != nil {
+ return err
+ }
+ data = out
+ if err := json.Unmarshal(data, &obj); err != nil {
+ return err
+ }
+ fields, ok := obj.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("error in unmarshaling data %s", string(data))
+ }
+ groupVersion := fields["apiVersion"]
+ if groupVersion == nil {
+ return fmt.Errorf("apiVersion not set")
+ }
+ if _, ok := groupVersion.(string); !ok {
+ return fmt.Errorf("apiVersion isn't string type")
+ }
+ kind := fields["kind"]
+ if kind == nil {
+ return fmt.Errorf("kind not set")
+ }
+ if _, ok := kind.(string); !ok {
+ return fmt.Errorf("kind isn't string type")
+ }
+ if strings.HasSuffix(kind.(string), "List") {
+ return utilerrors.NewAggregate(s.validateList(fields))
+ }
+ version := apiutil.GetVersion(groupVersion.(string))
+ allErrs := s.ValidateObject(obj, "", version+"."+kind.(string))
+ if len(allErrs) == 1 {
+ return allErrs[0]
+ }
+ return utilerrors.NewAggregate(allErrs)
+}
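+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+//
+//	err := schema.ValidateBytes([]byte("apiVersion: v1\nkind: Pod\nspec: {}"))
+//
+// YAML input is converted to JSON before unmarshaling, and any kind whose
+// name ends in "List" is validated item by item via validateList.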
+
+func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error {
+ allErrs := []error{}
+ models := s.api.Models
+ model, ok := models.At(typeName)
+
+ // Verify the api version matches. This is required for nested types with differing api versions because
+ // s.api only has schema for 1 api version (the parent object type's version).
+ // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1
+ // api to delegate to the schema for the /v1 api.
+ // Only do this for !ok objects so that cross ApiVersion vendored types take precedence.
+ if !ok && s.delegate != nil {
+ fields, mapOk := obj.(map[string]interface{})
+ if !mapOk {
+ return append(allErrs, fmt.Errorf("field %s for %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, typeName, obj))
+ }
+ if delegated, err := s.delegateIfDifferentApiVersion(&unstructured.Unstructured{Object: fields}); delegated {
+ if err != nil {
+ allErrs = append(allErrs, err)
+ }
+ return allErrs
+ }
+ }
+
+ if !ok {
+ return append(allErrs, TypeNotFoundError(typeName))
+ }
+ properties := model.Properties
+ if len(properties.List) == 0 {
+ // The object does not have any sub-fields.
+ return nil
+ }
+ fields, ok := obj.(map[string]interface{})
+ if !ok {
+ return append(allErrs, fmt.Errorf("field %s for %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, typeName, obj))
+ }
+ if len(fieldName) > 0 {
+ fieldName = fieldName + "."
+ }
+ // handle required fields
+ for _, requiredKey := range model.Required {
+ if _, ok := fields[requiredKey]; !ok {
+ allErrs = append(allErrs, fmt.Errorf("field %s%s for %s is required", fieldName, requiredKey, typeName))
+ }
+ }
+ for key, value := range fields {
+ details, ok := properties.At(key)
+
+ // Special case for runtime.RawExtension and runtime.Object because they always fail to validate.
+ // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected
+ // super-type (RawExtension)
+ if s.isGenericArray(details) {
+ errs := s.validateItems(value)
+ if len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
+ continue
+ }
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName))
+ continue
+ }
+ if details.Type == nil && details.Ref == nil {
+ allErrs = append(allErrs, fmt.Errorf("could not find the type of %s%s from object %v", fieldName, key, details))
+ }
+ var fieldType string
+ if details.Type != nil {
+ fieldType = *details.Type
+ } else {
+ fieldType = *details.Ref
+ }
+ if value == nil {
+ glog.V(2).Infof("Skipping nil field: %s%s", fieldName, key)
+ continue
+ }
+ errs := s.validateField(value, fieldName+key, fieldType, &details)
+ if len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
+ }
+ return allErrs
+}
+
+// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the
+// current SwaggerSchema.
+// The first return value is true if the validation was delegated (to the SwaggerSchema of a different ApiGroup).
+// The second return value is the result of the delegated validation, if it was performed.
+func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj *unstructured.Unstructured) (bool, error) {
+ // Never delegate objects in the same ApiVersion or we will get infinite recursion
+ if !s.isDifferentApiVersion(obj) {
+ return false, nil
+ }
+
+ // Convert the object back into bytes so that we can pass it to the ValidateBytes function
+ m, err := json.Marshal(obj.Object)
+ if err != nil {
+ return true, err
+ }
+
+ // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup
+ return true, s.delegate.ValidateBytes(m)
+}
+
+// isDifferentApiVersion returns true if obj lives in a different ApiVersion than the SwaggerSchema does.
+// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored.
+func (s *SwaggerSchema) isDifferentApiVersion(obj *unstructured.Unstructured) bool {
+ groupVersion := obj.GetAPIVersion()
+ return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion
+}
+
+// isGenericArray returns true if p is an array of generic Objects - either RawExtension or Object.
+func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool {
+ return p.DataTypeFields.Type != nil &&
+ *p.DataTypeFields.Type == "array" &&
+ p.Items != nil &&
+ p.Items.Ref != nil &&
+ (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object")
+}
+
+// This matches a type name in the swagger spec, such as "v1.Binding".
+var versionRegexp = regexp.MustCompile(`^(v.+|unversioned|types)\..*`)
+
+func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error {
+ allErrs := []error{}
+ if reflect.TypeOf(value) == nil {
+ return append(allErrs, fmt.Errorf("unexpected nil value for field %v", fieldName))
+ }
+ // TODO: caesarxuchao: because we have multiple group/versions and objects
+ // may reference objects in other groups, the commented-out way of checking
+ // if a fieldType is a type defined by us is outdated. We use a hacky way
+ // for now.
+ // TODO: the type name in the swagger spec is something like "v1.Binding",
+ // and the "v1" is generated from the package name, not the groupVersion of
+ // the type. We need to fix go-restful to embed the group name in the type
+ // name, otherwise we couldn't handle identically named types in different
+ // groups correctly.
+ if versionRegexp.MatchString(fieldType) {
+ // if strings.HasPrefix(fieldType, apiVersion) {
+ return s.ValidateObject(value, fieldName, fieldType)
+ }
+ switch fieldType {
+ case "string":
+ // Be loose about what we accept for 'string' since we use IntOrString in a couple of places
+ _, isString := value.(string)
+ _, isNumber := value.(float64)
+ _, isInteger := value.(int)
+ if !isString && !isNumber && !isInteger {
+ return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ case "array":
+ arr, ok := value.([]interface{})
+ if !ok {
+ return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ var arrType string
+ if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil {
+ return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ if fieldDetails.Items.Ref != nil {
+ arrType = *fieldDetails.Items.Ref
+ } else {
+ arrType = *fieldDetails.Items.Type
+ }
+ for ix := range arr {
+ errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil)
+ if len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
+ }
+ case "uint64":
+ case "int64":
+ case "integer":
+ _, isNumber := value.(float64)
+ _, isInteger := value.(int)
+ if !isNumber && !isInteger {
+ return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ case "float64":
+ if _, ok := value.(float64); !ok {
+ return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ case "boolean":
+ if _, ok := value.(bool); !ok {
+ return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`.
+ // We have both here so that kubectl can work with both old and new api servers.
+ case "object":
+ case "any":
+ default:
+ return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType))
+ }
+ return allErrs
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go
new file mode 100644
index 000000000..785a059cd
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go
@@ -0,0 +1,4241 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/golang/glog"
+
+ "math"
+
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/resource"
+ apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+ genericvalidation "k8s.io/apimachinery/pkg/api/validation"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/helper"
+ apiservice "k8s.io/kubernetes/pkg/api/service"
+ "k8s.io/kubernetes/pkg/api/v1"
+ v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
+ "k8s.io/kubernetes/pkg/capabilities"
+ "k8s.io/kubernetes/pkg/features"
+ "k8s.io/kubernetes/pkg/security/apparmor"
+)
+
+// TODO: delete this global variable when we enable the validation of common
+// fields by default.
+var RepairMalformedUpdates bool = genericvalidation.RepairMalformedUpdates
+
+const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg
+const isInvalidQuotaResource string = `must be a standard resource for quota`
+const fieldImmutableErrorMsg string = genericvalidation.FieldImmutableErrorMsg
+const isNotIntegerErrorMsg string = `must be an integer`
+
+var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255)
+var volumeModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive"
+
+// BannedOwners is a blacklist of objects that are not allowed to be owners.
+var BannedOwners = genericvalidation.BannedOwners
+
+// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue
+func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
+ allErrs := field.ErrorList{}
+ actualValue, found := meta.Labels[key]
+ if !found {
+ allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key),
+ fmt.Sprintf("must be '%s'", expectedValue)))
+ return allErrs
+ }
+ if actualValue != expectedValue {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels,
+ fmt.Sprintf("must be '%s'", expectedValue)))
+ }
+ return allErrs
+}
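+
+// For example (editor's note, not part of the vendored source): with
+// meta.Labels = {"tier": "backend"}, ValidateHasLabel(meta, fldPath, "tier",
+// "frontend") yields one Invalid error; if the "tier" key were absent
+// entirely, the result would be a single Required error instead.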
+
+// ValidateAnnotations validates that a set of annotations are correctly defined.
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ return genericvalidation.ValidateAnnotations(annotations, fldPath)
+}
+
+func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsDNS1123Label(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ return allErrs
+}
+
+// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain.
+func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsDNS1123Subdomain(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ return allErrs
+}
+
+func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if annotations[api.AffinityAnnotationKey] != "" {
+ allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...)
+ }
+
+ if value, isMirror := annotations[api.MirrorPodAnnotationKey]; isMirror {
+ if len(spec.NodeName) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Key(api.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set"))
+ }
+ }
+
+ if annotations[api.TolerationsAnnotationKey] != "" {
+ allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...)
+ }
+
+ allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...)
+ allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...)
+
+ sysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.SysctlsPodAnnotationKey])
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Key(api.SysctlsPodAnnotationKey), annotations[api.SysctlsPodAnnotationKey], err.Error()))
+ } else {
+ allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(api.SysctlsPodAnnotationKey))...)
+ }
+ unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.UnsafeSysctlsPodAnnotationKey])
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), annotations[api.UnsafeSysctlsPodAnnotationKey], err.Error()))
+ } else {
+ allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(api.UnsafeSysctlsPodAnnotationKey))...)
+ }
+ inBoth := sysctlIntersection(sysctls, unsafeSysctls)
+ if len(inBoth) > 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe"))
+ }
+
+ return allErrs
+}
+
+// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations contain valid data
+func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error()))
+ return allErrs
+ }
+
+ if len(tolerations) > 0 {
+ allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...)
+ }
+
+ return allErrs
+}
+
+// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data
+func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ affinity, err := helper.GetAffinityFromPodAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error()))
+ return allErrs
+ }
+ if affinity == nil {
+ return allErrs
+ }
+
+ allErrs = append(allErrs, validateAffinity(affinity, fldPath.Child("affinity"))...)
+ return allErrs
+}
+
+func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ newAnnotations := newPod.Annotations
+ oldAnnotations := oldPod.Annotations
+ for k, oldVal := range oldAnnotations {
+ if newVal, exists := newAnnotations[k]; exists && newVal == oldVal {
+ continue // No change.
+ }
+ if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations"))
+ }
+ if k == api.MirrorPodAnnotationKey {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation"))
+ }
+ }
+ // Check for additions
+ for k := range newAnnotations {
+ if _, ok := oldAnnotations[k]; ok {
+ continue // No change.
+ }
+ if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations"))
+ }
+ if k == api.MirrorPodAnnotationKey {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation"))
+ }
+ }
+ allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath)...)
+ return allErrs
+}
+
+func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ return allErrs
+}
+
+func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+ return genericvalidation.ValidateOwnerReferences(ownerReferences, fldPath)
+}
+
+// ValidateNameFunc validates that the provided name is valid for a given resource type.
+// Not all resources have the same validation rules for names. Prefix is true
+// if the name will have a value appended to it. If the name is not valid,
+// this returns a list of descriptions of individual characteristics of the
+// value that were not valid. Otherwise this returns an empty list or nil.
+type ValidateNameFunc apimachineryvalidation.ValidateNameFunc
+
+// maskTrailingDash replaces the final character of a string with a subdomain safe
+// value if it is a dash.
+func maskTrailingDash(name string) string {
+ if strings.HasSuffix(name, "-") {
+ return name[:len(name)-2] + "a"
+ }
+ return name
+}
+
+// ValidatePodName can be used to check whether the given pod name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidatePodName = NameIsDNSSubdomain
+
+// ValidateReplicationControllerName can be used to check whether the given replication
+// controller name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateReplicationControllerName = NameIsDNSSubdomain
+
+// ValidateServiceName can be used to check whether the given service name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceName = NameIsDNS1035Label
+
+// ValidateNodeName can be used to check whether the given node name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNodeName = NameIsDNSSubdomain
+
+// ValidateNamespaceName can be used to check whether the given namespace name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName
+
+// ValidateLimitRangeName can be used to check whether the given limit range name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateLimitRangeName = NameIsDNSSubdomain
+
+// ValidateResourceQuotaName can be used to check whether the given
+// resource quota name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateResourceQuotaName = NameIsDNSSubdomain
+
+// ValidateSecretName can be used to check whether the given secret name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateSecretName = NameIsDNSSubdomain
+
+// ValidateServiceAccountName can be used to check whether the given service account name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountName
+
+// ValidateEndpointsName can be used to check whether the given endpoints name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateEndpointsName = NameIsDNSSubdomain
+
+// ValidateClusterName can be used to check whether the given cluster name is valid.
+var ValidateClusterName = genericvalidation.ValidateClusterName
+
+// ValidateClassName can be used to check whether the given class name is valid.
+// It is defined here to avoid import cycle between pkg/apis/storage/validation
+// (where it should be) and this file.
+var ValidateClassName = NameIsDNSSubdomain
+
+// TODO update all references to these functions to point to the genericvalidation ones
+// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
+func NameIsDNSSubdomain(name string, prefix bool) []string {
+ return apimachineryvalidation.NameIsDNSSubdomain(name, prefix)
+}
+
+// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
+func NameIsDNSLabel(name string, prefix bool) []string {
+ return apimachineryvalidation.NameIsDNSLabel(name, prefix)
+}
+
+// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label.
+func NameIsDNS1035Label(name string, prefix bool) []string {
+ return apimachineryvalidation.NameIsDNS1035Label(name, prefix)
+}
+
+// Validates that the given value is not negative.
+func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
+ return apimachineryvalidation.ValidateNonnegativeField(value, fldPath)
+}
+
+// Validates that a Quantity is not negative
+func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if value.Cmp(resource.Quantity{}) < 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
+ }
+ return allErrs
+}
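+
+// For example (editor's note, not part of the vendored source):
+//
+//	ValidateNonnegativeQuantity(resource.MustParse("100Mi"), fldPath) // no errors
+//	ValidateNonnegativeQuantity(resource.MustParse("-1"), fldPath)    // one Invalid error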
+
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
+ return genericvalidation.ValidateImmutableField(newVal, oldVal, fldPath)
+}
+
+func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if oldVal != newVal {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", annotation), newVal, fieldImmutableErrorMsg))
+ }
+ return allErrs
+}
+
+// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for root-scoped resources with a namespace, because the namespace should already have been cleared before this point.
+// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate.
+func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+ allErrs := genericvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath)
+ // run additional checks for the finalizer name
+ for i := range meta.Finalizers {
+ allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
+ }
+ return allErrs
+}
+
+// ValidateObjectMetaUpdate validates an object's metadata when updated
+func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
+ allErrs := genericvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath)
+ // run additional checks for the finalizer name
+ for i := range newMeta.Finalizers {
+ allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
+ }
+
+ return allErrs
+}
+
+func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
+ return genericvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath)
+}
+
+func ValidateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) {
+ allErrs := field.ErrorList{}
+
+ allNames := sets.String{}
+ for i, vol := range volumes {
+ idxPath := fldPath.Index(i)
+ namePath := idxPath.Child("name")
+ el := validateVolumeSource(&vol.VolumeSource, idxPath)
+ if len(vol.Name) == 0 {
+ el = append(el, field.Required(namePath, ""))
+ } else {
+ el = append(el, ValidateDNS1123Label(vol.Name, namePath)...)
+ }
+ if allNames.Has(vol.Name) {
+ el = append(el, field.Duplicate(namePath, vol.Name))
+ }
+ if len(el) == 0 {
+ allNames.Insert(vol.Name)
+ } else {
+ allErrs = append(allErrs, el...)
+ }
+
+ }
+ return allNames, allErrs
+}
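+
+// Editor's note (not part of the vendored source): a volume name is only
+// added to the returned set when that volume validates cleanly, so a later
+// volume reusing an invalid name is not additionally flagged as a Duplicate.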
+
+func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.ErrorList {
+ numVolumes := 0
+ allErrs := field.ErrorList{}
+ if source.EmptyDir != nil {
+ numVolumes++
+ if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+ if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) != 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field disabled by feature-gate for EmptyDir volumes"))
+ }
+ } else {
+ if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity"))
+ }
+ }
+ }
+ if source.HostPath != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...)
+ }
+ }
+ if source.GitRepo != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...)
+ }
+ }
+ if source.GCEPersistentDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...)
+ }
+ }
+ if source.AWSElasticBlockStore != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
+ }
+ }
+ if source.Secret != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...)
+ }
+ }
+ if source.NFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...)
+ }
+ }
+ if source.ISCSI != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...)
+ }
+ }
+ if source.Glusterfs != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGlusterfs(source.Glusterfs, fldPath.Child("glusterfs"))...)
+ }
+ }
+ if source.Flocker != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...)
+ }
+ }
+ if source.PersistentVolumeClaim != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...)
+ }
+ }
+ if source.RBD != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...)
+ }
+ }
+ if source.Cinder != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...)
+ }
+ }
+ if source.CephFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...)
+ }
+ }
+ if source.Quobyte != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...)
+ }
+ }
+ if source.DownwardAPI != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...)
+ }
+ }
+ if source.FC != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...)
+ }
+ }
+ if source.FlexVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...)
+ }
+ }
+ if source.ConfigMap != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
+ }
+ }
+
+ if source.AzureFile != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
+ }
+ }
+
+ if source.VsphereVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...)
+ }
+ }
+ if source.PhotonPersistentDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
+ }
+ }
+ if source.PortworxVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validatePortworxVolumeSource(source.PortworxVolume, fldPath.Child("portworxVolume"))...)
+ }
+ }
+ if source.AzureDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...)
+ }
+ }
+ if source.StorageOS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateStorageOSVolumeSource(source.StorageOS, fldPath.Child("storageos"))...)
+ }
+ }
+ if source.Projected != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"))...)
+ }
+ }
+ if source.ScaleIO != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
+ }
+ }
+
+ if numVolumes == 0 {
+ allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
+ }
+
+ return allErrs
+}
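+
+// Editor's note (not part of the vendored source): exactly one volume type
+// may be set. A source with both EmptyDir and HostPath populated produces a
+// Forbidden error on the hostPath child, and a source with no type set at
+// all produces a single Required error on the volume itself.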
+
+func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(hostPath.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ return allErrs
+ }
+
+ allErrs = append(allErrs, validatePathNoBacksteps(hostPath.Path, fldPath.Child("path"))...)
+ return allErrs
+}
+
+func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(gitRepo.Repository) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("repository"), ""))
+ }
+
+ pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory"))
+ allErrs = append(allErrs, pathErrs...)
+ return allErrs
+}
+
+func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(iscsi.TargetPortal) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), ""))
+ }
+ if len(iscsi.IQN) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), ""))
+ }
+ if iscsi.Lun < 0 || iscsi.Lun > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255)))
+ }
+ if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), ""))
+ }
+ return allErrs
+}
+
+func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(fc.TargetWWNs) < 1 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), ""))
+ }
+
+ if fc.Lun == nil {
+ allErrs = append(allErrs, field.Required(fldPath.Child("lun"), ""))
+ } else {
+ if *fc.Lun < 0 || *fc.Lun > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255)))
+ }
+ }
+ return allErrs
+}
+
+func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(pd.PDName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), ""))
+ }
+ if pd.Partition < 0 || pd.Partition > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg))
+ }
+ return allErrs
+}
+
+func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(PD.VolumeID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
+ }
+ if PD.Partition < 0 || PD.Partition > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg))
+ }
+ return allErrs
+}
+
+func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(secretSource.SecretName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
+ }
+
+ secretMode := secretSource.DefaultMode
+ if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, volumeModeErrorMsg))
+ }
+
+ itemsPath := fldPath.Child("items")
+ for i, kp := range secretSource.Items {
+ itemPath := itemsPath.Index(i)
+ allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
+ }
+ return allErrs
+}
+
+func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(configMapSource.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ }
+
+ configMapMode := configMapSource.DefaultMode
+ if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, volumeModeErrorMsg))
+ }
+
+ itemsPath := fldPath.Child("items")
+ for i, kp := range configMapSource.Items {
+ itemPath := itemsPath.Index(i)
+ allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
+ }
+ return allErrs
+}
+
+func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(kp.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ }
+ if len(kp.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...)
+ if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, volumeModeErrorMsg))
+ }
+
+ return allErrs
+}
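+
+// For example (editor's note, not part of the vendored source): a KeyToPath
+// with Mode 0644 passes the mode check, while a value outside the inclusive
+// 0-0777 octal range is rejected with volumeModeErrorMsg.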
+
+func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(claim.ClaimName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), ""))
+ }
+ return allErrs
+}
+
+func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(nfs.Server) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("server"), ""))
+ }
+ if len(nfs.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ if !path.IsAbs(nfs.Path) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path"))
+ }
+ return allErrs
+}
+
+func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(quobyte.Registry) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas"))
+ } else {
+ for _, hostPortPair := range strings.Split(quobyte.Registry, ",") {
+ if _, _, err := net.SplitHostPort(hostPortPair); err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas"))
+ }
+ }
+ }
+
+ if len(quobyte.Volume) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volume"), ""))
+ }
+ return allErrs
+}
+
+func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(glusterfs.EndpointsName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), ""))
+ }
+ if len(glusterfs.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ return allErrs
+}
+
+func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 {
+ //TODO: consider adding a RequiredOneOf() error for this and similar cases
+ allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required"))
+ }
+ if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously"))
+ }
+ if strings.Contains(flocker.DatasetName, "/") {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'"))
+ }
+ return allErrs
+}
+
+var validDownwardAPIFieldPathExpressions = sets.NewString(
+ "metadata.name",
+ "metadata.namespace",
+ "metadata.labels",
+ "metadata.annotations")
+
+func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(file.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...)
+ if file.FieldRef != nil {
+ allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
+ if file.ResourceFieldRef != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
+ }
+ } else if file.ResourceFieldRef != nil {
+ allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...)
+ } else {
+ allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
+ }
+ if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg))
+ }
+
+ return allErrs
+}
+
+func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ downwardAPIMode := downwardAPIVolume.DefaultMode
+ if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, volumeModeErrorMsg))
+ }
+
+ for _, file := range downwardAPIVolume.Items {
+ allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath)...)
+ }
+ return allErrs
+}
+
+func validateProjectionSources(projection *api.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allPaths := sets.String{}
+
+ for _, source := range projection.Sources {
+ numSources := 0
+ if source.Secret != nil {
+ if numSources > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type"))
+ } else {
+ numSources++
+ if len(source.Secret.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ }
+ itemsPath := fldPath.Child("items")
+ for i, kp := range source.Secret.Items {
+ itemPath := itemsPath.Index(i)
+ allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
+ if len(kp.Path) > 0 {
+ curPath := kp.Path
+ if !allPaths.Has(curPath) {
+ allPaths.Insert(curPath)
+ } else {
+ allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths"))
+ }
+ }
+ }
+ }
+ }
+ if source.ConfigMap != nil {
+ if numSources > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
+ } else {
+ numSources++
+ if len(source.ConfigMap.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ }
+ itemsPath := fldPath.Child("items")
+ for i, kp := range source.ConfigMap.Items {
+ itemPath := itemsPath.Index(i)
+ allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
+ if len(kp.Path) > 0 {
+ curPath := kp.Path
+ if !allPaths.Has(curPath) {
+ allPaths.Insert(curPath)
+ } else {
+ allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths"))
+ }
+
+ }
+ }
+ }
+ }
+ if source.DownwardAPI != nil {
+ if numSources > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type"))
+ } else {
+ numSources++
+ for _, file := range source.DownwardAPI.Items {
+ allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath.Child("downwardAPI"))...)
+ if len(file.Path) > 0 {
+ curPath := file.Path
+ if !allPaths.Has(curPath) {
+ allPaths.Insert(curPath)
+ } else {
+ allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths"))
+ }
+ }
+ }
+ }
+ }
+ }
+ return allErrs
+}
+
+func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ projectionMode := projection.DefaultMode
+ if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, volumeModeErrorMsg))
+ }
+
+ allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...)
+ return allErrs
+}
+
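+// For illustration only (not part of the upstream source): two projected
+// sources that write to the same relative path trigger the "conflicting
+// duplicate paths" error. The names below are hypothetical:
+//
+//	src := api.ProjectedVolumeSource{
+//		Sources: []api.VolumeProjection{
+//			{Secret: &api.SecretProjection{
+//				LocalObjectReference: api.LocalObjectReference{Name: "creds"},
+//				Items:                []api.KeyToPath{{Key: "user", Path: "cfg/user"}},
+//			}},
+//			{ConfigMap: &api.ConfigMapProjection{
+//				LocalObjectReference: api.LocalObjectReference{Name: "settings"},
+//				Items:                []api.KeyToPath{{Key: "user", Path: "cfg/user"}},
+//			}},
+//		},
+//	}
+//	errs := validateProjectedVolumeSource(&src, field.NewPath("spec", "volumes").Index(0).Child("projected"))
+//	// errs contains an Invalid error because both sources project onto "cfg/user".
+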
+// validateLocalDescendingPath makes sure targetPath:
+// 1. is not an absolute path
+// 2. does not contain any '..' path elements
+func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if path.IsAbs(targetPath) {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path"))
+ }
+
+ allErrs = append(allErrs, validatePathNoBacksteps(targetPath, fldPath)...)
+
+ return allErrs
+}
+
+// validatePathNoBacksteps makes sure the targetPath does not have any `..` path elements when split
+//
+// This assumes the OS of the apiserver and the nodes are the same. The same check should be done
+// on the node to ensure there are no backsteps.
+func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ parts := strings.Split(filepath.ToSlash(targetPath), "/")
+ for _, item := range parts {
+ if item == ".." {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'"))
+ break // even for `../../..`, one error is sufficient to make the point
+ }
+ }
+ return allErrs
+}
+
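+// For illustration only (not part of the upstream source), a few inputs and
+// their outcomes; fldPath is any *field.Path, and the apiserver is assumed to
+// use slash-separated paths:
+//
+//	validatePathNoBacksteps("etc/conf.d", fldPath) // accepted
+//	validatePathNoBacksteps("a/../b", fldPath)     // Invalid: contains a '..' element
+//	validatePathNoBacksteps("..data", fldPath)     // accepted: '..' must be a whole element
+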
+// validateLocalNonReservedPath makes sure targetPath:
+// 1. is not an absolute path
+// 2. does not contain any '..' elements
+// 3. does not start with '..'
+func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...)
+ // Don't report this error if the check for .. elements already caught it.
+ if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'"))
+ }
+ return allErrs
+}
+
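+// For illustration only (not part of the upstream source): the extra prefix
+// check rejects names such as "..data" even though they contain no '..' path
+// element; volume plugins use '..'-prefixed staging names internally, which is
+// presumably why such names are reserved here. fldPath is hypothetical:
+//
+//	validateLocalNonReservedPath("etc/conf", fldPath) // accepted
+//	validateLocalNonReservedPath("..data", fldPath)   // Invalid: must not start with '..'
+//	validateLocalNonReservedPath("../conf", fldPath)  // Invalid: reported once by the '..' element check
+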
+func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(rbd.CephMonitors) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
+ }
+ if len(rbd.RBDImage) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
+ }
+ return allErrs
+}
+
+func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cd.VolumeID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
+ }
+ return allErrs
+}
+
+func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cephfs.Monitors) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
+ }
+ return allErrs
+}
+
+func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(fv.Driver) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
+ }
+
+ // Make sure user-specified options don't use kubernetes namespaces
+ for k := range fv.Options {
+ namespace := k
+ if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
+ namespace = parts[0]
+ }
+ normalized := "." + strings.ToLower(namespace)
+ if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
+ }
+ }
+
+ return allErrs
+}
+
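+// For illustration only (not part of the upstream source): option keys are
+// interpreted as "namespace/key", and the namespace half may not end in
+// kubernetes.io or k8s.io. The driver name and option below are hypothetical:
+//
+//	fv := api.FlexVolumeSource{
+//		Driver:  "example.com/lvm",
+//		Options: map[string]string{"alpha.kubernetes.io/foo": "bar"},
+//	}
+//	errs := validateFlexVolumeSource(&fv, field.NewPath("spec", "flexVolume"))
+//	// errs flags the option key: "alpha.kubernetes.io" is a reserved namespace.
+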
+func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if azure.SecretName == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
+ }
+ if azure.ShareName == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
+ }
+ return allErrs
+}
+
+func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite))
+ var supportedDiskKinds = sets.NewString(string(api.AzureSharedBlobDisk), string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk))
+
+ diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"}
+ diskUriSupportedBlob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"}
+
+ allErrs := field.ErrorList{}
+ if azure.DiskName == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), ""))
+ }
+
+ if azure.DataDiskURI == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), ""))
+ }
+
+ if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List()))
+ }
+
+ if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List()))
+ }
+
+ // validate that DiskUri is the correct format
+ if azure.Kind != nil && *azure.Kind == api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged))
+ }
+
+ if azure.Kind != nil && *azure.Kind != api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob))
+ }
+
+ return allErrs
+}
+
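+// For illustration only (not part of the upstream source): with Kind set to
+// AzureManagedDisk the diskURI must be an ARM resource path, not a blob URL.
+// The values below are hypothetical:
+//
+//	kind := api.AzureManagedDisk
+//	azure := api.AzureDiskVolumeSource{
+//		DiskName:    "data0",
+//		DataDiskURI: "https://acct.blob.core.windows.net/vhds/data0.vhd",
+//		Kind:        &kind,
+//	}
+//	errs := validateAzureDisk(&azure, field.NewPath("spec", "azureDisk"))
+//	// errs reports diskURI as NotSupported: it does not start with "/subscriptions/".
+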
+func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cd.VolumePath) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), ""))
+ }
+ return allErrs
+}
+
+func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cd.PdID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), ""))
+ }
+ return allErrs
+}
+
+func validatePortworxVolumeSource(pwx *api.PortworxVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(pwx.VolumeID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
+ }
+ return allErrs
+}
+
+func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if sio.Gateway == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), ""))
+ }
+ if sio.System == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("system"), ""))
+ }
+ if sio.VolumeName == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
+ }
+ return allErrs
+}
+
+func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if ls.Path == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ return allErrs
+ }
+
+ allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...)
+ return allErrs
+}
+
+func validateStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(storageos.VolumeName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
+ } else {
+ allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
+ }
+ if len(storageos.VolumeNamespace) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
+ }
+ if storageos.SecretRef != nil {
+ if len(storageos.SecretRef.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
+ }
+ }
+ return allErrs
+}
+
+func validateStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(storageos.VolumeName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
+ } else {
+ allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
+ }
+ if len(storageos.VolumeNamespace) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
+ }
+ if storageos.SecretRef != nil {
+ if len(storageos.SecretRef.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
+ }
+ if len(storageos.SecretRef.Namespace) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
+ }
+ }
+ return allErrs
+}
+
+// ValidatePersistentVolumeName checks that a name is appropriate for a
+// PersistentVolume object.
+var ValidatePersistentVolumeName = NameIsDNSSubdomain
+
+var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany))
+
+var supportedReclaimPolicy = sets.NewString(string(api.PersistentVolumeReclaimDelete), string(api.PersistentVolumeReclaimRecycle), string(api.PersistentVolumeReclaimRetain))
+
+func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
+ metaPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath)
+
+ specPath := field.NewPath("spec")
+ if len(pv.Spec.AccessModes) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), ""))
+ }
+ for _, mode := range pv.Spec.AccessModes {
+ if !supportedAccessModes.Has(string(mode)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List()))
+ }
+ }
+
+ if len(pv.Spec.Capacity) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("capacity"), ""))
+ }
+
+ if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)}))
+ }
+ capPath := specPath.Child("capacity")
+ for r, qty := range pv.Spec.Capacity {
+ allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+ }
+ if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 {
+ if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("persistentVolumeReclaimPolicy"), pv.Spec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List()))
+ }
+ }
+
+ nodeAffinitySpecified, errs := validateStorageNodeAffinityAnnotation(pv.ObjectMeta.Annotations, metaPath.Child("annotations"))
+ allErrs = append(allErrs, errs...)
+
+ numVolumes := 0
+ if pv.Spec.HostPath != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...)
+ }
+ }
+ if pv.Spec.GCEPersistentDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("gcePersistentDisk"))...)
+ }
+ }
+ if pv.Spec.AWSElasticBlockStore != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...)
+ }
+ }
+ if pv.Spec.Glusterfs != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGlusterfs(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...)
+ }
+ }
+ if pv.Spec.Flocker != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...)
+ }
+ }
+ if pv.Spec.NFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...)
+ }
+ }
+ if pv.Spec.RBD != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...)
+ }
+ }
+ if pv.Spec.Quobyte != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("quobyte"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateQuobyteVolumeSource(pv.Spec.Quobyte, specPath.Child("quobyte"))...)
+ }
+ }
+ if pv.Spec.CephFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCephFSVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...)
+ }
+ }
+ if pv.Spec.ISCSI != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...)
+ }
+ }
+ if pv.Spec.Cinder != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...)
+ }
+ }
+ if pv.Spec.FC != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...)
+ }
+ }
+ if pv.Spec.FlexVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
+ }
+ }
+ if pv.Spec.AzureFile != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...)
+ }
+ }
+
+ if pv.Spec.VsphereVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...)
+ }
+ }
+ if pv.Spec.PhotonPersistentDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, specPath.Child("photonPersistentDisk"))...)
+ }
+ }
+ if pv.Spec.PortworxVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("portworxVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validatePortworxVolumeSource(pv.Spec.PortworxVolume, specPath.Child("portworxVolume"))...)
+ }
+ }
+ if pv.Spec.AzureDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("azureDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...)
+ }
+ }
+ if pv.Spec.ScaleIO != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("scaleIO"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateScaleIOVolumeSource(pv.Spec.ScaleIO, specPath.Child("scaleIO"))...)
+ }
+ }
+ if pv.Spec.Local != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "Local volumes are disabled by feature-gate"))
+ }
+ allErrs = append(allErrs, validateLocalVolumeSource(pv.Spec.Local, specPath.Child("local"))...)
+
+ // NodeAffinity is required
+ if !nodeAffinitySpecified {
+ allErrs = append(allErrs, field.Required(metaPath.Child("annotations"), "Local volume requires node affinity"))
+ }
+ }
+ }
+ if pv.Spec.StorageOS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("storageos"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pv.Spec.StorageOS, specPath.Child("storageos"))...)
+ }
+ }
+
+ if numVolumes == 0 {
+ allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
+ }
+
+ // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy
+ if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'"))
+ }
+
+ if len(pv.Spec.StorageClassName) > 0 {
+ for _, msg := range ValidateClassName(pv.Spec.StorageClassName, false) {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("storageClassName"), pv.Spec.StorageClassName, msg))
+ }
+ }
+
+ return allErrs
+}
+
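+// For illustration only (not part of the upstream source): a PV that sets two
+// volume sources receives a Forbidden error on the second one. The values
+// below are hypothetical:
+//
+//	pv := &api.PersistentVolume{
+//		ObjectMeta: metav1.ObjectMeta{Name: "pv0"},
+//		Spec: api.PersistentVolumeSpec{
+//			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+//			Capacity:    api.ResourceList{api.ResourceStorage: resource.MustParse("1Gi")},
+//			PersistentVolumeSource: api.PersistentVolumeSource{
+//				HostPath: &api.HostPathVolumeSource{Path: "/data"},
+//				NFS:      &api.NFSVolumeSource{Server: "10.0.0.1", Path: "/export"},
+//			},
+//		},
+//	}
+//	errs := ValidatePersistentVolume(pv)
+//	// errs includes Forbidden on spec.nfs: "may not specify more than 1 volume type".
+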
+// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make.
+// newPv is updated with fields that cannot be changed.
+func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
+ allErrs := ValidatePersistentVolume(newPv)
+ newPv.Status = oldPv.Status
+ return allErrs
+}
+
+// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make.
+// newPv is updated with fields that cannot be changed.
+func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata"))
+ if len(newPv.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ newPv.Spec = oldPv.Spec
+ return allErrs
+}
+
+// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim
+func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec
+func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(spec.AccessModes) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required"))
+ }
+ if spec.Selector != nil {
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
+ }
+ for _, mode := range spec.AccessModes {
+ if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List()))
+ }
+ }
+ storageValue, ok := spec.Resources.Requests[api.ResourceStorage]
+ if !ok {
+ allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(api.ResourceStorage)), ""))
+ } else {
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(api.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(api.ResourceStorage)))...)
+ }
+
+ if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 {
+ for _, msg := range ValidateClassName(*spec.StorageClassName, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg))
+ }
+ }
+ return allErrs
+}
+
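+// For illustration only (not part of the upstream source): a minimal spec that
+// passes; dropping the storage request would instead yield a Required error on
+// resources[storage]:
+//
+//	spec := api.PersistentVolumeClaimSpec{
+//		AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+//		Resources: api.ResourceRequirements{
+//			Requests: api.ResourceList{api.ResourceStorage: resource.MustParse("8Gi")},
+//		},
+//	}
+//	errs := ValidatePersistentVolumeClaimSpec(&spec, field.NewPath("spec"))
+//	// len(errs) == 0
+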
+// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim
+func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...)
+ // PVController needs to update PVC.Spec w/ VolumeName.
+ // Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
+ if len(oldPvc.Spec.VolumeName) == 0 {
+ // volumeName changes are allowed once.
+ // Reset back to empty string after equality check
+ oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName
+ defer func() { oldPvc.Spec.VolumeName = "" }()
+ }
+ // changes to Spec are not allowed, but updates to labels and some annotations are OK.
+ // no-op updates pass validation.
+ if !apiequality.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation"))
+ }
+
+ // storageclass annotation should be immutable after creation
+ // TODO: remove Beta when no longer needed
+ allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
+
+ newPvc.Status = oldPvc.Status
+ return allErrs
+}
+
+// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
+func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+ if len(newPvc.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ if len(newPvc.Spec.AccessModes) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), ""))
+ }
+ capPath := field.NewPath("status", "capacity")
+ for r, qty := range newPvc.Status.Capacity {
+ allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+ }
+ newPvc.Spec = oldPvc.Spec
+ return allErrs
+}
+
+var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP))
+
+func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ allNames := sets.String{}
+ for i, port := range ports {
+ idxPath := fldPath.Index(i)
+ if len(port.Name) > 0 {
+ if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 {
+ for _, msg := range msgs {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msg))
+ }
+ } else if allNames.Has(port.Name) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name))
+ } else {
+ allNames.Insert(port.Name)
+ }
+ }
+ if port.ContainerPort == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), ""))
+ } else {
+ for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg))
+ }
+ }
+ if port.HostPort != 0 {
+ for _, msg := range validation.IsValidPortNum(int(port.HostPort)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg))
+ }
+ }
+ if len(port.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), ""))
+ } else if !supportedPortProtocols.Has(string(port.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
+ }
+ }
+ return allErrs
+}
+
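+// For illustration only (not part of the upstream source): duplicate port
+// names within one container are rejected:
+//
+//	ports := []api.ContainerPort{
+//		{Name: "http", ContainerPort: 8080, Protocol: api.ProtocolTCP},
+//		{Name: "http", ContainerPort: 8081, Protocol: api.ProtocolTCP},
+//	}
+//	errs := validateContainerPorts(ports, field.NewPath("spec", "containers").Index(0).Child("ports"))
+//	// errs contains a Duplicate error for the second "http" name.
+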
+// ValidateEnv validates env vars
+func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for i, ev := range vars {
+ idxPath := fldPath.Index(i)
+ if len(ev.Name) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+ } else {
+ for _, msg := range validation.IsCIdentifier(ev.Name) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
+ }
+ }
+ allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...)
+ }
+ return allErrs
+}
+
+var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP")
+var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory")
+
+func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if ev.ValueFrom == nil {
+ return allErrs
+ }
+
+ numSources := 0
+
+ if ev.ValueFrom.FieldRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...)
+ }
+ if ev.ValueFrom.ResourceFieldRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...)
+ }
+ if ev.ValueFrom.ConfigMapKeyRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...)
+ }
+ if ev.ValueFrom.SecretKeyRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...)
+ }
+
+ if numSources == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef` or `secretKeyRef`"))
+ } else if len(ev.Value) != 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty"))
+ } else if numSources > 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
+ }
+
+ return allErrs
+}
+
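+// For illustration only (not part of the upstream source): `value` and
+// `valueFrom` are mutually exclusive on an env var. The names are hypothetical:
+//
+//	ev := api.EnvVar{
+//		Name:  "POD_NAME",
+//		Value: "literal",
+//		ValueFrom: &api.EnvVarSource{
+//			FieldRef: &api.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
+//		},
+//	}
+//	errs := validateEnvVarValueFrom(ev, field.NewPath("spec").Child("valueFrom"))
+//	// errs: Invalid, "may not be specified when `value` is not empty".
+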
+func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(fs.APIVersion) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), ""))
+ } else if len(fs.FieldPath) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), ""))
+ } else {
+ internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err)))
+ } else if !expressions.Has(internalFieldPath) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List()))
+ }
+ }
+
+ return allErrs
+}
+
+func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if volume && len(fs.ContainerName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), ""))
+ } else if len(fs.Resource) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
+ } else if !expressions.Has(fs.Resource) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List()))
+ }
+ allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...)
+ return allErrs
+}
+
+func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, ev := range vars {
+ idxPath := fldPath.Index(i)
+ if len(ev.Prefix) > 0 {
+ for _, msg := range validation.IsCIdentifier(ev.Prefix) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg))
+ }
+ }
+
+ numSources := 0
+ if ev.ConfigMapRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateConfigMapEnvSource(ev.ConfigMapRef, idxPath.Child("configMapRef"))...)
+ }
+ if ev.SecretRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateSecretEnvSource(ev.SecretRef, idxPath.Child("secretRef"))...)
+ }
+
+ if numSources == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `configMapRef` or `secretRef`"))
+ } else if numSources > 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
+ }
+ }
+ return allErrs
+}
+
+func validateConfigMapEnvSource(configMapSource *api.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(configMapSource.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ } else {
+ for _, msg := range ValidateConfigMapName(configMapSource.Name, true) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), configMapSource.Name, msg))
+ }
+ }
+ return allErrs
+}
+
+func validateSecretEnvSource(secretSource *api.SecretEnvSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(secretSource.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ } else {
+ for _, msg := range ValidateSecretName(secretSource.Name, true) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), secretSource.Name, msg))
+ }
+ }
+ return allErrs
+}
+
+var validContainerResourceDivisorForCPU = sets.NewString("1m", "1")
+var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
+
+func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ unsetDivisor := resource.Quantity{}
+ if unsetDivisor.Cmp(divisor) == 0 {
+ return allErrs
+ }
+ switch rName {
+ case "limits.cpu", "requests.cpu":
+ if !validContainerResourceDivisorForCPU.Has(divisor.String()) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource"))
+ }
+ case "limits.memory", "requests.memory":
+ if !validContainerResourceDivisorForMemory.Has(divisor.String()) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource"))
+ }
+ }
+ return allErrs
+}
+
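+// For illustration only (not part of the upstream source): a divisor of "1Ki"
+// is rejected for cpu but accepted for memory; fldPath is hypothetical:
+//
+//	errs := validateContainerResourceDivisor("limits.cpu", resource.MustParse("1Ki"), fldPath)   // Invalid
+//	errs = validateContainerResourceDivisor("limits.memory", resource.MustParse("1Ki"), fldPath) // accepted
+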
+func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ nameFn := ValidateNameFunc(ValidateConfigMapName)
+ for _, msg := range nameFn(s.Name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
+ }
+ if len(s.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ } else {
+ for _, msg := range validation.IsConfigMapKey(s.Key) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+ }
+ }
+
+ return allErrs
+}
+
+func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ nameFn := ValidateNameFunc(ValidateSecretName)
+ for _, msg := range nameFn(s.Name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
+ }
+ if len(s.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ } else {
+ for _, msg := range validation.IsConfigMapKey(s.Key) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+ }
+ }
+
+ return allErrs
+}
+
+func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ mountpoints := sets.NewString()
+
+ for i, mnt := range mounts {
+ idxPath := fldPath.Index(i)
+ if len(mnt.Name) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+ } else if !volumes.Has(mnt.Name) {
+ allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name))
+ }
+ if len(mnt.MountPath) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), ""))
+ }
+ if mountpoints.Has(mnt.MountPath) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique"))
+ }
+ mountpoints.Insert(mnt.MountPath)
+ if len(mnt.SubPath) > 0 {
+ allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...)
+ }
+ }
+ return allErrs
+}
+
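+// For illustration only (not part of the upstream source): mount names must
+// reference declared volumes and mountPath must be unique within a container:
+//
+//	vols := sets.NewString("config")
+//	mounts := []api.VolumeMount{
+//		{Name: "config", MountPath: "/etc/app"},
+//		{Name: "missing", MountPath: "/etc/app"},
+//	}
+//	errs := ValidateVolumeMounts(mounts, vols, field.NewPath("spec", "containers").Index(0).Child("volumeMounts"))
+//	// errs: NotFound for "missing" plus Invalid ("must be unique") for the repeated mountPath.
+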
+func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if probe == nil {
+ return allErrs
+ }
+ allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...)
+
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...)
+ return allErrs
+}
+
+// AccumulateUniqueHostPorts extracts each HostPort of each Container,
+// accumulating the results and returning an error if any ports conflict.
+func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for ci, ctr := range containers {
+ idxPath := fldPath.Index(ci)
+ portsPath := idxPath.Child("ports")
+ for pi := range ctr.Ports {
+ idxPath := portsPath.Index(pi)
+ port := ctr.Ports[pi].HostPort
+ if port == 0 {
+ continue
+ }
+ str := fmt.Sprintf("%s/%s/%d", ctr.Ports[pi].Protocol, ctr.Ports[pi].HostIP, port)
+ if accumulator.Has(str) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str))
+ } else {
+ accumulator.Insert(str)
+ }
+ }
+ }
+ return allErrs
+}
+
+// checkHostPortConflicts checks for colliding Port.HostPort values across
+// a slice of containers.
+func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList {
+ allPorts := sets.String{}
+ return AccumulateUniqueHostPorts(containers, &allPorts, fldPath)
+}
+
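+// The accumulator key is "protocol/hostIP/hostPort", so the same port number
+// may be reused with a different protocol or host IP. For illustration only
+// (not part of the upstream source):
+//
+//	containers := []api.Container{
+//		{Name: "a", Ports: []api.ContainerPort{{ContainerPort: 80, HostPort: 80, Protocol: api.ProtocolTCP}}},
+//		{Name: "b", Ports: []api.ContainerPort{{ContainerPort: 80, HostPort: 80, Protocol: api.ProtocolUDP}}},
+//	}
+//	errs := checkHostPortConflicts(containers, field.NewPath("spec", "containers"))
+//	// len(errs) == 0: the keys "TCP//80" and "UDP//80" do not collide.
+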
+func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if len(exec.Command) == 0 {
+ allErrors = append(allErrors, field.Required(fldPath.Child("command"), ""))
+ }
+ return allErrors
+}
+
+var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS))
+
+func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if len(http.Path) == 0 {
+ allErrors = append(allErrors, field.Required(fldPath.Child("path"), ""))
+ }
+ allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...)
+ if !supportedHTTPSchemes.Has(string(http.Scheme)) {
+ allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List()))
+ }
+ for _, header := range http.HTTPHeaders {
+ for _, msg := range validation.IsHTTPHeaderName(header.Name) {
+ allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg))
+ }
+ }
+ return allErrors
+}
+
+func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if port.Type == intstr.Int {
+ for _, msg := range validation.IsValidPortNum(port.IntValue()) {
+ allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg))
+ }
+ } else if port.Type == intstr.String {
+ for _, msg := range validation.IsValidPortName(port.StrVal) {
+ allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg))
+ }
+ } else {
+ allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type)))
+ }
+ return allErrs
+}
+
+func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList {
+ return ValidatePortNumOrName(tcp.Port, fldPath.Child("port"))
+}
+
+func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList {
+ numHandlers := 0
+ allErrors := field.ErrorList{}
+ if handler.Exec != nil {
+ if numHandlers > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type"))
+ } else {
+ numHandlers++
+ allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...)
+ }
+ }
+ if handler.HTTPGet != nil {
+ if numHandlers > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type"))
+ } else {
+ numHandlers++
+ allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...)
+ }
+ }
+ if handler.TCPSocket != nil {
+ if numHandlers > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type"))
+ } else {
+ numHandlers++
+ allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...)
+ }
+ }
+ if numHandlers == 0 {
+ allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type"))
+ }
+ return allErrors
+}
+
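+// For illustration only (not part of the upstream source): exactly one handler
+// type may be set:
+//
+//	h := api.Handler{
+//		Exec:    &api.ExecAction{Command: []string{"sh", "-c", "true"}},
+//		HTTPGet: &api.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080), Scheme: api.URISchemeHTTP},
+//	}
+//	errs := validateHandler(&h, field.NewPath("livenessProbe"))
+//	// errs: Forbidden on httpGet, "may not specify more than 1 handler type".
+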
+func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if lifecycle.PostStart != nil {
+ allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...)
+ }
+ if lifecycle.PreStop != nil {
+ allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...)
+ }
+ return allErrs
+}
+
+var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever))
+
+func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+
+ switch policy {
+ case api.PullAlways, api.PullIfNotPresent, api.PullNever:
+ case "":
+ allErrors = append(allErrors, field.Required(fldPath, ""))
+ default:
+ allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List()))
+ }
+
+ return allErrors
+}
+
+func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+ if len(containers) > 0 {
+ allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...)
+ }
+
+ allNames := sets.String{}
+ for _, ctr := range otherContainers {
+ allNames.Insert(ctr.Name)
+ }
+ for i, ctr := range containers {
+ idxPath := fldPath.Index(i)
+ if allNames.Has(ctr.Name) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
+ }
+ if len(ctr.Name) > 0 {
+ allNames.Insert(ctr.Name)
+ }
+ if ctr.Lifecycle != nil {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers"))
+ }
+ if ctr.LivenessProbe != nil {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers"))
+ }
+ if ctr.ReadinessProbe != nil {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers"))
+ }
+ }
+ return allErrs
+}
+
+func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(containers) == 0 {
+ return append(allErrs, field.Required(fldPath, ""))
+ }
+
+ allNames := sets.String{}
+ for i, ctr := range containers {
+ idxPath := fldPath.Index(i)
+ namePath := idxPath.Child("name")
+ if len(ctr.Name) == 0 {
+ allErrs = append(allErrs, field.Required(namePath, ""))
+ } else {
+ allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...)
+ }
+ if allNames.Has(ctr.Name) {
+ allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name))
+ } else {
+ allNames.Insert(ctr.Name)
+ }
+ // TODO: do not validate leading and trailing whitespace to preserve backward compatibility.
+ // for example: https://github.com/openshift/origin/issues/14659 image = " " is special token in pod template
+ // others may have done similar
+ if len(ctr.Image) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("image"), ""))
+ }
+ if ctr.Lifecycle != nil {
+ allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...)
+ }
+ allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...)
+ // Liveness-specific validation
+ if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1"))
+ }
+
+ switch ctr.TerminationMessagePolicy {
+ case api.TerminationMessageReadFile, api.TerminationMessageFallbackToLogsOnError:
+ case "":
+ allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'"))
+ default:
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, "must be 'File' or 'FallbackToLogsOnError'"))
+ }
+
+ allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...)
+ allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...)
+ allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"))...)
+ allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...)
+ allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volumes, idxPath.Child("volumeMounts"))...)
+ allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...)
+ allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...)
+ allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...)
+ }
+ // Check for colliding ports across all containers.
+ allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...)
+
+ return allErrs
+}
+
+func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ switch *restartPolicy {
+ case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever:
+ case "":
+ allErrors = append(allErrors, field.Required(fldPath, ""))
+ default:
+ validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)}
+ allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
+ }
+
+ return allErrors
+}
+
+func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ switch *dnsPolicy {
+ case api.DNSClusterFirstWithHostNet, api.DNSClusterFirst, api.DNSDefault:
+ case "":
+ allErrors = append(allErrors, field.Required(fldPath, ""))
+ default:
+ validValues := []string{string(api.DNSClusterFirstWithHostNet), string(api.DNSClusterFirst), string(api.DNSDefault)}
+ allErrors = append(allErrors, field.NotSupported(fldPath, *dnsPolicy, validValues))
+ }
+ return allErrors
+}
+
+func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if hostNetwork {
+ for i, container := range containers {
+ portsPath := fldPath.Index(i).Child("ports")
+ for j, port := range container.Ports {
+ idxPath := portsPath.Index(j)
+ if port.HostPort != port.ContainerPort {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true"))
+ }
+ }
+ }
+ }
+ return allErrors
+}
+
+func validateHostNetworkNoHostAliases(hostNetwork bool, hostAliases []api.HostAlias, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if hostNetwork {
+ if len(hostAliases) > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath, "may not be set when `hostNetwork` is true"))
+ }
+ }
+ return allErrors
+}
+
+// validateImagePullSecrets checks to make sure the pull secrets are well
+// formed. Right now, we only expect name to be set (it's the only field). If
+// this ever changes and someone decides to set those fields, we'd like to
+// know.
+func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ for i, currPullSecret := range imagePullSecrets {
+ idxPath := fldPath.Index(i)
+ strippedRef := api.LocalObjectReference{Name: currPullSecret.Name}
+ if !reflect.DeepEqual(strippedRef, currPullSecret) {
+ allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set"))
+ }
+ }
+ return allErrors
+}
+
+// validateAffinity checks if given affinities are valid
+func validateAffinity(affinity *api.Affinity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if affinity != nil {
+ if na := affinity.NodeAffinity; na != nil {
+ // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+ // if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
+ // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+ // }
+
+ if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+
+ if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ }
+ if affinity.PodAffinity != nil {
+ allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
+ }
+ if affinity.PodAntiAffinity != nil {
+ allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...)
+ }
+ }
+
+ return allErrs
+}
+
+func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
+ if !allowEmpty && len(*effect) == 0 {
+ return field.ErrorList{field.Required(fldPath, "")}
+ }
+
+ allErrors := field.ErrorList{}
+ switch *effect {
+ // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit.
+ case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoExecute:
+ // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoExecute:
+ default:
+ validValues := []string{
+ string(api.TaintEffectNoSchedule),
+ string(api.TaintEffectPreferNoSchedule),
+ string(api.TaintEffectNoExecute),
+ // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit.
+ // string(api.TaintEffectNoScheduleNoAdmit),
+ }
+ allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
+ }
+ return allErrors
+}
+
+// validateOnlyAddedTolerations validates updated pod tolerations.
+func validateOnlyAddedTolerations(newTolerations []api.Toleration, oldTolerations []api.Toleration, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, old := range oldTolerations {
+ found := false
+ old.TolerationSeconds = nil
+ for _, newToleration := range newTolerations {
+ newToleration.TolerationSeconds = nil
+ if reflect.DeepEqual(old, newToleration) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ allErrs = append(allErrs, field.Forbidden(fldPath, "existing toleration can not be modified except its tolerationSeconds"))
+ return allErrs
+ }
+ }
+
+ allErrs = append(allErrs, ValidateTolerations(newTolerations, fldPath)...)
+ return allErrs
+}
+
+func ValidateHostAliases(hostAliases []api.HostAlias, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, hostAlias := range hostAliases {
+ if ip := net.ParseIP(hostAlias.IP); ip == nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address"))
+ }
+ for _, hostname := range hostAlias.Hostnames {
+ allErrs = append(allErrs, ValidateDNS1123Subdomain(hostname, fldPath.Child("hostnames"))...)
+ }
+ }
+ return allErrs
+}
+
+// ValidateTolerations tests if given tolerations have valid data.
+func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ for i, toleration := range tolerations {
+ idxPath := fldPath.Index(i)
+ // validate the toleration key
+ if len(toleration.Key) > 0 {
+ allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...)
+ }
+
+ // empty toleration key with Exists operator and empty value means match all taints
+ if len(toleration.Key) == 0 && toleration.Operator != api.TolerationOpExists {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator,
+ "operator must be Exists when `key` is empty, which means \"match all values and all keys\""))
+ }
+
+ if toleration.TolerationSeconds != nil && toleration.Effect != api.TaintEffectNoExecute {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect,
+ "effect must be 'NoExecute' when `tolerationSeconds` is set"))
+ }
+
+ // validate toleration operator and value
+ switch toleration.Operator {
+ // empty operator means Equal
+ case api.TolerationOpEqual, "":
+ if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";")))
+ }
+ case api.TolerationOpExists:
+ if len(toleration.Value) > 0 {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'"))
+ }
+ default:
+ validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)}
+ allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues))
+ }
+
+ // validate toleration effect, empty toleration effect means match all taint effects
+ if len(toleration.Effect) > 0 {
+ allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...)
+ }
+ }
+ return allErrors
+}
+
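+// For illustration only (not part of the upstream source): tolerationSeconds
+// requires the NoExecute effect. The key is hypothetical:
+//
+//	secs := int64(300)
+//	tols := []api.Toleration{{
+//		Key:               "node.alpha.kubernetes.io/unreachable",
+//		Operator:          api.TolerationOpExists,
+//		Effect:            api.TaintEffectNoSchedule,
+//		TolerationSeconds: &secs,
+//	}}
+//	errs := ValidateTolerations(tols, field.NewPath("spec", "tolerations"))
+//	// errs: Invalid on effect, "effect must be 'NoExecute' when `tolerationSeconds` is set".
+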
+// validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template
+// it only does additive validation of fields not covered in validateContainers
+func validateContainersOnlyForPod(containers []api.Container, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, ctr := range containers {
+ idxPath := fldPath.Index(i)
+ if len(ctr.Image) != len(strings.TrimSpace(ctr.Image)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("image"), ctr.Image, "must not have leading or trailing whitespace"))
+ }
+ }
+ return allErrs
+}
+
+// ValidatePod tests if required fields in the pod are set.
+func ValidatePod(pod *api.Pod) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...)
+
+ // we do additional validation only pertinent for pods and not pod templates
+ // this was done to preserve backwards compatibility
+ specPath := field.NewPath("spec")
+
+ allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.Containers, specPath.Child("containers"))...)
+ allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.InitContainers, specPath.Child("initContainers"))...)
+
+ return allErrs
+}
+
+// ValidatePodSpec tests that the specified PodSpec has valid data.
+// This includes checking formatting and uniqueness. It also canonicalizes the
+// structure by setting default values and implementing any backwards-compatibility
+// tricks.
+func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ allVolumes, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes"))
+ allErrs = append(allErrs, vErrs...)
+ allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...)
+ allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...)
+ allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
+ allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
+ allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
+ allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
+ allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...)
+ if len(spec.ServiceAccountName) > 0 {
+ for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg))
+ }
+ }
+
+ if len(spec.NodeName) > 0 {
+ for _, msg := range ValidateNodeName(spec.NodeName, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg))
+ }
+ }
+
+ if spec.ActiveDeadlineSeconds != nil {
+ value := *spec.ActiveDeadlineSeconds
+ if value < 1 || value > math.MaxInt32 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), value, validation.InclusiveRangeError(1, math.MaxInt32)))
+ }
+ }
+
+ if len(spec.Hostname) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...)
+ }
+
+ if len(spec.Subdomain) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...)
+ }
+
+ if len(spec.Tolerations) > 0 {
+ allErrs = append(allErrs, ValidateTolerations(spec.Tolerations, fldPath.Child("tolerations"))...)
+ }
+
+ if len(spec.HostAliases) > 0 {
+ allErrs = append(allErrs, ValidateHostAliases(spec.HostAliases, fldPath.Child("hostAliases"))...)
+ }
+
+ return allErrs
+}
+
+// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields have valid data
+func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ switch rq.Operator {
+ case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn:
+ if len(rq.Values) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+ }
+ case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist:
+ if len(rq.Values) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+ }
+
+ case api.NodeSelectorOpGt, api.NodeSelectorOpLt:
+ if len(rq.Values) != 1 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'"))
+ }
+ default:
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
+ }
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
+ return allErrs
+}
+
+// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
+func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(term.MatchExpressions) == 0 {
+ return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement"))
+ }
+ for j, req := range term.MatchExpressions {
+ allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...)
+ }
+ return allErrs
+}
+
+// ValidateNodeSelector tests that the specified nodeSelector fields have valid data
+func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ termFldPath := fldPath.Child("nodeSelectorTerms")
+ if len(nodeSelector.NodeSelectorTerms) == 0 {
+ return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
+ }
+
+ for i, term := range nodeSelector.NodeSelectorTerms {
+ allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...)
+ }
+
+ return allErrs
+}
+
+// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations contains valid data
+func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ v1Avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error()))
+ return allErrs
+ }
+ var avoids api.AvoidPods
+ if err := v1.Convert_v1_AvoidPods_To_api_AvoidPods(&v1Avoids, &avoids, nil); err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error()))
+ return allErrs
+ }
+
+ if len(avoids.PreferAvoidPods) != 0 {
+ for i, pa := range avoids.PreferAvoidPods {
+ idxPath := fldPath.Child(api.PreferAvoidPodsAnnotationKey).Index(i)
+ allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...)
+ }
+ }
+
+ return allErrs
+}
+
+// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data.
+func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if avoidPodEntry.PodSignature.PodController == nil {
+ allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), ""))
+ } else {
+ if !*(avoidPodEntry.PodSignature.PodController.Controller) {
+ allErrors = append(allErrors,
+ field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"),
+ *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller"))
+ }
+ }
+ return allErrors
+}
+
+// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields have valid data
+func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for i, term := range terms {
+ if term.Weight <= 0 || term.Weight > 100 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100"))
+ }
+
+ allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...)
+ }
+ return allErrs
+}
+
+// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
+func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if !utilfeature.DefaultFeatureGate.Enabled(features.AffinityInAnnotations) && len(podAffinityTerm.TopologyKey) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty"))
+ }
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...)
+ for _, name := range podAffinityTerm.Namespaces {
+ for _, msg := range ValidateNamespaceName(name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
+ }
+ }
+ if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti affinity"))
+ }
+ if len(podAffinityTerm.TopologyKey) != 0 {
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...)
+ }
+ return allErrs
+}
+
+// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
+func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, podAffinityTerm := range podAffinityTerms {
+ allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...)
+ }
+ return allErrs
+}
+
+// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
+func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for j, weightedTerm := range weightedPodAffinityTerms {
+ if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
+ }
+ allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...)
+ }
+ return allErrs
+}
+
+// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
+func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+ // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+ // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
+ // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+ // }
+ if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is not allowed for hard pod anti-affinity
+ allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
+ fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is allowed for soft pod anti-affinity
+ allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true,
+ fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ return allErrs
+}
+
+// validatePodAffinity tests that the specified podAffinity fields have valid data
+func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+ // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+ // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
+ // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+ // }
+ if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is not allowed for hard pod affinity
+ allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
+ fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is not allowed for soft pod affinity
+ allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false,
+ fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ return allErrs
+}
+
+func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList {
+ if p == "docker/default" {
+ return nil
+ }
+ if p == "unconfined" {
+ return nil
+ }
+ if strings.HasPrefix(p, "localhost/") {
+ return validateLocalDescendingPath(strings.TrimPrefix(p, "localhost/"), fldPath)
+ }
+ return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")}
+}
+
+func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if p, exists := annotations[api.SeccompPodAnnotationKey]; exists {
+ allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...)
+ }
+ for k, p := range annotations {
+ if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) {
+ allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...)
+ }
+ }
+
+ return allErrs
+}
+
+func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for k, p := range annotations {
+ if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) {
+ continue
+ }
+ // TODO: this belongs in admission, not general pod validation:
+ if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmor) {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "AppArmor is disabled by feature-gate"))
+ continue
+ }
+ containerName := strings.TrimPrefix(k, apparmor.ContainerAnnotationKeyPrefix)
+ if !podSpecHasContainer(spec, containerName) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found"))
+ }
+
+ if err := apparmor.ValidateProfileFormat(p); err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error()))
+ }
+ }
+
+ return allErrs
+}
+
+func podSpecHasContainer(spec *api.PodSpec, containerName string) bool {
+ for _, c := range spec.InitContainers {
+ if c.Name == containerName {
+ return true
+ }
+ }
+ for _, c := range spec.Containers {
+ if c.Name == containerName {
+ return true
+ }
+ }
+ return false
+}
+
+const (
+ // a sysctl segment regex, concatenated with dots to form a sysctl name
+ SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
+
+ // a sysctl name regex
+ SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt
+
+ // the maximal length of a sysctl name
+ SysctlMaxLength int = 253
+)
+
+var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$")
+
+// IsValidSysctlName checks that the given string is a valid sysctl name,
+// i.e. matches SysctlFmt.
+func IsValidSysctlName(name string) bool {
+ if len(name) > SysctlMaxLength {
+ return false
+ }
+ return sysctlRegexp.MatchString(name)
+}
+
+func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, s := range sysctls {
+ if len(s.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), ""))
+ } else if !IsValidSysctlName(s.Name) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt)))
+ }
+ }
+ return allErrs
+}
+
+// ValidatePodSecurityContext tests that the specified PodSecurityContext has valid data.
+func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if securityContext != nil {
+ allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...)
+ allErrs = append(allErrs, validateHostNetworkNoHostAliases(securityContext.HostNetwork, spec.HostAliases, specPath)...)
+ if securityContext.FSGroup != nil {
+ for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
+ }
+ }
+ if securityContext.RunAsUser != nil {
+ for _, msg := range validation.IsValidUserID(*securityContext.RunAsUser) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg))
+ }
+ }
+ for g, gid := range securityContext.SupplementalGroups {
+ for _, msg := range validation.IsValidGroupID(gid) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
+ }
+ }
+ }
+
+ return allErrs
+}
+
+func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) {
+ allErrs = field.ErrorList{}
+ if len(newContainers) != len(oldContainers) {
+ //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
+ allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers"))
+ return allErrs, true
+ }
+
+ // validate updated container images
+ for i, ctr := range newContainers {
+ if len(ctr.Image) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), ""))
+ }
+ // this is only called from ValidatePodUpdate so it's safe to check leading/trailing whitespace.
+ if len(strings.TrimSpace(ctr.Image)) != len(ctr.Image) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("image"), ctr.Image, "must not have leading or trailing whitespace"))
+ }
+ }
+ return allErrs, false
+}
+
+// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...)
+ specPath := field.NewPath("spec")
+
+ // validate updateable fields:
+ // 1. spec.containers[*].image
+ // 2. spec.initContainers[*].image
+ // 3. spec.activeDeadlineSeconds
+
+ containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers"))
+ allErrs = append(allErrs, containerErrs...)
+ if stop {
+ return allErrs
+ }
+ containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers"))
+ allErrs = append(allErrs, containerErrs...)
+ if stop {
+ return allErrs
+ }
+
+ // validate updated spec.activeDeadlineSeconds. two types of updates are allowed:
+ // 1. from nil to a positive value
+ // 2. from a positive value to a lesser, non-negative value
+ if newPod.Spec.ActiveDeadlineSeconds != nil {
+ newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds
+ if newActiveDeadlineSeconds < 0 || newActiveDeadlineSeconds > math.MaxInt32 {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, validation.InclusiveRangeError(0, math.MaxInt32)))
+ return allErrs
+ }
+ if oldPod.Spec.ActiveDeadlineSeconds != nil {
+ oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
+ if oldActiveDeadlineSeconds < newActiveDeadlineSeconds {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value"))
+ return allErrs
+ }
+ }
+ } else if oldPod.Spec.ActiveDeadlineSeconds != nil {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value"))
+ }
+
+ // handle updateable fields by munging those fields prior to deep equal comparison.
+ mungedPod := *newPod
+
+ // allow hostname and subdomain to be updated if they are empty. This allows for migration between the beta
+ // annotations and the GA field when upgrading between Kubernetes 1.6.x and 1.7.x.
+ if oldPod.Spec.Hostname == "" {
+ mungedPod.Spec.Hostname = oldPod.Spec.Hostname
+ }
+ if oldPod.Spec.Subdomain == "" {
+ mungedPod.Spec.Subdomain = oldPod.Spec.Subdomain
+ }
+
+ // munge spec.containers[*].image
+ var newContainers []api.Container
+ for ix, container := range mungedPod.Spec.Containers {
+ container.Image = oldPod.Spec.Containers[ix].Image
+ newContainers = append(newContainers, container)
+ }
+ mungedPod.Spec.Containers = newContainers
+ // munge spec.initContainers[*].image
+ var newInitContainers []api.Container
+ for ix, container := range mungedPod.Spec.InitContainers {
+ container.Image = oldPod.Spec.InitContainers[ix].Image
+ newInitContainers = append(newInitContainers, container)
+ }
+ mungedPod.Spec.InitContainers = newInitContainers
+ // munge spec.activeDeadlineSeconds
+ mungedPod.Spec.ActiveDeadlineSeconds = nil
+ if oldPod.Spec.ActiveDeadlineSeconds != nil {
+ activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
+ mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
+ }
+
+ // Allow only additions to tolerations updates.
+ mungedPod.Spec.Tolerations = oldPod.Spec.Tolerations
+ allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...)
+
+ if !apiequality.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) {
+ //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
+ allErrs = append(allErrs, field.Forbidden(specPath, "pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` or `spec.tolerations` (only additions to existing tolerations)"))
+ }
+
+ return allErrs
+}
+
+// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...)
+
+ if newPod.Spec.NodeName != oldPod.Spec.NodeName {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly"))
+ }
+
+ // For status updates we ignore changes to the pod spec.
+ newPod.Spec = oldPod.Spec
+
+ return allErrs
+}
+
+// ValidatePodBinding tests if required fields in the pod binding are legal.
+func ValidatePodBinding(binding *api.Binding) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" {
+ // TODO: When validation becomes versioned, this gets more complicated.
+ allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", "<empty>"}))
+ }
+ if len(binding.Target.Name) == 0 {
+ // TODO: When validation becomes versioned, this gets more complicated.
+ allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), ""))
+ }
+
+ return allErrs
+}
+
+// ValidatePodTemplate tests if required fields in the pod template are set.
+func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList {
+ allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...)
+ return allErrs
+}
+
+// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...)
+ return allErrs
+}
+
+var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone))
+var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort),
+ string(api.ServiceTypeLoadBalancer), string(api.ServiceTypeExternalName))
+
+// ValidateService tests if required fields/annotations of a Service are valid.
+func ValidateService(service *api.Service) field.ErrorList {
+ allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata"))
+
+ specPath := field.NewPath("spec")
+ isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone
+ if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != api.ServiceTypeExternalName {
+ allErrs = append(allErrs, field.Required(specPath.Child("ports"), ""))
+ }
+ switch service.Spec.Type {
+ case api.ServiceTypeLoadBalancer:
+ for ix := range service.Spec.Ports {
+ port := &service.Spec.Ports[ix]
+ // This is a workaround for broken cloud environments that
+ // over-open firewalls. Hopefully it can go away when more clouds
+ // understand containers better.
+ if port.Port == 10250 {
+ portPath := specPath.Child("ports").Index(ix)
+ allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet"))
+ }
+ }
+ if service.Spec.ClusterIP == "None" {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services"))
+ }
+ case api.ServiceTypeNodePort:
+ if service.Spec.ClusterIP == "None" {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for NodePort services"))
+ }
+ case api.ServiceTypeExternalName:
+ if service.Spec.ClusterIP != "" {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services"))
+ }
+ if len(service.Spec.ExternalName) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...)
+ } else {
+ allErrs = append(allErrs, field.Required(specPath.Child("externalName"), ""))
+ }
+ }
+
+ allPortNames := sets.String{}
+ portsPath := specPath.Child("ports")
+ for i := range service.Spec.Ports {
+ portPath := portsPath.Index(i)
+ allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...)
+ }
+
+ if service.Spec.Selector != nil {
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
+ }
+
+ if len(service.Spec.SessionAffinity) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), ""))
+ } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List()))
+ }
+
+ if helper.IsServiceIPSet(service) {
+ if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address"))
+ }
+ }
+
+ ipPath := specPath.Child("externalIPs")
+ for i, ip := range service.Spec.ExternalIPs {
+ idxPath := ipPath.Index(i)
+ if msgs := validation.IsValidIP(ip); len(msgs) != 0 {
+ for i := range msgs {
+ allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i]))
+ }
+ } else {
+ allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...)
+ }
+ }
+
+ if len(service.Spec.Type) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("type"), ""))
+ } else if !supportedServiceType.Has(string(service.Spec.Type)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List()))
+ }
+
+ if service.Spec.Type == api.ServiceTypeLoadBalancer {
+ portsPath := specPath.Child("ports")
+ includeProtocols := sets.NewString()
+ for i := range service.Spec.Ports {
+ portPath := portsPath.Index(i)
+ if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) {
+ allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports"))
+ } else {
+ includeProtocols.Insert(string(service.Spec.Ports[i].Protocol))
+ }
+ }
+ if includeProtocols.Len() > 1 {
+ allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mixed protocols"))
+ }
+ }
+
+ if service.Spec.Type == api.ServiceTypeClusterIP {
+ portsPath := specPath.Child("ports")
+ for i := range service.Spec.Ports {
+ portPath := portsPath.Index(i)
+ if service.Spec.Ports[i].NodePort != 0 {
+ allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'"))
+ }
+ }
+ }
+
+ // Check for duplicate NodePorts, considering (protocol,port) pairs
+ portsPath = specPath.Child("ports")
+ nodePorts := make(map[api.ServicePort]bool)
+ for i := range service.Spec.Ports {
+ port := &service.Spec.Ports[i]
+ if port.NodePort == 0 {
+ continue
+ }
+ portPath := portsPath.Index(i)
+ var key api.ServicePort
+ key.Protocol = port.Protocol
+ key.NodePort = port.NodePort
+ _, found := nodePorts[key]
+ if found {
+ allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort))
+ }
+ nodePorts[key] = true
+ }
+
+ // Validate SourceRange field and annotation
+ _, ok := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey]
+ if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok {
+ var fieldPath *field.Path
+ var val string
+ if len(service.Spec.LoadBalancerSourceRanges) > 0 {
+ fieldPath = specPath.Child("LoadBalancerSourceRanges")
+ val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges)
+ } else {
+ fieldPath = field.NewPath("metadata", "annotations").Key(api.AnnotationLoadBalancerSourceRangesKey)
+ val = service.Annotations[api.AnnotationLoadBalancerSourceRangesKey]
+ }
+ if service.Spec.Type != api.ServiceTypeLoadBalancer {
+ allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'"))
+ }
+ _, err := apiservice.GetLoadBalancerSourceRanges(service)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 "))
+ }
+ }
+
+ allErrs = append(allErrs, validateServiceExternalTrafficFieldsValue(service)...)
+ allErrs = append(allErrs, validateServiceExternalTrafficAPIVersion(service)...)
+
+ return allErrs
+}
+
+func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if requireName && len(sp.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ } else if len(sp.Name) != 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...)
+ if allNames.Has(sp.Name) {
+ allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name))
+ } else {
+ allNames.Insert(sp.Name)
+ }
+ }
+
+ for _, msg := range validation.IsValidPortNum(int(sp.Port)) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg))
+ }
+
+ if len(sp.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
+ } else if !supportedPortProtocols.Has(string(sp.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List()))
+ }
+
+ allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...)
+
+ // in the v1 API, targetPorts on headless services were tolerated.
+ // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility.
+ //
+ // if isHeadlessService {
+ // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) {
+ // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None"))
+ // }
+ // }
+
+ return allErrs
+}
+
+// validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations
+// have legal value.
+func validateServiceExternalTrafficFieldsValue(service *api.Service) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ // Check beta annotations.
+ if l, ok := service.Annotations[api.BetaAnnotationExternalTraffic]; ok {
+ if l != api.AnnotationValueExternalTrafficLocal &&
+ l != api.AnnotationValueExternalTrafficGlobal {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "annotations").Key(api.BetaAnnotationExternalTraffic), l,
+ fmt.Sprintf("ExternalTraffic must be %v or %v", api.AnnotationValueExternalTrafficLocal, api.AnnotationValueExternalTrafficGlobal)))
+ }
+ }
+ if l, ok := service.Annotations[api.BetaAnnotationHealthCheckNodePort]; ok {
+ p, err := strconv.Atoi(l)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "annotations").Key(api.BetaAnnotationHealthCheckNodePort), l,
+ "HealthCheckNodePort must be a valid port number"))
+ } else if p <= 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "annotations").Key(api.BetaAnnotationHealthCheckNodePort), l,
+ "HealthCheckNodePort must be greater than 0"))
+ }
+ }
+
+ // Check first class fields.
+ if service.Spec.ExternalTrafficPolicy != "" &&
+ service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeCluster &&
+ service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeLocal {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
+ fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", api.ServiceExternalTrafficPolicyTypeCluster, api.ServiceExternalTrafficPolicyTypeLocal)))
+ }
+ if service.Spec.HealthCheckNodePort < 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort,
+ "HealthCheckNodePort must be not less than 0"))
+ }
+
+ return allErrs
+}
+
+// serviceExternalTrafficStatus stores flags indicating whether ExternalTraffic
+// related beta annotations and GA fields are set on the service.
+type serviceExternalTrafficStatus struct {
+ betaExternalTrafficIsSet bool
+ betaHealthCheckIsSet bool
+ gaExternalTrafficIsSet bool
+ gaHealthCheckIsSet bool
+}
+
+func (s *serviceExternalTrafficStatus) useBetaExternalTrafficWithGA() bool {
+ return s.betaExternalTrafficIsSet && (s.gaExternalTrafficIsSet || s.gaHealthCheckIsSet)
+}
+
+func (s *serviceExternalTrafficStatus) useBetaHealthCheckWithGA() bool {
+ return s.betaHealthCheckIsSet && (s.gaExternalTrafficIsSet || s.gaHealthCheckIsSet)
+}
+
+func getServiceExternalTrafficStatus(service *api.Service) *serviceExternalTrafficStatus {
+ s := serviceExternalTrafficStatus{}
+ _, s.betaExternalTrafficIsSet = service.Annotations[api.BetaAnnotationExternalTraffic]
+ _, s.betaHealthCheckIsSet = service.Annotations[api.BetaAnnotationHealthCheckNodePort]
+ s.gaExternalTrafficIsSet = service.Spec.ExternalTrafficPolicy != ""
+ s.gaHealthCheckIsSet = service.Spec.HealthCheckNodePort != 0
+ return &s
+}
+
+// validateServiceExternalTrafficAPIVersion checks if the user mixes ExternalTraffic
+// API versions.
+func validateServiceExternalTrafficAPIVersion(service *api.Service) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ status := getServiceExternalTrafficStatus(service)
+
+ if status.useBetaExternalTrafficWithGA() {
+ fieldPath := field.NewPath("metadata", "annotations").Key(api.BetaAnnotationExternalTraffic)
+ msg := fmt.Sprintf("please replace the beta annotation with 'ExternalTrafficPolicy' field")
+ allErrs = append(allErrs, field.Invalid(fieldPath, api.BetaAnnotationExternalTraffic, msg))
+ }
+
+ if status.useBetaHealthCheckWithGA() {
+ fieldPath := field.NewPath("metadata", "annotations").Key(api.BetaAnnotationHealthCheckNodePort)
+ msg := fmt.Sprintf("please replace the beta annotation with 'HealthCheckNodePort' field")
+ allErrs = append(allErrs, field.Invalid(fieldPath, api.BetaAnnotationHealthCheckNodePort, msg))
+ }
+
+ return allErrs
+}
+
+// ValidateServiceExternalTrafficFieldsCombination validates if ExternalTrafficPolicy,
+// HealthCheckNodePort and Type combination are legal. For update, it should be called
+// after clearing externalTraffic related fields for the ease of transitioning between
+// different service types.
+func ValidateServiceExternalTrafficFieldsCombination(service *api.Service) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if service.Spec.Type != api.ServiceTypeLoadBalancer &&
+ service.Spec.Type != api.ServiceTypeNodePort &&
+ service.Spec.ExternalTrafficPolicy != "" {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
+ "ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service"))
+ }
+
+ if !apiservice.NeedsHealthCheck(service) &&
+ service.Spec.HealthCheckNodePort != 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "healthCheckNodePort"), service.Spec.HealthCheckNodePort,
+ "HealthCheckNodePort can only be set on LoadBalancer service with ExternalTrafficPolicy=Local"))
+ }
+
+ return allErrs
+}
+
+// ValidateServiceUpdate tests if required fields in the service are set during an update
+func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
+
+ // ClusterIP should be immutable for services using it (every type other than ExternalName)
+ // which do not have ClusterIP assigned yet (empty string value)
+ if service.Spec.Type != api.ServiceTypeExternalName {
+ if oldService.Spec.Type != api.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" {
+ allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...)
+ }
+ }
+
+ allErrs = append(allErrs, ValidateService(service)...)
+ return allErrs
+}
+
+// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status.
+func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
+ return allErrs
+}
+
+// ValidateReplicationController tests if required fields in the replication controller are set.
+func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList {
+ allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
+func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set.
+func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...)
+ return allErrs
+}
+
+func ValidateReplicationControllerStatus(status api.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ReadyReplicas), statusPath.Child("readyReplicas"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(status.AvailableReplicas), statusPath.Child("availableReplicas"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ObservedGeneration), statusPath.Child("observedGeneration"))...)
+ msg := "cannot be greater than status.replicas"
+ if status.FullyLabeledReplicas > status.Replicas {
+ allErrs = append(allErrs, field.Invalid(statusPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
+ }
+ if status.ReadyReplicas > status.Replicas {
+ allErrs = append(allErrs, field.Invalid(statusPath.Child("readyReplicas"), status.ReadyReplicas, msg))
+ }
+ if status.AvailableReplicas > status.Replicas {
+ allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, msg))
+ }
+ if status.AvailableReplicas > status.ReadyReplicas {
+ allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
+ }
+ return allErrs
+}
+
+// ValidateNonEmptySelector validates that the given selector is non-empty.
+func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ selector := labels.Set(selectorMap).AsSelector()
+ if selector.Empty() {
+ allErrs = append(allErrs, field.Required(fldPath, ""))
+ }
+ return allErrs
+}
+
+// ValidatePodTemplateSpecForRC validates the given template and ensures that it is in accordance with the desired selector and replicas.
+func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if template == nil {
+ allErrs = append(allErrs, field.Required(fldPath, ""))
+ } else {
+ selector := labels.Set(selectorMap).AsSelector()
+ if !selector.Empty() {
+ // Verify that the RC selector matches the labels in template.
+ labels := labels.Set(template.Labels)
+ if !selector.Matches(labels) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
+ }
+ }
+ allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...)
+ if replicas > 1 {
+ allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
+ }
+ // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
+ if template.Spec.RestartPolicy != api.RestartPolicyAlways {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
+ }
+ if template.Spec.ActiveDeadlineSeconds != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified"))
+ }
+ }
+ return allErrs
+}
+
+// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
+func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
+ allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
+ allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...)
+ return allErrs
+}
+
+// ValidatePodTemplateSpec validates the spec of a pod template
+func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
+ allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...)
+ return allErrs
+}
+
+func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i := range volumes {
+ vol := &volumes[i]
+ idxPath := fldPath.Index(i)
+ if vol.GCEPersistentDisk != nil {
+ if !vol.GCEPersistentDisk.ReadOnly {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
+ }
+ }
+ // TODO: What to do for AWS? It doesn't support replicas
+ }
+ return allErrs
+}
+
+// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations have valid data
+func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ taints, err := helper.GetTaintsFromNodeAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error()))
+ return allErrs
+ }
+
+ if len(taints) > 0 {
+ allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...)
+ }
+
+ return allErrs
+}
+
+// validateNodeTaints tests if given taints have valid data.
+func validateNodeTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+
+ uniqueTaints := map[api.TaintEffect]sets.String{}
+
+ for i, currTaint := range taints {
+ idxPath := fldPath.Index(i)
+ // validate the taint key
+ allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...)
+ // validate the taint value
+ if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";")))
+ }
+ // validate the taint effect
+ allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...)
+
+ // validate if taint is unique by <key, effect>
+ if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) {
+ duplicatedError := field.Duplicate(idxPath, currTaint)
+ duplicatedError.Detail = "taints must be unique by key and effect pair"
+ allErrors = append(allErrors, duplicatedError)
+ continue
+ }
+
+ // add taint to uniqueTaints for the uniqueness check
+ if len(uniqueTaints[currTaint.Effect]) == 0 {
+ uniqueTaints[currTaint.Effect] = sets.String{}
+ }
+ uniqueTaints[currTaint.Effect].Insert(currTaint.Key)
+ }
+ return allErrors
+}
+
+func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if annotations[api.TaintsAnnotationKey] != "" {
+ allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...)
+ }
+
+ if annotations[api.PreferAvoidPodsAnnotationKey] != "" {
+ allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...)
+ }
+ return allErrs
+}
+
+// ValidateNode tests if required fields in the node are set.
+func ValidateNode(node *api.Node) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
+ allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+ if len(node.Spec.Taints) > 0 {
+ allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
+ }
+
+ // Only validate spec. All status fields are optional and can be updated later.
+
+ // external ID is required.
+ if len(node.Spec.ExternalID) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), ""))
+ }
+
+ // TODO(rjnagal): Ignore PodCIDR until it is completely implemented.
+ return allErrs
+}
+
+// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode.
+func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
+ allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+
+ // TODO: Enable the code once we have a better API object status update model. Currently,
+ // anyone can update node status.
+ // if !apiequality.Semantic.DeepEqual(node.Status, api.NodeStatus{}) {
+ // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty"))
+ // }
+
+ // Validate resource quantities in capacity.
+ for k, v := range node.Status.Capacity {
+ resPath := field.NewPath("status", "capacity", string(k))
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ // Validate resource quantities in allocatable.
+ for k, v := range node.Status.Allocatable {
+ resPath := field.NewPath("status", "allocatable", string(k))
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+
+ // Validate that there are no duplicate addresses in node status.
+ addresses := make(map[api.NodeAddress]bool)
+ for i, address := range node.Status.Addresses {
+ if _, ok := addresses[address]; ok {
+ allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address))
+ }
+ addresses[address] = true
+ }
+
+ if len(oldNode.Spec.PodCIDR) == 0 {
+ // Allow the controller manager to assign a CIDR to a node if it doesn't have one.
+ oldNode.Spec.PodCIDR = node.Spec.PodCIDR
+ } else {
+ if oldNode.Spec.PodCIDR != node.Spec.PodCIDR {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid"))
+ }
+ }
+
+ // Allow controller manager updating provider ID when not set
+ if len(oldNode.Spec.ProviderID) == 0 {
+ oldNode.Spec.ProviderID = node.Spec.ProviderID
+ } else {
+ if oldNode.Spec.ProviderID != node.Spec.ProviderID {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid"))
+ }
+ }
+
+ // TODO: move reset function to its own location
+ // Ignore metadata changes now that they have been tested
+ oldNode.ObjectMeta = node.ObjectMeta
+ // Allow users to update capacity
+ oldNode.Status.Capacity = node.Status.Capacity
+ // Allow users to unschedule node
+ oldNode.Spec.Unschedulable = node.Spec.Unschedulable
+ // Ignore status changes by copying the new status over before comparison
+ oldNode.Status = node.Status
+
+ // update taints
+ if len(node.Spec.Taints) > 0 {
+ allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
+ }
+ oldNode.Spec.Taints = node.Spec.Taints
+
+ // TODO: Add a 'real' error type for this error and print actual diffs.
+ if !apiequality.Semantic.DeepEqual(oldNode, node) {
+ glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node)
+ allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints or capacity"))
+ }
+
+ return allErrs
+}
+
+// validateResourceName validates a compute resource type name.
+// Refer to docs/design/resources.md for more details.
+func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ if len(allErrs) != 0 {
+ return allErrs
+ }
+
+ if len(strings.Split(value, "/")) == 1 {
+ if !helper.IsStandardResourceName(value) {
+ return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
+ }
+ }
+
+ return allErrs
+}
+
+// validateContainerResourceName validates a container resource name.
+// Refer to docs/design/resources.md for more details.
+func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := validateResourceName(value, fldPath)
+
+ if len(strings.Split(value, "/")) == 1 {
+ if !helper.IsStandardContainerResourceName(value) {
+ return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
+ }
+ }
+ return allErrs
+}
+
+// ValidateResourceQuotaResourceName validates resource names that can go in a resource quota.
+// Refer to docs/design/resources.md for more details.
+func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := validateResourceName(value, fldPath)
+ if len(strings.Split(value, "/")) == 1 {
+ if !helper.IsStandardQuotaResourceName(value) {
+ return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource))
+ }
+ }
+ return allErrs
+}
+
+// validateLimitRangeTypeName validates limit range types.
+func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ if len(allErrs) != 0 {
+ return allErrs
+ }
+
+ if len(strings.Split(value, "/")) == 1 {
+ if !helper.IsStandardLimitRangeType(value) {
+ return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified"))
+ }
+ }
+
+ return allErrs
+}
+
+// validateLimitRangeResourceName validates a limit range resource name;
+// limit types other than Pod/Container may contain storage, not just CPU or memory.
+func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList {
+ switch limitType {
+ case api.LimitTypePod, api.LimitTypeContainer:
+ return validateContainerResourceName(value, fldPath)
+ default:
+ return validateResourceName(value, fldPath)
+ }
+}
+
+// ValidateLimitRange tests if required fields in the LimitRange are set.
+func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList {
+ allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata"))
+
+ // ensure resource names are properly qualified per docs/design/resources.md
+ limitTypeSet := map[api.LimitType]bool{}
+ fldPath := field.NewPath("spec", "limits")
+ for i := range limitRange.Spec.Limits {
+ idxPath := fldPath.Index(i)
+ limit := &limitRange.Spec.Limits[i]
+ allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...)
+
+ _, found := limitTypeSet[limit.Type]
+ if found {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type))
+ }
+ limitTypeSet[limit.Type] = true
+
+ keys := sets.String{}
+ min := map[string]resource.Quantity{}
+ max := map[string]resource.Quantity{}
+ defaults := map[string]resource.Quantity{}
+ defaultRequests := map[string]resource.Quantity{}
+ maxLimitRequestRatios := map[string]resource.Quantity{}
+
+ for k, q := range limit.Max {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...)
+ keys.Insert(string(k))
+ max[string(k)] = q
+ }
+ for k, q := range limit.Min {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...)
+ keys.Insert(string(k))
+ min[string(k)] = q
+ }
+
+ if limit.Type == api.LimitTypePod {
+ if len(limit.Default) > 0 {
+ allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'"))
+ }
+ if len(limit.DefaultRequest) > 0 {
+ allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'"))
+ }
+ } else {
+ for k, q := range limit.Default {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...)
+ keys.Insert(string(k))
+ defaults[string(k)] = q
+ }
+ for k, q := range limit.DefaultRequest {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...)
+ keys.Insert(string(k))
+ defaultRequests[string(k)] = q
+ }
+ }
+
+ if limit.Type == api.LimitTypePersistentVolumeClaim {
+ _, minQuantityFound := limit.Min[api.ResourceStorage]
+ _, maxQuantityFound := limit.Max[api.ResourceStorage]
+ if !minQuantityFound && !maxQuantityFound {
+ allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided"))
+ }
+ }
+
+ for k, q := range limit.MaxLimitRequestRatio {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
+ keys.Insert(string(k))
+ maxLimitRequestRatios[string(k)] = q
+ }
+
+ for k := range keys {
+ minQuantity, minQuantityFound := min[k]
+ maxQuantity, maxQuantityFound := max[k]
+ defaultQuantity, defaultQuantityFound := defaults[k]
+ defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
+ maxRatio, maxRatioFound := maxLimitRequestRatios[k]
+
+ if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
+ }
+
+ if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
+ }
+
+ if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
+ }
+
+ if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
+ }
+
+ if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), defaultQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
+ }
+
+ if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), defaultQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
+ }
+ if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String())))
+ }
+ if maxRatioFound && minQuantityFound && maxQuantityFound {
+ maxRatioValue := float64(maxRatio.Value())
+ minQuantityValue := minQuantity.Value()
+ maxQuantityValue := maxQuantity.Value()
+ if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue {
+ maxRatioValue = float64(maxRatio.MilliValue()) / 1000
+ minQuantityValue = minQuantity.MilliValue()
+ maxQuantityValue = maxQuantity.MilliValue()
+ }
+ maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue)
+ if maxRatioValue > maxRatioLimit {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit)))
+ }
+ }
+ }
+ }
+
+ return allErrs
+}
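+
+// Editor's illustrative sketch (not part of the upstream file): for each
+// resource named in a LimitRangeItem, the checks above enforce
+// min <= defaultRequest <= default <= max, and maxLimitRequestRatio >= 1.
+// A container limit such as the following passes; swapping Min and Max would
+// yield a field.Invalid error on spec.limits[0].min[cpu]:
+//
+//    limit := api.LimitRangeItem{
+//        Type: api.LimitTypeContainer,
+//        Min:  api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+//        Max:  api.ResourceList{api.ResourceCPU: resource.MustParse("2")},
+//    }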
+
+// ValidateServiceAccount tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList {
+ allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata"))
+ return allErrs
+}
+
+// ValidateServiceAccountUpdate tests if the fields in an updated ServiceAccount are valid.
+func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...)
+ return allErrs
+}
+
+// ValidateSecret tests if required fields in the Secret are set.
+func ValidateSecret(secret *api.Secret) field.ErrorList {
+ allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata"))
+
+ dataPath := field.NewPath("data")
+ totalSize := 0
+ for key, value := range secret.Data {
+ for _, msg := range validation.IsConfigMapKey(key) {
+ allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg))
+ }
+ totalSize += len(value)
+ }
+ if totalSize > api.MaxSecretSize {
+ allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize))
+ }
+
+ switch secret.Type {
+ case api.SecretTypeServiceAccountToken:
+ // Only require Annotations[kubernetes.io/service-account.name]
+ // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop
+ if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), ""))
+ }
+ case api.SecretTypeOpaque, "":
+ // no-op
+ case api.SecretTypeDockercfg:
+ dockercfgBytes, exists := secret.Data[api.DockerConfigKey]
+ if !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), ""))
+ break
+ }
+
+ // make sure that the content is well-formed JSON.
+ if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil {
+ allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "<secret contents redacted>", err.Error()))
+ }
+ case api.SecretTypeDockerConfigJson:
+ dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey]
+ if !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), ""))
+ break
+ }
+
+ // make sure that the content is well-formed JSON.
+ if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil {
+ allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "<secret contents redacted>", err.Error()))
+ }
+ case api.SecretTypeBasicAuth:
+ _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey]
+ _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey]
+
+ // username or password might be empty, but the field must be present
+ if !usernameFieldExists && !passwordFieldExists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.BasicAuthUsernameKey), ""))
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.BasicAuthPasswordKey), ""))
+ break
+ }
+ case api.SecretTypeSSHAuth:
+ if len(secret.Data[api.SSHAuthPrivateKey]) == 0 {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.SSHAuthPrivateKey), ""))
+ break
+ }
+
+ case api.SecretTypeTLS:
+ if _, exists := secret.Data[api.TLSCertKey]; !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), ""))
+ }
+ if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), ""))
+ }
+ // TODO: Verify that the key matches the cert.
+ default:
+ // no-op
+ }
+
+ return allErrs
+}
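+
+// Editor's illustrative sketch (not part of the upstream file): a
+// kubernetes.io/dockercfg secret must carry well-formed JSON under
+// api.DockerConfigKey, so an input like this validates cleanly:
+//
+//    secret := &api.Secret{
+//        ObjectMeta: metav1.ObjectMeta{Name: "registry-creds", Namespace: "default"},
+//        Type:       api.SecretTypeDockercfg,
+//        Data:       map[string][]byte{api.DockerConfigKey: []byte(`{"https://index.docker.io/v1/": {}}`)},
+//    }
+//    errs := ValidateSecret(secret) // empty for this input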
+
+// ValidateSecretUpdate tests if the fields in an updated Secret are valid; the secret type is immutable.
+func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata"))
+
+ if len(newSecret.Type) == 0 {
+ newSecret.Type = oldSecret.Type
+ }
+
+ allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
+
+ allErrs = append(allErrs, ValidateSecret(newSecret)...)
+ return allErrs
+}
+
+// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateConfigMapName = NameIsDNSSubdomain
+
+// ValidateConfigMap tests whether required fields in the ConfigMap are set.
+func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...)
+
+ totalSize := 0
+
+ for key, value := range cfg.Data {
+ for _, msg := range validation.IsConfigMapKey(key) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
+ }
+ totalSize += len(value)
+ }
+ if totalSize > api.MaxSecretSize {
+ allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize))
+ }
+
+ return allErrs
+}
+
+// ValidateConfigMapUpdate tests if the fields in an updated ConfigMap are valid.
+func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...)
+ allErrs = append(allErrs, ValidateConfigMap(newCfg)...)
+
+ return allErrs
+}
+
+func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList {
+ if quantity.Value() < 0 {
+ return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")}
+ }
+ return field.ErrorList{}
+}
+
+// ValidateResourceRequirements validates resource requests and limits in a resource requirement spec.
+func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ limPath := fldPath.Child("limits")
+ reqPath := fldPath.Child("requests")
+ for resourceName, quantity := range requirements.Limits {
+ fldPath := limPath.Key(string(resourceName))
+ // Validate resource name.
+ allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
+
+ // Validate resource quantity.
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
+
+ // Check that request <= limit.
+ requestQuantity, exists := requirements.Requests[resourceName]
+ if exists {
+ // For GPUs, requests may neither exceed nor fall below limits; request and limit must be equal.
+ if resourceName == api.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 {
+ allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU)))
+ } else if quantity.Cmp(requestQuantity) < 0 {
+ allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName)))
+ }
+ }
+ if resourceName == api.ResourceStorageOverlay && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+ allErrs = append(allErrs, field.Forbidden(limPath, "ResourceStorageOverlay field disabled by feature-gate for ResourceRequirements"))
+ }
+ }
+ for resourceName, quantity := range requirements.Requests {
+ fldPath := reqPath.Key(string(resourceName))
+ // Validate resource name.
+ allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
+ // Validate resource quantity.
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
+ }
+
+ return allErrs
+}
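+
+// Editor's illustrative sketch (not part of the upstream file): requests may
+// not exceed limits, and for api.ResourceNvidiaGPU request and limit must be
+// equal:
+//
+//    req := api.ResourceRequirements{
+//        Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("64Mi")},
+//        Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("128Mi")},
+//    }
+//    errs := ValidateResourceRequirements(&req, field.NewPath("resources")) // empty
+//    // A request above 128Mi would produce a field.Invalid on resources.limits[memory].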
+
+// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for the set of scopes
+func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(resourceQuotaSpec.Scopes) == 0 {
+ return allErrs
+ }
+ hardLimits := sets.NewString()
+ for k := range resourceQuotaSpec.Hard {
+ hardLimits.Insert(string(k))
+ }
+ fldPath := fld.Child("scopes")
+ scopeSet := sets.NewString()
+ for _, scope := range resourceQuotaSpec.Scopes {
+ if !helper.IsStandardResourceQuotaScope(string(scope)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope"))
+ }
+ for _, k := range hardLimits.List() {
+ if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) {
+ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource"))
+ }
+ }
+ scopeSet.Insert(string(scope))
+ }
+ invalidScopePairs := []sets.String{
+ sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)),
+ sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)),
+ }
+ for _, invalidScopePair := range invalidScopePairs {
+ if scopeSet.HasAll(invalidScopePair.List()...) {
+ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
+ }
+ }
+ return allErrs
+}
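+
+// Editor's illustrative sketch (not part of the upstream file): paired scopes
+// are mutually exclusive, so a spec listing both members of a pair fails with
+// "conflicting scopes":
+//
+//    spec := api.ResourceQuotaSpec{
+//        Scopes: []api.ResourceQuotaScope{
+//            api.ResourceQuotaScopeBestEffort,
+//            api.ResourceQuotaScopeNotBestEffort, // conflicts with the scope above
+//        },
+//    }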
+
+// ValidateResourceQuota tests if required fields in the ResourceQuota are set.
+func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList {
+ allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata"))
+
+ allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...)
+ allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...)
+
+ return allErrs
+}
+
+// ValidateResourceQuotaStatus validates the hard and used resource lists in a ResourceQuota status.
+func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ fldPath := fld.Child("hard")
+ for k, v := range status.Hard {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ fldPath = fld.Child("used")
+ for k, v := range status.Used {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+
+ return allErrs
+}
+
+// ValidateResourceQuotaSpec validates the hard resource list and scopes in a ResourceQuota spec.
+func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ fldPath := fld.Child("hard")
+ for k, v := range resourceQuotaSpec.Hard {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...)
+
+ return allErrs
+}
+
+// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource
+func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...)
+ if helper.IsIntegerResourceName(resource) {
+ if value.MilliValue()%int64(1000) != int64(0) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg))
+ }
+ }
+ return allErrs
+}
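+
+// Editor's illustrative sketch (not part of the upstream file): integer-valued
+// resources such as pods must be whole numbers:
+//
+//    errs := ValidateResourceQuantityValue(string(api.ResourcePods), resource.MustParse("1.5"), field.NewPath("hard"))
+//    // errs holds one field.Invalid, since 1.5 pods is not an integer; "2" would pass.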
+
+// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make.
+// newResourceQuota is updated with fields that cannot be changed.
+func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...)
+
+ // ensure scopes cannot change, and that resources are still valid for scope
+ fldPath := field.NewPath("spec", "scopes")
+ oldScopes := sets.NewString()
+ newScopes := sets.NewString()
+ for _, scope := range newResourceQuota.Spec.Scopes {
+ newScopes.Insert(string(scope))
+ }
+ for _, scope := range oldResourceQuota.Spec.Scopes {
+ oldScopes.Insert(string(scope))
+ }
+ if !oldScopes.Equal(newScopes) {
+ allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, fieldImmutableErrorMsg))
+ }
+
+ newResourceQuota.Status = oldResourceQuota.Status
+ return allErrs
+}
+
+// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make.
+// newResourceQuota is updated with fields that cannot be changed.
+func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
+ if len(newResourceQuota.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ fldPath := field.NewPath("status", "hard")
+ for k, v := range newResourceQuota.Status.Hard {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ fldPath = field.NewPath("status", "used")
+ for k, v := range newResourceQuota.Status.Used {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ newResourceQuota.Spec = oldResourceQuota.Spec
+ return allErrs
+}
+
+// ValidateNamespace tests if required fields are set.
+func ValidateNamespace(namespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata"))
+ for i := range namespace.Spec.Finalizers {
+ allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...)
+ }
+ return allErrs
+}
+
+// validateFinalizerName validates a finalizer name against both generic and Kubernetes-specific rules.
+func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
+ allErrs := genericvalidation.ValidateFinalizerName(stringValue, fldPath)
+ for _, err := range validateKubeFinalizerName(stringValue, fldPath) {
+ allErrs = append(allErrs, err)
+ }
+
+ return allErrs
+}
+
+// validateKubeFinalizerName checks that an unqualified finalizer name is one of the standard (legacy) finalizer names
+func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(strings.Split(stringValue, "/")) == 1 {
+ if !helper.IsStandardFinalizerName(stringValue) {
+ return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified"))
+ }
+ }
+
+ return allErrs
+}
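+
+// Editor's illustrative sketch (not part of the upstream file):
+//
+//    p := field.NewPath("spec", "finalizers")
+//    validateKubeFinalizerName("kubernetes", p)             // ok: standard finalizer
+//    validateKubeFinalizerName("example.com/controller", p) // ok: fully qualified
+//    validateKubeFinalizerName("controller", p)             // field.Invalid: neither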
+
+// ValidateNamespaceUpdate tests to make sure a namespace update can be applied.
+// newNamespace is updated with fields that cannot be changed
+func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
+ newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers
+ newNamespace.Status = oldNamespace.Status
+ return allErrs
+}
+
+// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields
+// that cannot be changed.
+func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
+ newNamespace.Spec = oldNamespace.Spec
+ if newNamespace.DeletionTimestamp.IsZero() {
+ if newNamespace.Status.Phase != api.NamespaceActive {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("status", "phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty"))
+ }
+ } else {
+ if newNamespace.Status.Phase != api.NamespaceTerminating {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("status", "phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty"))
+ }
+ }
+ return allErrs
+}
+
+// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make.
+// newNamespace is updated with fields that cannot be changed.
+func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
+
+ fldPath := field.NewPath("spec", "finalizers")
+ for i := range newNamespace.Spec.Finalizers {
+ idxPath := fldPath.Index(i)
+ allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...)
+ }
+ newNamespace.Status = oldNamespace.Status
+ return allErrs
+}
+
+// Construct lookup map of old subset IPs to NodeNames.
+func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.EndpointAddress) {
+ for n := range addresses {
+ if addresses[n].NodeName == nil {
+ continue
+ }
+ ipToNodeName[addresses[n].IP] = *addresses[n].NodeName
+ }
+}
+
+// buildEndpointAddressNodeNameMap builds a map across all subsets from IP to NodeName.
+func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]string {
+ ipToNodeName := make(map[string]string)
+ for i := range subsets {
+ updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses)
+ updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses)
+ }
+ return ipToNodeName
+}
+
+func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList {
+ errList := field.ErrorList{}
+ existingNodeName, found := ipToNodeName[addr.IP]
+ if !found {
+ return errList
+ }
+ if addr.NodeName == nil || *addr.NodeName == existingNodeName {
+ return errList
+ }
+ // NodeName entry found for this endpoint IP, but user is attempting to change NodeName
+ return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName)))
+}
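+
+// Editor's illustrative sketch (not part of the upstream file, fldPath assumed
+// in scope): once an endpoint IP is pinned to a node, an update may keep or
+// clear the NodeName but may not repoint it:
+//
+//    old := map[string]string{"10.1.2.3": "node-a"}
+//    nodeB := "node-b"
+//    addr := api.EndpointAddress{IP: "10.1.2.3", NodeName: &nodeB}
+//    validateEpAddrNodeNameTransition(&addr, old, fldPath) // field.Forbidden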
+
+// ValidateEndpoints tests if required fields are set.
+func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList {
+ allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...)
+ allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []api.EndpointSubset{}, field.NewPath("subsets"))...)
+ return allErrs
+}
+
+func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets)
+ for i := range subsets {
+ ss := &subsets[i]
+ idxPath := fldPath.Index(i)
+
+ if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 {
+ // TODO: consider adding a RequiredOneOf() error for this and similar cases
+ allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`"))
+ }
+ if len(ss.Ports) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("ports"), ""))
+ }
+ for addr := range ss.Addresses {
+ allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...)
+ }
+ for addr := range ss.NotReadyAddresses {
+ allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...)
+ }
+ for port := range ss.Ports {
+ allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...)
+ }
+ }
+
+ return allErrs
+}
+
+func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsValidIP(address.IP) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg))
+ }
+ if len(address.Hostname) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...)
+ }
+ // During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update
+ if address.NodeName != nil {
+ for _, msg := range ValidateNodeName(*address.NodeName, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg))
+ }
+ }
+ allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...)
+ if len(allErrs) > 0 {
+ return allErrs
+ }
+ allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
+ return allErrs
+}
+
+func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
+ // We disallow some IPs as endpoints or external-ips. Specifically,
+ // unspecified and loopback addresses are nonsensical and link-local
+ // addresses tend to be used for node-centric purposes (e.g. metadata
+ // service).
+ allErrs := field.ErrorList{}
+ ip := net.ParseIP(ipAddress)
+ if ip == nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address"))
+ return allErrs
+ }
+ if ip.IsUnspecified() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)"))
+ }
+ if ip.IsLoopback() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)"))
+ }
+ if ip.IsLinkLocalUnicast() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)"))
+ }
+ if ip.IsLinkLocalMulticast() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)"))
+ }
+ return allErrs
+}
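+
+// Editor's illustrative sketch (not part of the upstream file, fldPath assumed
+// in scope):
+//
+//    validateNonSpecialIP("0.0.0.0", fldPath)      // rejected: unspecified
+//    validateNonSpecialIP("127.0.0.1", fldPath)    // rejected: loopback
+//    validateNonSpecialIP("169.254.10.9", fldPath) // rejected: link-local
+//    validateNonSpecialIP("10.0.0.5", fldPath)     // accepted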
+
+func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if requireName && len(port.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ } else if len(port.Name) != 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...)
+ }
+ for _, msg := range validation.IsValidPortNum(int(port.Port)) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg))
+ }
+ if len(port.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
+ } else if !supportedPortProtocols.Has(string(port.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
+ }
+ return allErrs
+}
+
+// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied.
+func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...)
+ allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...)
+ return allErrs
+}
+
+// ValidateSecurityContext ensure the security context contains valid settings
+func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // sc should only be nil in tests, since SecurityContext is defaulted by the API.
+ if sc == nil {
+ return allErrs
+ }
+
+ if sc.Privileged != nil {
+ if *sc.Privileged && !capabilities.Get().AllowPrivileged {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by cluster policy"))
+ }
+ }
+
+ if sc.RunAsUser != nil {
+ if *sc.RunAsUser < 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg))
+ }
+ }
+ return allErrs
+}
+
+// ValidatePodLogOptions checks that the requested pod log options are valid.
+func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if opts.TailLines != nil && *opts.TailLines < 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg))
+ }
+ if opts.LimitBytes != nil && *opts.LimitBytes < 1 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0"))
+ }
+ switch {
+ case opts.SinceSeconds != nil && opts.SinceTime != nil:
+ allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified"))
+ case opts.SinceSeconds != nil:
+ if *opts.SinceSeconds < 1 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0"))
+ }
+ }
+ return allErrs
+}
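+
+// Editor's illustrative sketch (not part of the upstream file): tailLines may
+// be zero, sinceSeconds must be positive, and sinceSeconds/sinceTime are
+// mutually exclusive:
+//
+//    tail, since := int64(100), int64(3600)
+//    opts := api.PodLogOptions{TailLines: &tail, SinceSeconds: &since}
+//    errs := ValidatePodLogOptions(&opts) // empty; setting SinceTime as well would be forbidden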
+
+// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus
+func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, ingress := range status.Ingress {
+ idxPath := fldPath.Child("ingress").Index(i)
+ if len(ingress.IP) > 0 {
+ if isIP := (net.ParseIP(ingress.IP) != nil); !isIP {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address"))
+ }
+ }
+ if len(ingress.Hostname) > 0 {
+ for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
+ }
+ if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
+ }
+ }
+ }
+ return allErrs
+}
+
+func sysctlIntersection(a []api.Sysctl, b []api.Sysctl) []string {
+ lookup := make(map[string]struct{}, len(a))
+ result := []string{}
+ for i := range a {
+ lookup[a[i].Name] = struct{}{}
+ }
+ for i := range b {
+ if _, found := lookup[b[i].Name]; found {
+ result = append(result, b[i].Name)
+ }
+ }
+ return result
+}
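+
+// Editor's illustrative sketch (not part of the upstream file):
+//
+//    a := []api.Sysctl{{Name: "kernel.shm_rmid_forced", Value: "1"}}
+//    b := []api.Sysctl{{Name: "net.core.somaxconn", Value: "1024"}, {Name: "kernel.shm_rmid_forced", Value: "0"}}
+//    sysctlIntersection(a, b) // ["kernel.shm_rmid_forced"]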
+
+// validateStorageNodeAffinityAnnotation tests that the serialized TopologyConstraints in PersistentVolume.Annotations contain valid data
+func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPath *field.Path) (bool, field.ErrorList) {
+ allErrs := field.ErrorList{}
+
+ na, err := helper.GetStorageNodeAffinityFromAnnotation(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.AlphaStorageNodeAffinityAnnotation, err.Error()))
+ return false, allErrs
+ }
+ if na == nil {
+ return false, allErrs
+ }
+
+ if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) {
+ allErrs = append(allErrs, field.Forbidden(fldPath, "Storage node affinity is disabled by feature-gate"))
+ }
+
+ policySpecified := false
+ if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ policySpecified = true
+ }
+
+ if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"), "Storage node affinity does not support preferredDuringSchedulingIgnoredDuringExecution"))
+ }
+ return policySpecified, allErrs
+}