summary | refs | log | tree | commit | diff
path: root/vendor/k8s.io/kubernetes/pkg/controller
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/k8s.io/kubernetes/pkg/controller')
-rw-r--r-- vendor/k8s.io/kubernetes/pkg/controller/client_builder.go | 32
-rw-r--r-- vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go | 151
-rw-r--r-- vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go | 203
3 files changed, 199 insertions, 187 deletions
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go
index 491366288..041717623 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go
@@ -20,21 +20,21 @@ import (
"fmt"
"time"
+ v1authenticationapi "k8s.io/api/authentication/v1"
+ "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
- clientgoclientset "k8s.io/client-go/kubernetes"
+ clientset "k8s.io/client-go/kubernetes"
+ v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1"
+ v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
- "k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/api/v1"
- v1authenticationapi "k8s.io/kubernetes/pkg/apis/authentication/v1"
- "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
- v1authentication "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1"
- v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+ "k8s.io/kubernetes/pkg/api/legacyscheme"
+ api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/serviceaccount"
"github.com/golang/glog"
@@ -46,8 +46,8 @@ type ControllerClientBuilder interface {
ConfigOrDie(name string) *restclient.Config
Client(name string) (clientset.Interface, error)
ClientOrDie(name string) clientset.Interface
- ClientGoClient(name string) (clientgoclientset.Interface, error)
- ClientGoClientOrDie(name string) clientgoclientset.Interface
+ ClientGoClient(name string) (clientset.Interface, error)
+ ClientGoClientOrDie(name string) clientset.Interface
}
// SimpleControllerClientBuilder returns a fixed client with different user agents
@@ -85,15 +85,15 @@ func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interf
return client
}
-func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) {
+func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
- return clientgoclientset.NewForConfig(clientConfig)
+ return clientset.NewForConfig(clientConfig)
}
-func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface {
+func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
client, err := b.ClientGoClient(name)
if err != nil {
glog.Fatal(err)
@@ -237,7 +237,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,
// If we couldn't run the token review, the API might be disabled or we might not have permission.
// Try to make a request to /apis with the token. If we get a 401 we should consider the token invalid.
clientConfigCopy := *clientConfig
- clientConfigCopy.NegotiatedSerializer = api.Codecs
+ clientConfigCopy.NegotiatedSerializer = legacyscheme.Codecs
client, err := restclient.UnversionedRESTClientFor(&clientConfigCopy)
if err != nil {
return nil, false, err
@@ -275,15 +275,15 @@ func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface
return client
}
-func (b SAControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) {
+func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
- return clientgoclientset.NewForConfig(clientConfig)
+ return clientset.NewForConfig(clientConfig)
}
-func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface {
+func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
client, err := b.ClientGoClient(name)
if err != nil {
glog.Fatal(err)
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go
index 5477a073f..21d7aa302 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go
@@ -21,54 +21,41 @@ import (
"sync"
"github.com/golang/glog"
+ apps "k8s.io/api/apps/v1"
+ "k8s.io/api/core/v1"
+ extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
- "k8s.io/kubernetes/pkg/api/v1"
- appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
- extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)
-// GetControllerOf returns the controllerRef if controllee has a controller,
-// otherwise returns nil.
-func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference {
- ownerRefs := controllee.GetOwnerReferences()
- for i := range ownerRefs {
- owner := &ownerRefs[i]
- if owner.Controller != nil && *owner.Controller == true {
- return owner
- }
- }
- return nil
-}
-
-type baseControllerRefManager struct {
- controller metav1.Object
- selector labels.Selector
+type BaseControllerRefManager struct {
+ Controller metav1.Object
+ Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
- canAdoptFunc func() error
+ CanAdoptFunc func() error
}
-func (m *baseControllerRefManager) canAdopt() error {
+func (m *BaseControllerRefManager) CanAdopt() error {
m.canAdoptOnce.Do(func() {
- if m.canAdoptFunc != nil {
- m.canAdoptErr = m.canAdoptFunc()
+ if m.CanAdoptFunc != nil {
+ m.canAdoptErr = m.CanAdoptFunc()
}
})
return m.canAdoptErr
}
-// claimObject tries to take ownership of an object for this controller.
+// ClaimObject tries to take ownership of an object for this controller.
//
// It will reconcile the following:
// * Adopt orphans if the match function returns true.
// * Release owned objects if the match function returns false.
//
-// A non-nil error is returned if some form of reconciliation was attemped and
+// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
@@ -77,10 +64,10 @@ func (m *baseControllerRefManager) canAdopt() error {
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
-func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
- controllerRef := GetControllerOf(obj)
+func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
+ controllerRef := metav1.GetControllerOf(obj)
if controllerRef != nil {
- if controllerRef.UID != m.controller.GetUID() {
+ if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else. Ignore.
return false, nil
}
@@ -93,7 +80,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met
}
// Owned by us but selector doesn't match.
// Try to release, unless we're being deleted.
- if m.controller.GetDeletionTimestamp() != nil {
+ if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(obj); err != nil {
@@ -110,7 +97,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met
}
// It's an orphan.
- if m.controller.GetDeletionTimestamp() != nil || !match(obj) {
+ if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
// Ignore if we're being deleted or selector doesn't match.
return false, nil
}
@@ -133,7 +120,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met
}
type PodControllerRefManager struct {
- baseControllerRefManager
+ BaseControllerRefManager
controllerKind schema.GroupVersionKind
podControl PodControlInterface
}
@@ -141,14 +128,14 @@ type PodControllerRefManager struct {
// NewPodControllerRefManager returns a PodControllerRefManager that exposes
// methods to manage the controllerRef of pods.
//
-// The canAdopt() function can be used to perform a potentially expensive check
+// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
-// If canAdopt() returns a non-nil error, all adoptions will fail.
+// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
-// NOTE: Once canAdopt() is called, it will not be called again by the same
+// NOTE: Once CanAdopt() is called, it will not be called again by the same
// PodControllerRefManager instance. Create a new instance if it makes
-// sense to check canAdopt() again (e.g. in a different sync pass).
+// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager(
podControl PodControlInterface,
controller metav1.Object,
@@ -157,10 +144,10 @@ func NewPodControllerRefManager(
canAdopt func() error,
) *PodControllerRefManager {
return &PodControllerRefManager{
- baseControllerRefManager: baseControllerRefManager{
- controller: controller,
- selector: selector,
- canAdoptFunc: canAdopt,
+ BaseControllerRefManager: BaseControllerRefManager{
+ Controller: controller,
+ Selector: selector,
+ CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
podControl: podControl,
@@ -176,7 +163,7 @@ func NewPodControllerRefManager(
// Optional: If one or more filters are specified, a Pod will only be claimed if
// all filters return true.
//
-// A non-nil error is returned if some form of reconciliation was attemped and
+// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
@@ -189,7 +176,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
match := func(obj metav1.Object) bool {
pod := obj.(*v1.Pod)
// Check selector first so filters only run on potentially matching Pods.
- if !m.selector.Matches(labels.Set(pod.Labels)) {
+ if !m.Selector.Matches(labels.Set(pod.Labels)) {
return false
}
for _, filter := range filters {
@@ -207,7 +194,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
}
for _, pod := range pods {
- ok, err := m.claimObject(pod, match, adopt, release)
+ ok, err := m.ClaimObject(pod, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@@ -222,7 +209,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
- if err := m.canAdopt(); err != nil {
+ if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@@ -230,7 +217,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
- m.controller.GetName(), m.controller.GetUID(), pod.UID)
+ m.Controller.GetName(), m.Controller.GetUID(), pod.UID)
return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch))
}
@@ -238,8 +225,8 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
- pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName())
- deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), pod.UID)
+ pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
+ deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID)
err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
@@ -267,7 +254,7 @@ func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
// categories and accordingly adopt or release them. See comments on these functions
// for more details.
type ReplicaSetControllerRefManager struct {
- baseControllerRefManager
+ BaseControllerRefManager
controllerKind schema.GroupVersionKind
rsControl RSControlInterface
}
@@ -275,14 +262,14 @@ type ReplicaSetControllerRefManager struct {
// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes
// methods to manage the controllerRef of ReplicaSets.
//
-// The canAdopt() function can be used to perform a potentially expensive check
+// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
-// If canAdopt() returns a non-nil error, all adoptions will fail.
+// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
-// NOTE: Once canAdopt() is called, it will not be called again by the same
+// NOTE: Once CanAdopt() is called, it will not be called again by the same
// ReplicaSetControllerRefManager instance. Create a new instance if it
-// makes sense to check canAdopt() again (e.g. in a different sync pass).
+// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager(
rsControl RSControlInterface,
controller metav1.Object,
@@ -291,10 +278,10 @@ func NewReplicaSetControllerRefManager(
canAdopt func() error,
) *ReplicaSetControllerRefManager {
return &ReplicaSetControllerRefManager{
- baseControllerRefManager: baseControllerRefManager{
- controller: controller,
- selector: selector,
- canAdoptFunc: canAdopt,
+ BaseControllerRefManager: BaseControllerRefManager{
+ Controller: controller,
+ Selector: selector,
+ CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
rsControl: rsControl,
@@ -307,7 +294,7 @@ func NewReplicaSetControllerRefManager(
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
-// A non-nil error is returned if some form of reconciliation was attemped and
+// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
@@ -319,7 +306,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
var errlist []error
match := func(obj metav1.Object) bool {
- return m.selector.Matches(labels.Set(obj.GetLabels()))
+ return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet))
@@ -329,7 +316,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
}
for _, rs := range sets {
- ok, err := m.claimObject(rs, match, adopt, release)
+ ok, err := m.ClaimObject(rs, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@@ -344,7 +331,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error {
- if err := m.canAdopt(); err != nil {
+ if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@@ -352,7 +339,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
- m.controller.GetName(), m.controller.GetUID(), rs.UID)
+ m.Controller.GetName(), m.Controller.GetUID(), rs.UID)
return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, []byte(addControllerPatch))
}
@@ -360,8 +347,8 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error {
glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
- replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName())
- deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), replicaSet.UID)
+ replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
+ deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID)
err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
@@ -379,9 +366,9 @@ func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extension
return err
}
-// RecheckDeletionTimestamp returns a canAdopt() function to recheck deletion.
+// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
-// The canAdopt() function calls getObject() to fetch the latest value,
+// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
return func() error {
@@ -402,7 +389,7 @@ func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() er
// categories and accordingly adopt or release them. See comments on these functions
// for more details.
type ControllerRevisionControllerRefManager struct {
- baseControllerRefManager
+ BaseControllerRefManager
controllerKind schema.GroupVersionKind
crControl ControllerRevisionControlInterface
}
@@ -426,10 +413,10 @@ func NewControllerRevisionControllerRefManager(
canAdopt func() error,
) *ControllerRevisionControllerRefManager {
return &ControllerRevisionControllerRefManager{
- baseControllerRefManager: baseControllerRefManager{
- controller: controller,
- selector: selector,
- canAdoptFunc: canAdopt,
+ BaseControllerRefManager: BaseControllerRefManager{
+ Controller: controller,
+ Selector: selector,
+ CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
crControl: crControl,
@@ -442,29 +429,29 @@ func NewControllerRevisionControllerRefManager(
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
-// A non-nil error is returned if some form of reconciliation was attemped and
+// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
-func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) {
- var claimed []*appsv1beta1.ControllerRevision
+func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
+ var claimed []*apps.ControllerRevision
var errlist []error
match := func(obj metav1.Object) bool {
- return m.selector.Matches(labels.Set(obj.GetLabels()))
+ return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
- return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
+ return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
}
release := func(obj metav1.Object) error {
- return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision))
+ return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
}
for _, h := range histories {
- ok, err := m.claimObject(h, match, adopt, release)
+ ok, err := m.ClaimObject(h, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@@ -478,8 +465,8 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
-func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
- if err := m.canAdopt(); err != nil {
+func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
+ if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@@ -487,16 +474,16 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
- m.controller.GetName(), m.controller.GetUID(), history.UID)
+ m.Controller.GetName(), m.Controller.GetUID(), history.UID)
return m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(addControllerPatch))
}
// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
-func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
+func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
- history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName())
- deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), history.UID)
+ history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
+ deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
index 9f1a17767..d7d755ebf 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
@@ -25,30 +25,28 @@ import (
"sync/atomic"
"time"
+ "k8s.io/api/core/v1"
+ extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
+ clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/integer"
- "k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/api/v1"
- v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
+ clientretry "k8s.io/client-go/util/retry"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
- "k8s.io/kubernetes/pkg/api/v1/ref"
- "k8s.io/kubernetes/pkg/api/validation"
- extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
- "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
- clientretry "k8s.io/kubernetes/pkg/client/retry"
+ _ "k8s.io/kubernetes/pkg/apis/core/install"
+ "k8s.io/kubernetes/pkg/apis/core/validation"
hashutil "k8s.io/kubernetes/pkg/util/hash"
+ taintutils "k8s.io/kubernetes/pkg/util/taints"
"github.com/golang/glog"
)
@@ -65,6 +63,21 @@ const (
// 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s
// latency/pod at the scale of 3000 pods over 100 nodes.
ExpectationsTimeout = 5 * time.Minute
+ // When batching pod creates, SlowStartInitialBatchSize is the size of the
+ // initial batch. The size of each successive batch is twice the size of
+ // the previous batch. For example, for a value of 1, batch sizes would be
+ // 1, 2, 4, 8, ... and for a value of 10, batch sizes would be
+ // 10, 20, 40, 80, ... Setting the value higher means that quota denials
+ // will result in more doomed API calls and associated event spam. Setting
+ // the value lower will result in more API call round trip periods for
+ // large batches.
+ //
+ // Given a number of pods to start "N":
+ // The number of doomed calls per sync once quota is exceeded is given by:
+ // min(N,SlowStartInitialBatchSize)
+ // The number of batches is given by:
+ // 1+floor(log_2(ceil(N/SlowStartInitialBatchSize)))
+ SlowStartInitialBatchSize = 1
)
var UpdateTaintBackoff = wait.Backoff{
@@ -397,7 +410,7 @@ type RealRSControl struct {
var _ RSControlInterface = &RealRSControl{}
func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
- _, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
+ _, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
return err
}
@@ -459,29 +472,12 @@ func getPodsFinalizers(template *v1.PodTemplateSpec) []string {
return desiredFinalizers
}
-func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
+func getPodsAnnotationSet(template *v1.PodTemplateSpec) labels.Set {
desiredAnnotations := make(labels.Set)
for k, v := range template.Annotations {
desiredAnnotations[k] = v
}
- createdByRef, err := ref.GetReference(api.Scheme, object)
- if err != nil {
- return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
- }
-
- // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
- // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
- // We need to consistently handle this case of annotation versioning.
- codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
-
- createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
- Reference: *createdByRef,
- })
- if err != nil {
- return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
- }
- desiredAnnotations[v1.CreatedByAnnotation] = string(createdByRefJson)
- return desiredAnnotations, nil
+ return desiredAnnotations
}
func getPodsPrefix(controllerName string) string {
@@ -531,17 +527,14 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v
}
func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
- _, err := r.KubeClient.Core().Pods(namespace).Patch(name, types.StrategicMergePatchType, data)
+ _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data)
return err
}
func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *metav1.OwnerReference) (*v1.Pod, error) {
desiredLabels := getPodsLabelSet(template)
desiredFinalizers := getPodsFinalizers(template)
- desiredAnnotations, err := getPodsAnnotationSet(template, parentObject)
- if err != nil {
- return nil, err
- }
+ desiredAnnotations := getPodsAnnotationSet(template)
accessor, err := meta.Accessor(parentObject)
if err != nil {
return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err)
@@ -559,11 +552,7 @@ func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Objec
if controllerRef != nil {
pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
}
- clone, err := api.Scheme.DeepCopy(&template.Spec)
- if err != nil {
- return nil, err
- }
- pod.Spec = *clone.(*v1.PodSpec)
+ pod.Spec = *template.Spec.DeepCopy()
return pod, nil
}
@@ -578,9 +567,9 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
if labels.Set(pod.Labels).AsSelectorPreValidated().Empty() {
return fmt.Errorf("unable to create pods, no labels")
}
- if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
+ if newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod); err != nil {
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
- return fmt.Errorf("unable to create pods: %v", err)
+ return err
} else {
accessor, err := meta.Accessor(object)
if err != nil {
@@ -599,7 +588,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
return fmt.Errorf("object does not have ObjectMeta, %v", err)
}
glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
- if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
+ if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil {
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
return fmt.Errorf("unable to delete pods: %v", err)
} else {
@@ -610,11 +599,13 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
type FakePodControl struct {
sync.Mutex
- Templates []v1.PodTemplateSpec
- ControllerRefs []metav1.OwnerReference
- DeletePodName []string
- Patches [][]byte
- Err error
+ Templates []v1.PodTemplateSpec
+ ControllerRefs []metav1.OwnerReference
+ DeletePodName []string
+ Patches [][]byte
+ Err error
+ CreateLimit int
+ CreateCallCount int
}
var _ PodControlInterface = &FakePodControl{}
@@ -632,6 +623,10 @@ func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error {
func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec, object runtime.Object) error {
f.Lock()
defer f.Unlock()
+ f.CreateCallCount++
+ if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
+ return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
+ }
f.Templates = append(f.Templates, *spec)
if f.Err != nil {
return f.Err
@@ -642,6 +637,10 @@ func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec,
func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
f.Lock()
defer f.Unlock()
+ f.CreateCallCount++
+ if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
+ return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
+ }
f.Templates = append(f.Templates, *spec)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
@@ -653,6 +652,10 @@ func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.
func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
f.Lock()
defer f.Unlock()
+ f.CreateCallCount++
+ if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
+ return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
+ }
f.Templates = append(f.Templates, *template)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
@@ -678,6 +681,8 @@ func (f *FakePodControl) Clear() {
f.Templates = []v1.PodTemplateSpec{}
f.ControllerRefs = []metav1.OwnerReference{}
f.Patches = [][]byte{}
+ f.CreateLimit = 0
+ f.CreateCallCount = 0
}
// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
@@ -711,8 +716,8 @@ func (s ByLogging) Less(i, j int) bool {
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
}
// 6. older pods < newer pods < empty timestamp pods
- if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
- return afterOrZero(s[j].CreationTimestamp, s[i].CreationTimestamp)
+ if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
+ return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp)
}
return false
}
@@ -751,31 +756,31 @@ func (s ActivePods) Less(i, j int) bool {
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
}
// 6. Empty creation time pods < newer pods < older pods
- if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
- return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp)
+ if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
+ return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
}
return false
}
// afterOrZero checks if time t1 is after time t2; if one of them
// is zero, the zero time is seen as after non-zero time.
-func afterOrZero(t1, t2 metav1.Time) bool {
+func afterOrZero(t1, t2 *metav1.Time) bool {
if t1.Time.IsZero() || t2.Time.IsZero() {
return t1.Time.IsZero()
}
return t1.After(t2.Time)
}
-func podReadyTime(pod *v1.Pod) metav1.Time {
+func podReadyTime(pod *v1.Pod) *metav1.Time {
if podutil.IsPodReady(pod) {
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
- return c.LastTransitionTime
+ return &c.LastTransitionTime
}
}
}
- return metav1.Time{}
+ return &metav1.Time{}
}
func maxContainerRestarts(pod *v1.Pod) int {
@@ -841,10 +846,10 @@ type ControllersByCreationTimestamp []*v1.ReplicationController
func (o ControllersByCreationTimestamp) Len() int { return len(o) }
func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ControllersByCreationTimestamp) Less(i, j int) bool {
- if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
+ if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
- return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
+ return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
@@ -853,10 +858,10 @@ type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
- if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
+ if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
- return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
+ return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
@@ -885,7 +890,12 @@ func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
}
-func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint *v1.Taint) error {
+// AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls
+// to update nodes; otherwise, no API calls. Return error if any.
+func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
+ if len(taints) == 0 {
+ return nil
+ }
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
@@ -893,19 +903,28 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint *v1.Ta
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
- oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
+ oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
- oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+ oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
- newNode, ok, err := v1helper.AddOrUpdateTaint(oldNode, taint)
- if err != nil {
- return fmt.Errorf("Failed to update taint annotation!")
+
+ var newNode *v1.Node
+ oldNodeCopy := oldNode
+ updated := false
+ for _, taint := range taints {
+ curNewNode, ok, err := taintutils.AddOrUpdateTaint(oldNodeCopy, taint)
+ if err != nil {
+ return fmt.Errorf("Failed to update taint of node!")
+ }
+ updated = updated || ok
+ newNode = curNewNode
+ oldNodeCopy = curNewNode
}
- if !ok {
+ if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
@@ -916,12 +935,15 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint *v1.Ta
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
-func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint, node *v1.Node) error {
+func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
+ if len(taints) == 0 {
+ return nil
+ }
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
- for i := range node.Spec.Taints {
- if node.Spec.Taints[i].MatchTaint(taint) {
+ for _, taint := range taints {
+ if taintutils.TaintExists(node.Spec.Taints, taint) {
match = true
break
}
@@ -930,6 +952,7 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint,
return nil
}
}
+
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
@@ -937,19 +960,28 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint,
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
- oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
+ oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
- oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+ oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
- newNode, ok, err := v1helper.RemoveTaint(oldNode, taint)
- if err != nil {
- return fmt.Errorf("Failed to update taint annotation!")
+
+ var newNode *v1.Node
+ oldNodeCopy := oldNode
+ updated := false
+ for _, taint := range taints {
+ curNewNode, ok, err := taintutils.RemoveTaint(oldNodeCopy, taint)
+ if err != nil {
+ return fmt.Errorf("Failed to remove taint of node!")
+ }
+ updated = updated || ok
+ newNode = curNewNode
+ oldNodeCopy = curNewNode
}
- if !ok {
+ if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
@@ -964,18 +996,11 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
}
newTaints := newNode.Spec.Taints
- objCopy, err := api.Scheme.DeepCopy(oldNode)
- if err != nil {
- return fmt.Errorf("failed to copy node object %#v: %v", oldNode, err)
- }
- newNode, ok := (objCopy).(*v1.Node)
- if !ok {
- return fmt.Errorf("failed to cast copy onto node object %#v: %v", newNode, err)
- }
- newNode.Spec.Taints = newTaints
- newData, err := json.Marshal(newNode)
+ newNodeClone := oldNode.DeepCopy()
+ newNodeClone.Spec.Taints = newTaints
+ newData, err := json.Marshal(newNodeClone)
if err != nil {
- return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
+ return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
@@ -983,7 +1008,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
- _, err = c.Core().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes)
+ _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes)
return err
}
@@ -1003,14 +1028,14 @@ func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs
}
// ComputeHash returns a hash value calculated from pod template and a collisionCount to avoid hash collision
-func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int64) uint32 {
+func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) uint32 {
podTemplateSpecHasher := fnv.New32a()
hashutil.DeepHashObject(podTemplateSpecHasher, *template)
// Add collisionCount in the hash if it exists.
if collisionCount != nil {
collisionCountBytes := make([]byte, 8)
- binary.LittleEndian.PutUint64(collisionCountBytes, uint64(*collisionCount))
+ binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount))
podTemplateSpecHasher.Write(collisionCountBytes)
}