summaryrefslogtreecommitdiff
path: root/vendor/k8s.io/kubernetes/pkg/volume
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/k8s.io/kubernetes/pkg/volume')
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/doc.go19
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/metrics_cached.go74
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go99
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go77
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go30
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go69
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/plugins.go603
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util.go456
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go462
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go31
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go61
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/device_util_unsupported.go24
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/doc.go18
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/fs.go96
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go38
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/io_util.go47
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/util.go213
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/volume.go230
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go91
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go23
20 files changed, 2761 insertions, 0 deletions
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/doc.go b/vendor/k8s.io/kubernetes/pkg/volume/doc.go
new file mode 100644
index 000000000..c98a5a153
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package volume includes internal representations of external volume types
+// as well as utility methods required to mount/unmount volumes to kubelets.
+package volume // import "k8s.io/kubernetes/pkg/volume"
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_cached.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_cached.go
new file mode 100644
index 000000000..ac0dc9b7a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_cached.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+var _ MetricsProvider = &cachedMetrics{}
+
+// cachedMetrics represents a MetricsProvider that wraps another provider and
+// caches the result.
+type cachedMetrics struct {
+ wrapped MetricsProvider
+ resultError error
+ resultMetrics *Metrics
+ once cacheOnce
+}
+
+// NewCachedMetrics creates a new cachedMetrics wrapping another
+// MetricsProvider and caching the results.
+func NewCachedMetrics(provider MetricsProvider) MetricsProvider {
+ return &cachedMetrics{wrapped: provider}
+}
+
+// GetMetrics runs the wrapped metrics provider's GetMetrics method once and
+// caches the result. Will not cache result if there is an error.
+// See MetricsProvider.GetMetrics
+func (md *cachedMetrics) GetMetrics() (*Metrics, error) {
+ md.once.cache(func() error {
+ md.resultMetrics, md.resultError = md.wrapped.GetMetrics()
+ return md.resultError
+ })
+ return md.resultMetrics, md.resultError
+}
+
+// Copied from sync.Once but we don't want to cache the results if there is an
+// error
+type cacheOnce struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Copied from sync.Once but we don't want to cache the results if there is an
+// error
+func (o *cacheOnce) cache(f func() error) {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ if o.done == 0 {
+ err := f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go
new file mode 100644
index 000000000..19a29cbbc
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/kubernetes/pkg/volume/util"
+)
+
+var _ MetricsProvider = &metricsDu{}
+
+// metricsDu represents a MetricsProvider that calculates the used and
+// available Volume space by executing the "du" command and gathering
+// filesystem info for the Volume path.
+type metricsDu struct {
+ // the directory path the volume is mounted to.
+ path string
+}
+
+// NewMetricsDu creates a new metricsDu with the Volume path.
+func NewMetricsDu(path string) MetricsProvider {
+ return &metricsDu{path}
+}
+
+// GetMetrics calculates the volume usage and device free space by executing "du"
+// and gathering filesystem info for the Volume path.
+// See MetricsProvider.GetMetrics
+func (md *metricsDu) GetMetrics() (*Metrics, error) {
+ metrics := &Metrics{Time: metav1.Now()}
+ if md.path == "" {
+ return metrics, NewNoPathDefinedError()
+ }
+
+ err := md.runDu(metrics)
+ if err != nil {
+ return metrics, err
+ }
+
+ err = md.runFind(metrics)
+ if err != nil {
+ return metrics, err
+ }
+
+ err = md.getFsInfo(metrics)
+ if err != nil {
+ return metrics, err
+ }
+
+ return metrics, nil
+}
+
+// runDu executes the "du" command and writes the results to metrics.Used
+func (md *metricsDu) runDu(metrics *Metrics) error {
+ used, err := util.Du(md.path)
+ if err != nil {
+ return err
+ }
+ metrics.Used = used
+ return nil
+}
+
+// runFind executes the "find" command and writes the results to metrics.InodesUsed
+func (md *metricsDu) runFind(metrics *Metrics) error {
+ inodesUsed, err := util.Find(md.path)
+ if err != nil {
+ return err
+ }
+ metrics.InodesUsed = resource.NewQuantity(inodesUsed, resource.BinarySI)
+ return nil
+}
+
+// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
+// info
+func (md *metricsDu) getFsInfo(metrics *Metrics) error {
+ available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path)
+ if err != nil {
+ return NewFsInfoFailedError(err)
+ }
+ metrics.Available = resource.NewQuantity(available, resource.BinarySI)
+ metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
+ metrics.Inodes = resource.NewQuantity(inodes, resource.BinarySI)
+ metrics.InodesFree = resource.NewQuantity(inodesFree, resource.BinarySI)
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go
new file mode 100644
index 000000000..50e7c2a21
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "fmt"
+)
+
+const (
+ // ErrCodeNotSupported code for NotSupported Errors.
+ ErrCodeNotSupported int = iota + 1
+ ErrCodeNoPathDefined
+ ErrCodeFsInfoFailed
+)
+
+// NewNotSupportedError creates a new MetricsError with code NotSupported.
+func NewNotSupportedError() *MetricsError {
+ return &MetricsError{
+ Code: ErrCodeNotSupported,
+ Msg: "metrics are not supported for MetricsNil Volumes",
+ }
+}
+
+// NewNoPathDefinedError creates a new MetricsError with code NoPathDefined.
+func NewNoPathDefinedError() *MetricsError {
+ return &MetricsError{
+ Code: ErrCodeNoPathDefined,
+ Msg: "no path defined for disk usage metrics.",
+ }
+}
+
+// NewFsInfoFailedError creates a new MetricsError with code FsInfoFailed.
+func NewFsInfoFailedError(err error) *MetricsError {
+ return &MetricsError{
+ Code: ErrCodeFsInfoFailed,
+ Msg: fmt.Sprintf("Failed to get FsInfo due to error %v", err),
+ }
+}
+
+// MetricsError to distinguish different Metrics Errors.
+type MetricsError struct {
+ Code int
+ Msg string
+}
+
+func (e *MetricsError) Error() string {
+ return fmt.Sprintf("%s", e.Msg)
+}
+
+// IsNotSupported returns true if and only if err's code is ErrCodeNotSupported.
+func IsNotSupported(err error) bool {
+ return isErrCode(err, ErrCodeNotSupported)
+}
+
+func isErrCode(err error, code int) bool {
+ if err == nil {
+ return false
+ }
+ if e, ok := err.(*MetricsError); ok {
+ return e.Code == code
+ }
+ return false
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go
new file mode 100644
index 000000000..5438dc3de
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+var _ MetricsProvider = &MetricsNil{}
+
+// MetricsNil represents a MetricsProvider that does not support returning
+// Metrics. It serves as a placeholder for Volumes that do not yet support
+// metrics.
+type MetricsNil struct{}
+
+// GetMetrics returns an empty Metrics and an error.
+// See MetricsProvider.GetMetrics
+func (*MetricsNil) GetMetrics() (*Metrics, error) {
+ return &Metrics{}, NewNotSupportedError()
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go
new file mode 100644
index 000000000..ede4f6ef8
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/kubernetes/pkg/volume/util"
+)
+
+var _ MetricsProvider = &metricsStatFS{}
+
+// metricsStatFS represents a MetricsProvider that calculates the used and available
+// Volume space by stat'ing and gathering filesystem info for the Volume path.
+type metricsStatFS struct {
+ // the directory path the volume is mounted to.
+ path string
+}
+
+// NewMetricsStatfs creates a new metricsStatFS with the Volume path.
+func NewMetricsStatFS(path string) MetricsProvider {
+ return &metricsStatFS{path}
+}
+
+// GetMetrics calculates the volume usage and device free space by stat'ing
+// and gathering filesystem info for the Volume path.
+// See MetricsProvider.GetMetrics
+func (md *metricsStatFS) GetMetrics() (*Metrics, error) {
+ metrics := &Metrics{Time: metav1.Now()}
+ if md.path == "" {
+ return metrics, NewNoPathDefinedError()
+ }
+
+ err := md.getFsInfo(metrics)
+ if err != nil {
+ return metrics, err
+ }
+
+ return metrics, nil
+}
+
+// getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
+func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
+ available, capacity, usage, inodes, inodesFree, inodesUsed, err := util.FsInfo(md.path)
+ if err != nil {
+ return NewFsInfoFailedError(err)
+ }
+ metrics.Available = resource.NewQuantity(available, resource.BinarySI)
+ metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
+ metrics.Used = resource.NewQuantity(usage, resource.BinarySI)
+ metrics.Inodes = resource.NewQuantity(inodes, resource.BinarySI)
+ metrics.InodesFree = resource.NewQuantity(inodesFree, resource.BinarySI)
+ metrics.InodesUsed = resource.NewQuantity(inodesUsed, resource.BinarySI)
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go
new file mode 100644
index 000000000..41721d1ee
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go
@@ -0,0 +1,603 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+
+ "github.com/golang/glog"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+ "k8s.io/kubernetes/pkg/cloudprovider"
+ "k8s.io/kubernetes/pkg/util/io"
+ "k8s.io/kubernetes/pkg/util/mount"
+)
+
+// VolumeOptions contains option information about a volume.
+type VolumeOptions struct {
+ // The attributes below are required by volume.Provisioner
+ // TODO: refactor all of this out of volumes when an admin can configure
+ // many kinds of provisioners.
+
+ // Reclamation policy for a persistent volume
+ PersistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy
+ // Suggested PV.Name of the PersistentVolume to provision.
+ // This is a generated name guaranteed to be unique in Kubernetes cluster.
+ // If you choose not to use it as volume name, ensure uniqueness by either
+ // combining it with your value or create unique values of your own.
+ PVName string
+ // PVC is reference to the claim that lead to provisioning of a new PV.
+ // Provisioners *must* create a PV that would be matched by this PVC,
+ // i.e. with required capacity, accessMode, labels matching PVC.Selector and
+ // so on.
+ PVC *v1.PersistentVolumeClaim
+ // Unique name of Kubernetes cluster.
+ ClusterName string
+ // Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
+ CloudTags *map[string]string
+ // Volume provisioning parameters from StorageClass
+ Parameters map[string]string
+}
+
+// VolumePlugin is an interface to volume plugins that can be used on a
+// kubernetes node (e.g. by kubelet) to instantiate and manage volumes.
+type VolumePlugin interface {
+ // Init initializes the plugin. This will be called exactly once
+ // before any New* calls are made - implementations of plugins may
+ // depend on this.
+ Init(host VolumeHost) error
+
+ // GetPluginName returns the plugin's name. Plugins must use namespaced names
+ // such as "example.com/volume" and contain exactly one '/' character.
+ // The "kubernetes.io" namespace is reserved for plugins which are
+ // bundled with kubernetes.
+ GetPluginName() string
+
+ // GetVolumeName returns the name/ID to uniquely identify the actual
+ // backing device, directory, path, etc. referenced by the specified volume
+ // spec.
+ // For Attachable volumes, this value must be able to be passed back to
+ // volume Detach methods to identify the device to act on.
+ // If the plugin does not support the given spec, this returns an error.
+ GetVolumeName(spec *Spec) (string, error)
+
+ // CanSupport tests whether the plugin supports a given volume
+ // specification from the API. The spec pointer should be considered
+ // const.
+ CanSupport(spec *Spec) bool
+
+ // RequiresRemount returns true if this plugin requires mount calls to be
+ // reexecuted. Atomically updating volumes, like Downward API, depend on
+ // this to update the contents of the volume.
+ RequiresRemount() bool
+
+ // NewMounter creates a new volume.Mounter from an API specification.
+ // Ownership of the spec pointer is *not* transferred.
+ // - spec: The v1.Volume spec
+ // - pod: The enclosing pod
+ NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error)
+
+ // NewUnmounter creates a new volume.Unmounter from recoverable state.
+ // - name: The volume name, as per the v1.Volume spec.
+ // - podUID: The UID of the enclosing pod
+ NewUnmounter(name string, podUID types.UID) (Unmounter, error)
+
+ // ConstructVolumeSpec constructs a volume spec based on the given volume name
+ // and mountPath. The spec may have incomplete information due to limited
+ // information from input. This function is used by volume manager to reconstruct
+ // volume spec by reading the volume directories from disk
+ ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error)
+
+ // SupportsMountOption returns true if volume plugins supports Mount options
+ // Specifying mount options in a volume plugin that doesn't support
+ // user specified mount options will result in error creating persistent volumes
+ SupportsMountOption() bool
+
+ // SupportsBulkVolumeVerification checks if volume plugin type is capable
+ // of enabling bulk polling of all nodes. This can speed up verification of
+ // attached volumes by quite a bit, but underlying plugin must support it.
+ SupportsBulkVolumeVerification() bool
+}
+
+// PersistentVolumePlugin is an extended interface of VolumePlugin and is used
+// by volumes that want to provide long term persistence of data
+type PersistentVolumePlugin interface {
+ VolumePlugin
+ // GetAccessModes describes the ways a given volume can be accessed/mounted.
+ GetAccessModes() []v1.PersistentVolumeAccessMode
+}
+
+// RecyclableVolumePlugin is an extended interface of VolumePlugin and is used
+// by persistent volumes that want to be recycled before being made available
+// again to new claims
+type RecyclableVolumePlugin interface {
+ VolumePlugin
+
+ // Recycle knows how to reclaim this
+ // resource after the volume's release from a PersistentVolumeClaim.
+ // Recycle will use the provided recorder to write any events that might be
+ // interesting to user. It's expected that caller will pass these events to
+ // the PV being recycled.
+ Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error
+}
+
+// DeletableVolumePlugin is an extended interface of VolumePlugin and is used
+// by persistent volumes that want to be deleted from the cluster after their
+// release from a PersistentVolumeClaim.
+type DeletableVolumePlugin interface {
+ VolumePlugin
+ // NewDeleter creates a new volume.Deleter which knows how to delete this
+ // resource in accordance with the underlying storage provider after the
+ // volume's release from a claim
+ NewDeleter(spec *Spec) (Deleter, error)
+}
+
+const (
+ // Name of a volume in external cloud that is being provisioned and thus
+ // should be ignored by rest of Kubernetes.
+ ProvisionedVolumeName = "placeholder-for-provisioning"
+)
+
+// ProvisionableVolumePlugin is an extended interface of VolumePlugin and is
+// used to create volumes for the cluster.
+type ProvisionableVolumePlugin interface {
+ VolumePlugin
+ // NewProvisioner creates a new volume.Provisioner which knows how to
+ // create PersistentVolumes in accordance with the plugin's underlying
+ // storage provider
+ NewProvisioner(options VolumeOptions) (Provisioner, error)
+}
+
+// AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
+// to a node before mounting.
+type AttachableVolumePlugin interface {
+ VolumePlugin
+ NewAttacher() (Attacher, error)
+ NewDetacher() (Detacher, error)
+ GetDeviceMountRefs(deviceMountPath string) ([]string, error)
+}
+
+// VolumeHost is an interface that plugins can use to access the kubelet.
+type VolumeHost interface {
+ // GetPluginDir returns the absolute path to a directory under which
+ // a given plugin may store data. This directory might not actually
+ // exist on disk yet. For plugin data that is per-pod, see
+ // GetPodPluginDir().
+ GetPluginDir(pluginName string) string
+
+ // GetPodVolumeDir returns the absolute path a directory which
+ // represents the named volume under the named plugin for the given
+ // pod. If the specified pod does not exist, the result of this call
+ // might not exist.
+ GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string
+
+ // GetPodPluginDir returns the absolute path to a directory under which
+ // a given plugin may store data for a given pod. If the specified pod
+ // does not exist, the result of this call might not exist. This
+ // directory might not actually exist on disk yet.
+ GetPodPluginDir(podUID types.UID, pluginName string) string
+
+ // GetKubeClient returns a client interface
+ GetKubeClient() clientset.Interface
+
+ // NewWrapperMounter finds an appropriate plugin with which to handle
+ // the provided spec. This is used to implement volume plugins which
+ // "wrap" other plugins. For example, the "secret" volume is
+ // implemented in terms of the "emptyDir" volume.
+ NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error)
+
+ // NewWrapperUnmounter finds an appropriate plugin with which to handle
+ // the provided spec. See comments on NewWrapperMounter for more
+ // context.
+ NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error)
+
+ // Get cloud provider from kubelet.
+ GetCloudProvider() cloudprovider.Interface
+
+ // Get mounter interface.
+ GetMounter() mount.Interface
+
+ // Get writer interface for writing data to disk.
+ GetWriter() io.Writer
+
+ // Returns the hostname of the host kubelet is running on
+ GetHostName() string
+
+ // Returns host IP or nil in the case of error.
+ GetHostIP() (net.IP, error)
+
+ // Returns node allocatable.
+ GetNodeAllocatable() (v1.ResourceList, error)
+
+ // Returns a function that returns a secret.
+ GetSecretFunc() func(namespace, name string) (*v1.Secret, error)
+
+ // Returns a function that returns a configmap.
+ GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error)
+
+ // Returns the labels on the node
+ GetNodeLabels() (map[string]string, error)
+}
+
+// VolumePluginMgr tracks registered plugins.
+type VolumePluginMgr struct {
+ mutex sync.Mutex
+ plugins map[string]VolumePlugin
+ Host VolumeHost
+}
+
+// Spec is an internal representation of a volume. All API volume types translate to Spec.
+type Spec struct {
+ Volume *v1.Volume
+ PersistentVolume *v1.PersistentVolume
+ ReadOnly bool
+}
+
+// Name returns the name of either Volume or PersistentVolume, one of which must not be nil.
+func (spec *Spec) Name() string {
+ switch {
+ case spec.Volume != nil:
+ return spec.Volume.Name
+ case spec.PersistentVolume != nil:
+ return spec.PersistentVolume.Name
+ default:
+ return ""
+ }
+}
+
+// VolumeConfig is how volume plugins receive configuration. An instance
+// specific to the plugin will be passed to the plugin's
+// ProbeVolumePlugins(config) func. Reasonable defaults will be provided by
+// the binary hosting the plugins while allowing override of those default
+// values. Those config values are then set to an instance of VolumeConfig
+// and passed to the plugin.
+//
+// Values in VolumeConfig are intended to be relevant to several plugins, but
+// not necessarily all plugins. The preference is to leverage strong typing
+// in this struct. All config items must have a descriptive but non-specific
+// name (i.e, RecyclerMinimumTimeout is OK but RecyclerMinimumTimeoutForNFS is
+// !OK). An instance of config will be given directly to the plugin, so
+// config names specific to plugins are unneeded and wrongly expose plugins in
+// this VolumeConfig struct.
+//
+// OtherAttributes is a map of string values intended for one-off
+// configuration of a plugin or config that is only relevant to a single
+// plugin. All values are passed by string and require interpretation by the
+// plugin. Passing config as strings is the least desirable option but can be
+// used for truly one-off configuration. The binary should still use strong
+// typing for this value when binding CLI values before they are passed as
+// strings in OtherAttributes.
+type VolumeConfig struct {
+ // RecyclerPodTemplate is pod template that understands how to scrub clean
+ // a persistent volume after its release. The template is used by plugins
+ // which override specific properties of the pod in accordance with that
+ // plugin. See NewPersistentVolumeRecyclerPodTemplate for the properties
+ // that are expected to be overridden.
+ RecyclerPodTemplate *v1.Pod
+
+ // RecyclerMinimumTimeout is the minimum amount of time in seconds for the
+ // recycler pod's ActiveDeadlineSeconds attribute. Added to the minimum
+ // timeout is the increment per Gi of capacity.
+ RecyclerMinimumTimeout int
+
+ // RecyclerTimeoutIncrement is the number of seconds added to the recycler
+ // pod's ActiveDeadlineSeconds for each Gi of capacity in the persistent
+ // volume. Example: 5Gi volume x 30s increment = 150s + 30s minimum = 180s
+ // ActiveDeadlineSeconds for recycler pod
+ RecyclerTimeoutIncrement int
+
+ // PVName is name of the PersistentVolume instance that is being recycled.
+ // It is used to generate unique recycler pod name.
+ PVName string
+
+ // OtherAttributes stores config as strings. These strings are opaque to
+ // the system and only understood by the binary hosting the plugin and the
+ // plugin itself.
+ OtherAttributes map[string]string
+
+ // ProvisioningEnabled configures whether provisioning of this plugin is
+ // enabled or not. Currently used only in host_path plugin.
+ ProvisioningEnabled bool
+}
+
+// NewSpecFromVolume creates a Spec from a v1.Volume
+func NewSpecFromVolume(vs *v1.Volume) *Spec {
+ return &Spec{
+ Volume: vs,
+ }
+}
+
+// NewSpecFromPersistentVolume creates a Spec from a v1.PersistentVolume
+func NewSpecFromPersistentVolume(pv *v1.PersistentVolume, readOnly bool) *Spec {
+ return &Spec{
+ PersistentVolume: pv,
+ ReadOnly: readOnly,
+ }
+}
+
+// InitPlugins initializes each plugin. All plugins must have unique names.
+// This must be called exactly once before any New* methods are called on any
+// plugins.
+func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error {
+ pm.mutex.Lock()
+ defer pm.mutex.Unlock()
+
+ pm.Host = host
+ if pm.plugins == nil {
+ pm.plugins = map[string]VolumePlugin{}
+ }
+
+ allErrs := []error{}
+ for _, plugin := range plugins {
+ name := plugin.GetPluginName()
+ if errs := validation.IsQualifiedName(name); len(errs) != 0 {
+ allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
+ continue
+ }
+
+ if _, found := pm.plugins[name]; found {
+ allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
+ continue
+ }
+ err := plugin.Init(host)
+ if err != nil {
+ glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error())
+ allErrs = append(allErrs, err)
+ continue
+ }
+ pm.plugins[name] = plugin
+ glog.V(1).Infof("Loaded volume plugin %q", name)
+ }
+ return utilerrors.NewAggregate(allErrs)
+}
+
+// FindPluginBySpec looks for a plugin that can support a given volume
+// specification. If no plugins can support or more than one plugin can
+// support it, return error.
+func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
+ pm.mutex.Lock()
+ defer pm.mutex.Unlock()
+
+ matches := []string{}
+ for k, v := range pm.plugins {
+ if v.CanSupport(spec) {
+ matches = append(matches, k)
+ }
+ }
+ if len(matches) == 0 {
+ return nil, fmt.Errorf("no volume plugin matched")
+ }
+ if len(matches) > 1 {
+ return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ","))
+ }
+ return pm.plugins[matches[0]], nil
+}
+
+// FindPluginByName fetches a plugin by name or by legacy name. If no plugin
+// is found, returns error.
+func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
+ pm.mutex.Lock()
+ defer pm.mutex.Unlock()
+
+ // Once we can get rid of legacy names we can reduce this to a map lookup.
+ matches := []string{}
+ for k, v := range pm.plugins {
+ if v.GetPluginName() == name {
+ matches = append(matches, k)
+ }
+ }
+ if len(matches) == 0 {
+ return nil, fmt.Errorf("no volume plugin matched")
+ }
+ if len(matches) > 1 {
+ return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ","))
+ }
+ return pm.plugins[matches[0]], nil
+}
+
+// FindPersistentPluginBySpec looks for a persistent volume plugin that can
+// support a given volume specification. If no plugin is found, return an
+// error
+func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVolumePlugin, error) {
+ volumePlugin, err := pm.FindPluginBySpec(spec)
+ if err != nil {
+ return nil, fmt.Errorf("Could not find volume plugin for spec: %#v", spec)
+ }
+ if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok {
+ return persistentVolumePlugin, nil
+ }
+ return nil, fmt.Errorf("no persistent volume plugin matched")
+}
+
+// FindPersistentPluginByName fetches a persistent volume plugin by name. If
+// no plugin is found, returns error.
+func (pm *VolumePluginMgr) FindPersistentPluginByName(name string) (PersistentVolumePlugin, error) {
+ volumePlugin, err := pm.FindPluginByName(name)
+ if err != nil {
+ return nil, err
+ }
+ if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok {
+ return persistentVolumePlugin, nil
+ }
+ return nil, fmt.Errorf("no persistent volume plugin matched")
+}
+
+// FindRecyclablePluginBySpec fetches a recyclable persistent volume plugin
+// by spec. If no plugin is found, returns error.
+func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVolumePlugin, error) {
+ volumePlugin, err := pm.FindPluginBySpec(spec)
+ if err != nil {
+ return nil, err
+ }
+ if recyclableVolumePlugin, ok := volumePlugin.(RecyclableVolumePlugin); ok {
+ return recyclableVolumePlugin, nil
+ }
+ return nil, fmt.Errorf("no recyclable volume plugin matched")
+}
+
+// FindProvisionablePluginByName fetches a persistent volume plugin by name. If
+// no plugin is found, returns error.
+func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (ProvisionableVolumePlugin, error) {
+ volumePlugin, err := pm.FindPluginByName(name)
+ if err != nil {
+ return nil, err
+ }
+ if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
+ return provisionableVolumePlugin, nil
+ }
+ return nil, fmt.Errorf("no provisionable volume plugin matched")
+}
+
// FindDeletablePluginBySpec fetches a volume plugin by spec. If no plugin
// is found or the matched plugin does not support deletion, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) {
	volumePlugin, err := pm.FindPluginBySpec(spec)
	if err != nil {
		return nil, err
	}
	if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok {
		return deletableVolumePlugin, nil
	}
	return nil, fmt.Errorf("no deletable volume plugin matched")
}
+
+// FindDeletablePluginByName fetches a persistent volume plugin by name. If
+// no plugin is found, returns error.
+func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolumePlugin, error) {
+ volumePlugin, err := pm.FindPluginByName(name)
+ if err != nil {
+ return nil, err
+ }
+ if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok {
+ return deletableVolumePlugin, nil
+ }
+ return nil, fmt.Errorf("no deletable volume plugin matched")
+}
+
// FindCreatablePluginBySpec fetches a provisionable volume plugin by spec.
// If no plugin is found, returns error.
func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) {
	volumePlugin, err := pm.FindPluginBySpec(spec)
	if err != nil {
		return nil, err
	}
	if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
		return provisionableVolumePlugin, nil
	}
	return nil, fmt.Errorf("no creatable volume plugin matched")
}
+
// FindAttachablePluginBySpec fetches an attachable volume plugin by spec.
// Unlike the other "FindPlugin" methods, this does not return error if no
// plugin is found. All volumes require a mounter and unmounter, but not
// every volume will have an attacher/detacher.
func (pm *VolumePluginMgr) FindAttachablePluginBySpec(spec *Spec) (AttachableVolumePlugin, error) {
	volumePlugin, err := pm.FindPluginBySpec(spec)
	if err != nil {
		return nil, err
	}
	if attachableVolumePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok {
		return attachableVolumePlugin, nil
	}
	// Not an error: attach/detach support is optional for volume plugins.
	return nil, nil
}
+
+// FindAttachablePluginByName fetches an attachable volume plugin by name.
+// Unlike the other "FindPlugin" methods, this does not return error if no
+// plugin is found. All volumes require a mounter and unmounter, but not
+// every volume will have an attacher/detacher.
+func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVolumePlugin, error) {
+ volumePlugin, err := pm.FindPluginByName(name)
+ if err != nil {
+ return nil, err
+ }
+ if attachablePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok {
+ return attachablePlugin, nil
+ }
+ return nil, nil
+}
+
// NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler
// pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests
// for emptiness. Most attributes of the template will be correct for most
// plugin implementations. The following attributes can be overridden per
// plugin via configuration:
//
// 1. pod.Spec.Volumes[0].VolumeSource must be overridden. Recycler
// implementations without a valid VolumeSource will fail.
// 2. pod.GenerateName helps distinguish recycler pods by name. Recommended.
// Default is "pv-recycler-".
// 3. pod.Spec.ActiveDeadlineSeconds gives the recycler pod a maximum timeout
// before failing. Recommended. Default is 60 seconds.
//
// See HostPath and NFS for working recycler examples
func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod {
	// Default ActiveDeadlineSeconds; overridable per plugin (item 3 above).
	timeout := int64(60)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pv-recycler-",
			Namespace:    metav1.NamespaceDefault,
		},
		Spec: v1.PodSpec{
			ActiveDeadlineSeconds: &timeout,
			RestartPolicy:         v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "vol",
					// IMPORTANT! All plugins using this template MUST
					// override pod.Spec.Volumes[0].VolumeSource Recycler
					// implementations without a valid VolumeSource will fail.
					VolumeSource: v1.VolumeSource{},
				},
			},
			Containers: []v1.Container{
				{
					Name:    "pv-recycler",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"/bin/sh"},
					// Remove everything under /scrub (including dot-files),
					// then exit non-zero unless the directory is verifiably empty.
					Args: []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "vol",
							MountPath: "/scrub",
						},
					},
				},
			},
		},
	}
	return pod
}
+
+// Check validity of recycle pod template
+// List of checks:
+// - at least one volume is defined in the recycle pod template
+// If successful, returns nil
+// if unsuccessful, returns an error.
+func ValidateRecyclerPodTemplate(pod *v1.Pod) error {
+ if len(pod.Spec.Volumes) < 1 {
+ return fmt.Errorf("does not contain any volume(s)")
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util.go
new file mode 100644
index 000000000..2e5610362
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util.go
@@ -0,0 +1,456 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "fmt"
+ "reflect"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+
+ "hash/fnv"
+ "math/rand"
+ "strconv"
+ "strings"
+
+ "github.com/golang/glog"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/sets"
+ volutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
// RecycleEventRecorder is a callback used by the recycler to forward pod
// event type and message strings to the PV being recycled.
type RecycleEventRecorder func(eventtype, message string)
+
+// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
+// Recyclers. This function will save the given Pod to the API and watch it
+// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
+// whichever comes first. An attempt to delete a recycler pod is always
+// attempted before returning.
+//
+// In case there is a pod with the same namespace+name already running, this
+// function assumes it's an older instance of the recycler pod and watches
+// this old pod instead of starting a new one.
+//
+// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
+// will be overwritten with unique name based on PV.Name.
+// client - kube client for API operations.
+func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
+ return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
+}
+
+// same as above func comments, except 'recyclerClient' is a narrower pod API
+// interface to ease testing
+func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
+ glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
+
+ // Generate unique name for the recycler pod - we need to get "already
+ // exists" error when a previous controller has already started recycling
+ // the volume. Here we assume that pv.Name is already unique.
+ pod.Name = "recycler-for-" + pvName
+ pod.GenerateName = ""
+
+ stopChannel := make(chan struct{})
+ defer close(stopChannel)
+ podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
+ if err != nil {
+ glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
+ return err
+ }
+
+ // Start the pod
+ _, err = recyclerClient.CreatePod(pod)
+ if err != nil {
+ if errors.IsAlreadyExists(err) {
+ glog.V(5).Infof("old recycler pod %q found for volume", pod.Name)
+ } else {
+ return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
+ }
+ }
+ defer func(pod *v1.Pod) {
+ glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
+ if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil {
+ glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
+ }
+ }(pod)
+
+ // Now only the old pod or the new pod run. Watch it until it finishes
+ // and send all events on the pod to the PV
+ for {
+ event, ok := <-podCh
+ if !ok {
+ return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
+ }
+ switch event.Object.(type) {
+ case *v1.Pod:
+ // POD changed
+ pod := event.Object.(*v1.Pod)
+ glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
+ switch event.Type {
+ case watch.Added, watch.Modified:
+ if pod.Status.Phase == v1.PodSucceeded {
+ // Recycle succeeded.
+ return nil
+ }
+ if pod.Status.Phase == v1.PodFailed {
+ if pod.Status.Message != "" {
+ return fmt.Errorf(pod.Status.Message)
+ } else {
+ return fmt.Errorf("pod failed, pod.Status.Message unknown.")
+ }
+ }
+
+ case watch.Deleted:
+ return fmt.Errorf("recycler pod was deleted")
+
+ case watch.Error:
+ return fmt.Errorf("recycler pod watcher failed")
+ }
+
+ case *v1.Event:
+ // Event received
+ podEvent := event.Object.(*v1.Event)
+ glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
+ if event.Type == watch.Added {
+ recyclerClient.Event(podEvent.Type, podEvent.Message)
+ }
+ }
+ }
+}
+
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
	// CreatePod creates the given pod in the pod's namespace.
	CreatePod(pod *v1.Pod) (*v1.Pod, error)
	// GetPod fetches the named pod from the given namespace.
	GetPod(name, namespace string) (*v1.Pod, error)
	// DeletePod deletes the named pod in the given namespace.
	DeletePod(name, namespace string) error
	// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
	// to close the reflector backing the watch. The caller is responsible for
	// deferring a close on the channel to stop the reflector.
	WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
	// Event sends an event to the volume that is being recycled.
	Event(eventtype, message string)
}
+
+func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
+ return &realRecyclerClient{
+ client,
+ recorder,
+ }
+}
+
// realRecyclerClient implements recyclerClient against a real API server.
type realRecyclerClient struct {
	client   clientset.Interface  // API client used for all pod/event operations
	recorder RecycleEventRecorder // callback receiving events for the recycled PV
}
+
// CreatePod creates the given pod in the pod's own namespace.
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
	return c.client.Core().Pods(pod.Namespace).Create(pod)
}
+
// GetPod fetches the named pod from the given namespace.
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
	return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
}
+
// DeletePod deletes the named pod using default (nil) delete options.
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
	return c.client.Core().Pods(namespace).Delete(name, nil)
}
+
+func (c *realRecyclerClient) Event(eventtype, message string) {
+ c.recorder(eventtype, message)
+}
+
+func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
+ podSelector, _ := fields.ParseSelector("metadata.name=" + name)
+ options := metav1.ListOptions{
+ FieldSelector: podSelector.String(),
+ Watch: true,
+ }
+
+ podWatch, err := c.client.Core().Pods(namespace).Watch(options)
+ if err != nil {
+ return nil, err
+ }
+
+ eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
+ eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{
+ FieldSelector: eventSelector.String(),
+ Watch: true,
+ })
+ if err != nil {
+ podWatch.Stop()
+ return nil, err
+ }
+
+ eventCh := make(chan watch.Event, 30)
+
+ go func() {
+ defer eventWatch.Stop()
+ defer podWatch.Stop()
+ defer close(eventCh)
+ var podWatchChannelClosed bool
+ var eventWatchChannelClosed bool
+ for {
+ select {
+ case _ = <-stopChannel:
+ return
+
+ case podEvent, ok := <-podWatch.ResultChan():
+ if !ok {
+ podWatchChannelClosed = true
+ } else {
+ eventCh <- podEvent
+ }
+ case eventEvent, ok := <-eventWatch.ResultChan():
+ if !ok {
+ eventWatchChannelClosed = true
+ } else {
+ eventCh <- eventEvent
+ }
+ }
+ if podWatchChannelClosed && eventWatchChannelClosed {
+ break
+ }
+ }
+ }()
+
+ return eventCh, nil
+}
+
+// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
+// recycle operation. The calculation and return value is either the
+// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
+// greater.
+func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
+ giQty := resource.MustParse("1Gi")
+ pvQty := pv.Spec.Capacity[v1.ResourceStorage]
+ giSize := giQty.Value()
+ pvSize := pvQty.Value()
+ timeout := (pvSize / giSize) * int64(timeoutIncrement)
+ if timeout < int64(minimumTimeout) {
+ return int64(minimumTimeout)
+ } else {
+ return timeout
+ }
+}
+
// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	// Add (unit - 1) before dividing so any partial unit rounds up.
	rounded := volumeSizeBytes + allocationUnitBytes - 1
	return rounded / allocationUnitBytes
}
+
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	// Truncate the prefix (never the PV name) so "<prefix>-<pvName>" fits
	// into maxLength; the extra 1 accounts for the joining dash.
	if overflow := len(pvName) + 1 + len(prefix) - maxLength; overflow > 0 {
		prefix = prefix[:len(prefix)-overflow]
	}
	return prefix + "-" + pvName
}
+
+// Check if the path from the mounter is empty.
+func GetPath(mounter Mounter) (string, error) {
+ path := mounter.GetPath()
+ if path == "" {
+ return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
+ }
+ return path, nil
+}
+
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
	// We create the volume in a zone determined by the name
	// Eventually the scheduler will coordinate placement into an available zone
	var hash uint32
	var index uint32

	if pvcName == "" {
		// We should always be called with a name; this shouldn't happen
		glog.Warningf("No name defined during volume create; choosing random zone")

		hash = rand.Uint32()
	} else {
		hashString := pvcName

		// Heuristic to make sure that volumes in a StatefulSet are spread across zones
		// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
		// where Id is an integer index.
		// Note though that if a StatefulSet pod has multiple claims, we need them to be
		// in the same zone, because otherwise the pod will be unable to mount both volumes,
		// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
		// it looks like `ClaimName-StatefulSetName-Id`.
		// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
		// feature for users that are creating statefulset-like functionality without using statefulsets.
		lastDash := strings.LastIndexByte(pvcName, '-')
		if lastDash != -1 {
			statefulsetIDString := pvcName[lastDash+1:]
			statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
			if err == nil {
				// Offset by the statefulsetID, so we round-robin across zones
				index = uint32(statefulsetID)
				// We still hash the volume name, but only the prefix
				hashString = pvcName[:lastDash]

				// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
				// hash only the StatefulSetName, so that different claims on the same StatefulSet
				// member end up in the same zone.
				// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
				// We actually just take the portion after the final - of ClaimName-StatefulSetName.
				// For our purposes it doesn't much matter (just suboptimal spreading).
				// This inner lastDash deliberately shadows the outer one; it
				// operates on the already-truncated hashString.
				lastDash := strings.LastIndexByte(hashString, '-')
				if lastDash != -1 {
					hashString = hashString[lastDash+1:]
				}

				glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
			}
		}

		// We hash the (base) volume name, so we don't bias towards the first N zones
		h := fnv.New32()
		h.Write([]byte(hashString))
		hash = h.Sum32()
	}

	// Zones.List returns zones in a consistent order (sorted)
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones)
	zoneSlice := zones.List()
	zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]

	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
	return zone
}
+
+// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
+// to empty_dir
+func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
+ glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
+
+ if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil {
+ return fmt.Errorf("Error checking if path exists: %v", pathErr)
+ } else if !pathExists {
+ glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
+ return nil
+ }
+
+ // Wrap EmptyDir, let it do the teardown.
+ wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
+ if err != nil {
+ return err
+ }
+ return wrapped.TearDownAt(dir)
+}
+
+// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
+func MountOptionFromSpec(spec *Spec, options ...string) []string {
+ pv := spec.PersistentVolume
+
+ if pv != nil {
+ if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
+ moList := strings.Split(mo, ",")
+ return JoinMountOptions(moList, options)
+ }
+
+ }
+ return options
+}
+
+// JoinMountOptions joins mount options eliminating duplicates
+func JoinMountOptions(userOptions []string, systemOptions []string) []string {
+ allMountOptions := sets.NewString()
+
+ for _, mountOption := range userOptions {
+ if len(mountOption) > 0 {
+ allMountOptions.Insert(mountOption)
+ }
+ }
+
+ for _, mountOption := range systemOptions {
+ allMountOptions.Insert(mountOption)
+ }
+ return allMountOptions.UnsortedList()
+}
+
+// ZonesToSet converts a string containing a comma separated list of zones to set
+func ZonesToSet(zonesString string) (sets.String, error) {
+ zonesSlice := strings.Split(zonesString, ",")
+ zonesSet := make(sets.String)
+ for _, zone := range zonesSlice {
+ trimmedZone := strings.TrimSpace(zone)
+ if trimmedZone == "" {
+ return make(sets.String), fmt.Errorf("comma separated list of zones (%q) must not contain an empty zone", zonesString)
+ }
+ zonesSet.Insert(trimmedZone)
+ }
+ return zonesSet, nil
+}
+
// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
	if strings.TrimSpace(zone) != "" {
		return nil
	}
	return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
}
+
+// AccessModesContains returns whether the requested mode is contained by modes
+func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
+ for _, m := range modes {
+ if m == mode {
+ return true
+ }
+ }
+ return false
+}
+
+// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
+func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
+ for _, mode := range requestedModes {
+ if !AccessModesContains(indexedModes, mode) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go
new file mode 100644
index 000000000..5eef55b45
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go
@@ -0,0 +1,462 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/golang/glog"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
const (
	// Maximum file-name and path lengths accepted by validatePath
	// (conventional Linux NAME_MAX / PATH_MAX values).
	maxFileNameLength = 255
	maxPathLength     = 4096
)
+
// AtomicWriter handles atomically projecting content for a set of files into
// a target directory.
//
// Note:
//
// 1. AtomicWriter reserves the set of pathnames starting with `..`.
// 2. AtomicWriter offers no concurrency guarantees and must be synchronized
// by the caller.
//
// The visible files in this volume are symlinks to files in the writer's data
// directory. Actual files are stored in a hidden timestamped directory which
// is symlinked to by the data directory. The timestamped directory and
// data directory symlink are created in the writer's target dir.  This scheme
// allows the files to be atomically updated by changing the target of the
// data directory symlink.
//
// Consumers of the target directory can monitor the ..data symlink using
// inotify or fanotify to receive events when the content in the volume is
// updated.
type AtomicWriter struct {
	targetDir  string // directory the payload is projected into
	logContext string // prefix identifying this writer in log messages
}
+
// FileProjection holds the content and permission mode for a single file to
// be projected into the target directory.
type FileProjection struct {
	Data []byte // file contents
	Mode int32  // file permission mode bits
}
+
+// NewAtomicWriter creates a new AtomicWriter configured to write to the given
+// target directory, or returns an error if the target directory does not exist.
+func NewAtomicWriter(targetDir string, logContext string) (*AtomicWriter, error) {
+ _, err := os.Stat(targetDir)
+ if os.IsNotExist(err) {
+ return nil, err
+ }
+
+ return &AtomicWriter{targetDir: targetDir, logContext: logContext}, nil
+}
+
const (
	// dataDirName is the symlink through which all user-visible files resolve
	// to the current timestamped directory.
	dataDirName = "..data"
	// newDataDirName is the temporary symlink used to atomically replace
	// dataDirName via rename.
	newDataDirName = "..data_tmp"
)
+
// Write does an atomic projection of the given payload into the writer's target
// directory. Input paths must not begin with '..'.
//
// The Write algorithm is:
//
//  1.  The payload is validated; if the payload is invalid, the function returns
//  2.  The user-visible portion of the volume is walked to determine whether any
//      portion of the payload was deleted and is still present on disk.
//      If the payload is already present on disk and there are no deleted files,
//      the function returns
//  3.  A check is made to determine whether data present in the payload has changed
//  4.  A new timestamped dir is created
//  5.  The payload is written to the new timestamped directory
//  6.  Symlinks and directory for new user-visible files are created (if needed).
//
//      For example, consider the files:
//        <target-dir>/podName
//        <target-dir>/user/labels
//        <target-dir>/k8s/annotations
//
//      The user visible files are symbolic links into the internal data directory:
//        <target-dir>/podName         -> ..data/podName
//        <target-dir>/usr/labels      -> ../..data/usr/labels
//        <target-dir>/k8s/annotations -> ../..data/k8s/annotations
//
//      Relative links are created into the data directory for files in subdirectories.
//
//      The data directory itself is a link to a timestamped directory with
//      the real data:
//        <target-dir>/..data          -> ..2016_02_01_15_04_05.12345678/
//  7.  The current timestamped directory is detected by reading the data directory
//      symlink
//  8.  A symlink to the new timestamped directory ..data_tmp is created that will
//      become the new data directory
//  9.  The new data directory symlink is renamed to the data directory; rename is atomic
//  10. Old paths are removed from the user-visible portion of the target directory
//  11. The previous timestamped directory is removed, if it exists
func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
	// (1)
	cleanPayload, err := validatePayload(payload)
	if err != nil {
		glog.Errorf("%s: invalid payload: %v", w.logContext, err)
		return err
	}

	// (2)
	pathsToRemove, err := w.pathsToRemove(cleanPayload)
	if err != nil {
		glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
		return err
	}

	// (3)
	if should, err := w.shouldWritePayload(cleanPayload); err != nil {
		glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
		return err
	} else if !should && len(pathsToRemove) == 0 {
		glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
		return nil
	} else {
		glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
	}

	// (4)
	tsDir, err := w.newTimestampDir()
	if err != nil {
		glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
		return err
	}

	// (5)
	if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
		glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
		return err
	} else {
		glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
	}

	// (6)
	if err = w.createUserVisibleFiles(cleanPayload); err != nil {
		glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
		return err
	}

	// (7) A missing ..data link (first write) is not an error; oldTsDir
	// stays empty in that case and step (11) is skipped.
	_, tsDirName := filepath.Split(tsDir)
	dataDirPath := path.Join(w.targetDir, dataDirName)
	oldTsDir, err := os.Readlink(dataDirPath)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
		return err
	}

	// (8)
	newDataDirPath := path.Join(w.targetDir, newDataDirName)
	if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
		os.RemoveAll(tsDir)
		glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
		return err
	}

	// (9)
	if runtime.GOOS == "windows" {
		// NOTE(review): on Windows the swap is done by remove + re-create
		// rather than rename, presumably because rename-over-symlink is not
		// supported there — confirm before relying on atomicity on Windows.
		os.Remove(dataDirPath)
		err = os.Symlink(tsDirName, dataDirPath)
		os.Remove(newDataDirPath)
	} else {
		err = os.Rename(newDataDirPath, dataDirPath)
	}
	if err != nil {
		os.Remove(newDataDirPath)
		os.RemoveAll(tsDir)
		glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
		return err
	}

	// (10)
	if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
		glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
		return err
	}

	// (11)
	if len(oldTsDir) > 0 {
		if err = os.RemoveAll(path.Join(w.targetDir, oldTsDir)); err != nil {
			glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
			return err
		}
	}

	return nil
}
+
// validatePayload returns a copy of the payload with all paths cleaned, or an
// error if any path in the payload is invalid (see validatePath).
func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) {
	cleanPayload := make(map[string]FileProjection)
	for k, content := range payload {
		if err := validatePath(k); err != nil {
			return nil, err
		}

		cleanPayload[path.Clean(k)] = content
	}

	return cleanPayload, nil
}
+
+// validatePath validates a single path, returning an error if the path is
+// invalid. paths may not:
+//
+// 1. be absolute
+// 2. contain '..' as an element
+// 3. start with '..'
+// 4. contain filenames larger than 255 characters
+// 5. be longer than 4096 characters
+func validatePath(targetPath string) error {
+ // TODO: somehow unify this with the similar api validation,
+ // validateVolumeSourcePath; the error semantics are just different enough
+ // from this that it was time-prohibitive trying to find the right
+ // refactoring to re-use.
+ if targetPath == "" {
+ return fmt.Errorf("invalid path: must not be empty: %q", targetPath)
+ }
+ if path.IsAbs(targetPath) {
+ return fmt.Errorf("invalid path: must be relative path: %s", targetPath)
+ }
+
+ if len(targetPath) > maxPathLength {
+ return fmt.Errorf("invalid path: must be less than %d characters", maxPathLength)
+ }
+
+ items := strings.Split(targetPath, string(os.PathSeparator))
+ for _, item := range items {
+ if item == ".." {
+ return fmt.Errorf("invalid path: must not contain '..': %s", targetPath)
+ }
+ if len(item) > maxFileNameLength {
+ return fmt.Errorf("invalid path: filenames must be less than %d characters", maxFileNameLength)
+ }
+ }
+ if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
+ return fmt.Errorf("invalid path: must not start with '..': %s", targetPath)
+ }
+
+ return nil
+}
+
+// shouldWritePayload returns whether the payload should be written to disk.
+func (w *AtomicWriter) shouldWritePayload(payload map[string]FileProjection) (bool, error) {
+ for userVisiblePath, fileProjection := range payload {
+ shouldWrite, err := w.shouldWriteFile(path.Join(w.targetDir, userVisiblePath), fileProjection.Data)
+ if err != nil {
+ return false, err
+ }
+
+ if shouldWrite {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+// shouldWriteFile returns whether a new version of a file should be written to disk.
+func (w *AtomicWriter) shouldWriteFile(path string, content []byte) (bool, error) {
+ _, err := os.Lstat(path)
+ if os.IsNotExist(err) {
+ return true, nil
+ }
+
+ contentOnFs, err := ioutil.ReadFile(path)
+ if err != nil {
+ return false, err
+ }
+
+ return (bytes.Compare(content, contentOnFs) != 0), nil
+}
+
// pathsToRemove walks the user-visible portion of the target directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.String, error) {
	paths := sets.NewString()
	visitor := func(path string, info os.FileInfo, err error) error {
		if path == w.targetDir {
			return nil
		}

		// Record each visited entry relative to the target directory; strip
		// the OS-specific separator that TrimPrefix leaves behind.
		relativePath := strings.TrimPrefix(path, w.targetDir)
		if runtime.GOOS == "windows" {
			relativePath = strings.TrimPrefix(relativePath, "\\")
		} else {
			relativePath = strings.TrimPrefix(relativePath, "/")
		}
		// Skip the writer's hidden bookkeeping entries (names beginning with
		// "..", e.g. the data dir and timestamped dirs created elsewhere in
		// this file) — they are not user-visible paths.
		if strings.HasPrefix(relativePath, "..") {
			return nil
		}

		paths.Insert(relativePath)
		return nil
	}

	err := filepath.Walk(w.targetDir, visitor)
	// A missing target directory simply means there is nothing to remove.
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())

	newPaths := sets.NewString()
	for file := range payload {
		// add all subpaths for the payload to the set of new paths
		// to avoid attempting to remove non-empty dirs
		for subPath := file; subPath != ""; {
			newPaths.Insert(subPath)
			subPath, _ = filepath.Split(subPath)
			subPath = strings.TrimSuffix(subPath, "/")
		}
	}
	glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())

	// Anything on disk that is neither a payload file nor a parent directory
	// of one is scheduled for removal.
	result := paths.Difference(newPaths)
	glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)

	return result, nil
}
+
+// newTimestampDir creates a new timestamp directory
+func (w *AtomicWriter) newTimestampDir() (string, error) {
+ tsDir, err := ioutil.TempDir(w.targetDir, fmt.Sprintf("..%s.", time.Now().Format("1981_02_01_15_04_05")))
+ if err != nil {
+ glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
+ return "", err
+ }
+
+ // 0755 permissions are needed to allow 'group' and 'other' to recurse the
+ // directory tree. do a chmod here to ensure that permissions are set correctly
+ // regardless of the process' umask.
+ err = os.Chmod(tsDir, 0755)
+ if err != nil {
+ glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err)
+ return "", err
+ }
+
+ return tsDir, nil
+}
+
+// writePayloadToDir writes the given payload to the given directory. The
+// directory must exist.
+func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir string) error {
+ for userVisiblePath, fileProjection := range payload {
+ content := fileProjection.Data
+ mode := os.FileMode(fileProjection.Mode)
+ fullPath := path.Join(dir, userVisiblePath)
+ baseDir, _ := filepath.Split(fullPath)
+
+ err := os.MkdirAll(baseDir, os.ModePerm)
+ if err != nil {
+ glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
+ return err
+ }
+
+ err = ioutil.WriteFile(fullPath, content, mode)
+ if err != nil {
+ glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
+ return err
+ }
+ // Chmod is needed because ioutil.WriteFile() ends up calling
+ // open(2) to create the file, so the final mode used is "mode &
+ // ~umask". But we want to make sure the specified mode is used
+ // in the file no matter what the umask is.
+ err = os.Chmod(fullPath, mode)
+ if err != nil {
+ glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
+ }
+ }
+
+ return nil
+}
+
// createUserVisibleFiles creates the relative symlinks for all the
// files configured in the payload. If the directory in a file path does not
// exist, it is created.
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks and subdirectories are created:
// bar -> ..data/bar
// foo/bar -> ../..data/foo/bar
// baz/bar -> ../..data/baz/bar
// foo/baz/blah -> ../../..data/foo/baz/blah
func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) error {
	for userVisiblePath := range payload {
		dir, _ := filepath.Split(userVisiblePath)
		subDirs := 0
		if len(dir) > 0 {
			// If dir is not empty, the projection path contains at least one
			// subdirectory (example: userVisiblePath := "foo/bar").
			// Since filepath.Split leaves a trailing path separator, in this
			// example, dir = "foo/". In order to calculate the number of
			// subdirectories, we must subtract 1 from the number returned by split.
			subDirs = len(strings.Split(dir, "/")) - 1
			err := os.MkdirAll(path.Join(w.targetDir, dir), os.ModePerm)
			if err != nil {
				return err
			}
		}
		// Readlink is used purely as an existence probe: a successful read
		// means the symlink was already created on a previous pass, so this
		// function stays idempotent.
		_, err := os.Readlink(path.Join(w.targetDir, userVisiblePath))
		if err != nil && os.IsNotExist(err) {
			// The link into the data directory for this path doesn't exist; create it,
			// respecting the number of subdirectories necessary to link
			// correctly back into the data directory.
			visibleFile := path.Join(w.targetDir, userVisiblePath)
			dataDirFile := path.Join(strings.Repeat("../", subDirs), dataDirName, userVisiblePath)

			err = os.Symlink(dataDirFile, visibleFile)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
+
+// removeUserVisiblePaths removes the set of paths from the user-visible
+// portion of the writer's target directory.
+func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
+ orderedPaths := paths.List()
+ for ii := len(orderedPaths) - 1; ii >= 0; ii-- {
+ if err := os.Remove(path.Join(w.targetDir, orderedPaths[ii])); err != nil {
+ glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, orderedPaths[ii], err)
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go
new file mode 100644
index 000000000..9098d7b85
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
// DeviceUtil is a util for common device methods.
type DeviceUtil interface {
	// FindMultipathDeviceForDevice returns the device-mapper parent device
	// (e.g. "/dev/dm-1") for the given disk path, or "" if none is found.
	FindMultipathDeviceForDevice(disk string) string
}

// deviceHandler implements DeviceUtil on top of a (mockable) IoUtil.
type deviceHandler struct {
	get_io IoUtil // IO implementation used for all filesystem probing
}

// NewDeviceHandler creates a new DeviceUtil backed by the given IoUtil.
func NewDeviceHandler(io IoUtil) DeviceUtil {
	return &deviceHandler{get_io: io}
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
new file mode 100644
index 000000000..0d9851140
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
@@ -0,0 +1,61 @@
+// +build linux
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "strings"
+)
+
+// FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent
+func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
+ io := handler.get_io
+ disk, err := findDeviceForPath(device, io)
+ if err != nil {
+ return ""
+ }
+ sysPath := "/sys/block/"
+ if dirs, err := io.ReadDir(sysPath); err == nil {
+ for _, f := range dirs {
+ name := f.Name()
+ if strings.HasPrefix(name, "dm-") {
+ if _, err1 := io.Lstat(sysPath + name + "/slaves/" + disk); err1 == nil {
+ return "/dev/" + name
+ }
+ }
+ }
+ }
+ return ""
+}
+
+// findDeviceForPath Find the underlaying disk for a linked path such as /dev/disk/by-path/XXXX or /dev/mapper/XXXX
+// will return sdX or hdX etc, if /dev/sdX is passed in then sdX will be returned
+func findDeviceForPath(path string, io IoUtil) (string, error) {
+ devicePath, err := io.EvalSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+ // if path /dev/hdX split into "", "dev", "hdX" then we will
+ // return just the last part
+ parts := strings.Split(devicePath, "/")
+ if len(parts) == 3 && strings.HasPrefix(parts[1], "dev") {
+ return parts[2], nil
+ }
+ return "", errors.New("Illegal path for device " + devicePath)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_unsupported.go
new file mode 100644
index 000000000..6afb1f139
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_unsupported.go
@@ -0,0 +1,24 @@
+// +build !linux
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
// FindMultipathDeviceForDevice is a no-op on non-Linux builds (multipath
// discovery relies on Linux sysfs); it always returns "".
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
	return ""
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go b/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go
new file mode 100644
index 000000000..620add69d
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Contains utility code for use by volume plugins.
+package util // import "k8s.io/kubernetes/pkg/volume/util"
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go b/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go
new file mode 100644
index 000000000..cfa7e30b4
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go
@@ -0,0 +1,96 @@
+// +build linux darwin
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "fmt"
+ "os/exec"
+ "strings"
+ "syscall"
+
+ "k8s.io/apimachinery/pkg/api/resource"
+)
+
// FsInfo returns (available bytes, byte capacity, byte usage, total inodes,
// inodes free, inode usage, error) for the filesystem that path resides upon.
func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
	var statfs syscall.Statfs_t
	if err := syscall.Statfs(path, &statfs); err != nil {
		return 0, 0, 0, 0, 0, 0, err
	}

	// All block counts are reported in units of the fragment/block size.
	blockSize := int64(statfs.Bsize)
	available := int64(statfs.Bavail) * blockSize
	capacity := int64(statfs.Blocks) * blockSize
	usage := (int64(statfs.Blocks) - int64(statfs.Bfree)) * blockSize

	inodes := int64(statfs.Files)
	inodesFree := int64(statfs.Ffree)

	return available, capacity, usage, inodes, inodesFree, inodes - inodesFree, nil
}
+
// Du returns the disk usage (in bytes) of the given path, as reported by
// running `du` against it.
func Du(path string) (*resource.Quantity, error) {
	// Uses the same niceness level as cadvisor.fs does when running du
	// Uses -B 1 to always scale to a blocksize of 1 byte
	out, err := exec.Command("nice", "-n", "19", "du", "-s", "-B", "1", path).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("failed command 'du' ($ nice -n 19 du -s -B 1) on path %s with error %v", path, err)
	}
	// du -s prints "<bytes>\t<path>"; the first whitespace-separated field is
	// the byte count.
	used, err := resource.ParseQuantity(strings.Fields(string(out))[0])
	if err != nil {
		return nil, fmt.Errorf("failed to parse 'du' output %s due to error %v", out, err)
	}
	used.Format = resource.BinarySI
	return &used, nil
}
+
// Find uses the equivalent of the command `find <path> -dev -printf '.' | wc -c` to count files and directories.
// While this is not an exact measure of inodes used, it is a very good approximation.
func Find(path string) (int64, error) {
	if path == "" {
		return 0, fmt.Errorf("invalid directory")
	}
	var (
		counter byteCounter
		stderr  bytes.Buffer
	)
	// find prints one '.' per entry (including the root); counting the bytes
	// on stdout therefore counts the entries without buffering them.
	findCmd := exec.Command("find", path, "-xdev", "-printf", ".")
	findCmd.Stdout = &counter
	findCmd.Stderr = &stderr
	if err := findCmd.Start(); err != nil {
		return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
	}
	if err := findCmd.Wait(); err != nil {
		return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stderr.String(), err)
	}
	return counter.bytesWritten, nil
}

// byteCounter is an io.Writer that discards its input and records only the
// number of bytes written through it.
type byteCounter struct{ bytesWritten int64 }

// Write implements io.Writer by counting len(p) and reporting success.
func (b *byteCounter) Write(p []byte) (int, error) {
	b.bytesWritten += int64(len(p))
	return len(p), nil
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go
new file mode 100644
index 000000000..8d35d5dae
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go
@@ -0,0 +1,38 @@
+// +build !linux,!darwin
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/resource"
+)
+
// FsInfo unsupported returns 0 values for available and capacity and an error.
func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
	return 0, 0, 0, 0, 0, 0, fmt.Errorf("FsInfo not supported for this build.")
}

// Du is unsupported on this platform (no GNU du/statfs); it always errors.
func Du(path string) (*resource.Quantity, error) {
	return nil, fmt.Errorf("Du not supported for this build.")
}

// Find is unsupported on this platform; it always errors.
func Find(path string) (int64, error) {
	return 0, fmt.Errorf("Find not supported for this build.")
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/io_util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/io_util.go
new file mode 100644
index 000000000..e1f30f5c3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/io_util.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
// IoUtil is a mockable util for common IO operations
type IoUtil interface {
	// ReadDir lists a directory's contents (see ioutil.ReadDir).
	ReadDir(dirname string) ([]os.FileInfo, error)
	// Lstat returns file info without following symlinks (see os.Lstat).
	Lstat(name string) (os.FileInfo, error)
	// EvalSymlinks resolves a path to its symlink-free form
	// (see filepath.EvalSymlinks).
	EvalSymlinks(path string) (string, error)
}
+
// osIOHandler is the production IoUtil implementation; each method delegates
// directly to the standard library.
type osIOHandler struct{}

// NewIOHandler creates a new IoUtil implementation backed by the real OS.
func NewIOHandler() IoUtil {
	return &osIOHandler{}
}

// ReadDir delegates to ioutil.ReadDir.
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}

// Lstat delegates to os.Lstat.
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
	return os.Lstat(name)
}

// EvalSymlinks delegates to filepath.EvalSymlinks.
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go
new file mode 100644
index 000000000..660c3c9db
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/golang/glog"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/kubernetes/pkg/api/v1"
+ v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
+ storage "k8s.io/kubernetes/pkg/apis/storage/v1"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+ "k8s.io/kubernetes/pkg/util/mount"
+)
+
+const readyFileName = "ready"
+
+// IsReady checks for the existence of a regular file
+// called 'ready' in the given directory and returns
+// true if that file exists.
+func IsReady(dir string) bool {
+ readyFile := path.Join(dir, readyFileName)
+ s, err := os.Stat(readyFile)
+ if err != nil {
+ return false
+ }
+
+ if !s.Mode().IsRegular() {
+ glog.Errorf("ready-file is not a file: %s", readyFile)
+ return false
+ }
+
+ return true
+}
+
+// SetReady creates a file called 'ready' in the given
+// directory. It logs an error if the file cannot be
+// created.
+func SetReady(dir string) {
+ if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
+ glog.Errorf("Can't mkdir %s: %v", dir, err)
+ return
+ }
+
+ readyFile := path.Join(dir, readyFileName)
+ file, err := os.Create(readyFile)
+ if err != nil {
+ glog.Errorf("Can't touch %s: %v", readyFile, err)
+ return
+ }
+ file.Close()
+}
+
// UnmountPath is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// It delegates to UnmountMountPoint using the cheaper (but bind-mount-unaware)
// IsLikelyNotMountPoint check.
func UnmountPath(mountPath string, mounter mount.Interface) error {
	return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */)
}
+
+// UnmountMountPoint is a common unmount routine that unmounts the given path and
+// deletes the remaining directory if successful.
+// if extensiveMountPointCheck is true
+// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
+// IsNotMountPoint is more expensive but properly handles bind mounts.
+func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
+ if pathExists, pathErr := PathExists(mountPath); pathErr != nil {
+ return fmt.Errorf("Error checking if path exists: %v", pathErr)
+ } else if !pathExists {
+ glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
+ return nil
+ }
+
+ var notMnt bool
+ var err error
+
+ if extensiveMountPointCheck {
+ notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
+ } else {
+ notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if notMnt {
+ glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
+ return os.Remove(mountPath)
+ }
+
+ // Unmount the mount path
+ glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
+ if err := mounter.Unmount(mountPath); err != nil {
+ return err
+ }
+ notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
+ if mntErr != nil {
+ return err
+ }
+ if notMnt {
+ glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
+ return os.Remove(mountPath)
+ }
+ return fmt.Errorf("Failed to unmount path %v", mountPath)
+}
+
// PathExists returns true if the specified path exists.
// A stat error other than "not exist" is propagated to the caller.
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
+
+// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
+func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
+ secret := make(map[string]string)
+ if kubeClient == nil {
+ return secret, fmt.Errorf("Cannot get kube client")
+ }
+ secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
+ if err != nil {
+ return secret, err
+ }
+ for name, data := range secrets.Data {
+ secret[name] = string(data)
+ }
+ return secret, nil
+}
+
+// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
+func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
+ secret := make(map[string]string)
+ if kubeClient == nil {
+ return secret, fmt.Errorf("Cannot get kube client")
+ }
+ secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
+ if err != nil {
+ return secret, err
+ }
+ if secrets.Type != v1.SecretType(volumePluginName) {
+ return secret, fmt.Errorf("Cannot get secret of type %s", volumePluginName)
+ }
+ for name, data := range secrets.Data {
+ secret[name] = string(data)
+ }
+ return secret, nil
+}
+
+func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
+ if kubeClient == nil {
+ return nil, fmt.Errorf("Cannot get kube client")
+ }
+ className := v1helper.GetPersistentVolumeClass(pv)
+ if className == "" {
+ return nil, fmt.Errorf("Volume has no storage class")
+ }
+
+ class, err := kubeClient.StorageV1().StorageClasses().Get(className, metav1.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return class, nil
+}
+
// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
// This ensures that we don't mount a volume that doesn't belong to this node
func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
	affinity, err := v1helper.GetStorageNodeAffinityFromAnnotation(pv.Annotations)
	if err != nil {
		return fmt.Errorf("Error getting storage node affinity: %v", err)
	}
	// No affinity annotation on the PV means any node is acceptable.
	if affinity == nil {
		return nil
	}

	if affinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
		terms := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
		glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", terms)
		// Every term must match the node's labels (terms are ANDed here, not
		// ORed as in scheduler semantics — NOTE(review): confirm intended).
		for _, term := range terms {
			selector, err := v1helper.NodeSelectorRequirementsAsSelector(term.MatchExpressions)
			if err != nil {
				return fmt.Errorf("Failed to parse MatchExpressions: %v", err)
			}
			if !selector.Matches(labels.Set(nodeLabels)) {
				return fmt.Errorf("NodeSelectorTerm %+v does not match node labels", term.MatchExpressions)
			}
		}
	}
	return nil
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/vendor/k8s.io/kubernetes/pkg/volume/volume.go
new file mode 100644
index 000000000..76c96d2e2
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/volume.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "time"
+
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/kubernetes/pkg/api/v1"
+)
+
// Volume represents a directory used by pods or hosts on a node. All method
// implementations of methods in the volume interface must be idempotent.
type Volume interface {
	// GetPath returns the path to which the volume should be mounted for the
	// pod.
	GetPath() string

	// MetricsProvider embeds methods for exposing metrics (e.g.
	// used, available space).
	MetricsProvider
}

// MetricsProvider exposes metrics (e.g. used,available space) related to a
// Volume.
type MetricsProvider interface {
	// GetMetrics returns the Metrics for the Volume. Maybe expensive for
	// some implementations.
	GetMetrics() (*Metrics, error)
}

// Metrics represents the used and available bytes of the Volume.
type Metrics struct {
	// The time at which these stats were updated.
	Time metav1.Time

	// Used represents the total bytes used by the Volume.
	// Note: For block devices this maybe more than the total size of the files.
	Used *resource.Quantity

	// Capacity represents the total capacity (bytes) of the volume's
	// underlying storage. For Volumes that share a filesystem with the host
	// (e.g. emptydir, hostpath) this is the size of the underlying storage,
	// and will not equal Used + Available as the fs is shared.
	Capacity *resource.Quantity

	// Available represents the storage space available (bytes) for the
	// Volume. For Volumes that share a filesystem with the host (e.g.
	// emptydir, hostpath), this is the available space on the underlying
	// storage, and is shared with host processes and other Volumes.
	Available *resource.Quantity

	// InodesUsed represents the total inodes used by the Volume.
	InodesUsed *resource.Quantity

	// Inodes represents the total number of inodes available in the volume.
	// For volumes that share a filesystem with the host (e.g. emptydir, hostpath),
	// this is the inodes available in the underlying storage,
	// and will not equal InodesUsed + InodesFree as the fs is shared.
	Inodes *resource.Quantity

	// InodesFree represent the inodes available for the volume. For Volumes that share
	// a filesystem with the host (e.g. emptydir, hostpath), this is the free inodes
	// on the underlying storage, and is shared with host processes and other volumes
	InodesFree *resource.Quantity
}

// Attributes represents the attributes of this mounter.
type Attributes struct {
	// ReadOnly indicates the volume is mounted read-only.
	ReadOnly bool
	// Managed indicates ownership/permissions are managed for the volume
	// (semantics defined by each plugin — not visible here; verify at use site).
	Managed bool
	// SupportsSELinux indicates the plugin can honor SELinux labeling.
	SupportsSELinux bool
}
+
// Mounter interface provides methods to set up/mount the volume.
type Mounter interface {
	// Uses Interface to provide the path for Docker binds.
	Volume

	// CanMount is called immediately prior to Setup to check if
	// the required components (binaries, etc.) are available on
	// the underlying node to complete the subsequent SetUp (mount)
	// operation. If CanMount returns error, the mount operation is
	// aborted and an event is generated indicating that the node
	// does not have the required binaries to complete the mount.
	// If CanMount succeeds, the mount operation continues
	// normally. The CanMount check can be enabled or disabled
	// using the experimental-check-mount-binaries binary flag
	CanMount() error

	// SetUp prepares and mounts/unpacks the volume to a
	// self-determined directory path. The mount point and its
	// content should be owned by 'fsGroup' so that it can be
	// accessed by the pod. This may be called more than once, so
	// implementations must be idempotent.
	SetUp(fsGroup *int64) error
	// SetUpAt prepares and mounts/unpacks the volume to the
	// specified directory path, which may or may not exist yet.
	// The mount point and its content should be owned by
	// 'fsGroup' so that it can be accessed by the pod. This may
	// be called more than once, so implementations must be
	// idempotent.
	SetUpAt(dir string, fsGroup *int64) error
	// GetAttributes returns the attributes of the mounter.
	GetAttributes() Attributes
}

// Unmounter interface provides methods to cleanup/unmount the volumes.
type Unmounter interface {
	Volume
	// TearDown unmounts the volume from a self-determined directory and
	// removes traces of the SetUp procedure.
	TearDown() error
	// TearDownAt unmounts the volume from the specified directory and
	// removes traces of the SetUp procedure.
	TearDownAt(dir string) error
}
+
// Provisioner is an interface that creates templates for PersistentVolumes
// and can create the volume as a new resource in the infrastructure provider.
type Provisioner interface {
	// Provision creates the resource by allocating the underlying volume in a
	// storage system. This method should block until completion and returns
	// PersistentVolume representing the created storage resource.
	Provision() (*v1.PersistentVolume, error)
}

// Deleter removes the resource from the underlying storage provider. Calls
// to this method should block until the deletion is complete. Any error
// returned indicates the volume has failed to be reclaimed. A nil return
// indicates success.
type Deleter interface {
	Volume
	// Delete removes the underlying storage resource.
	// This method should block until completion.
	// deletedVolumeInUseError returned from this function will not be reported
	// as error and it will be sent as "Info" event to the PV being deleted. The
	// volume controller will retry deleting the volume in the next periodic
	// sync. This can be used to postpone deletion of a volume that is being
	// detached from a node. Deletion of such volume would fail anyway and such
	// error would confuse users.
	Delete() error
}
+
// Attacher can attach a volume to a node.
type Attacher interface {
	// Attach attaches the volume specified by the given spec to the node with the given Name.
	// On success, returns the device path where the device was attached on the
	// node.
	Attach(spec *Spec, nodeName types.NodeName) (string, error)

	// VolumesAreAttached checks whether the list of volumes still attached to the specified
	// node. It returns a map which maps from the volume spec to the checking result.
	// If an error is occurred during checking, the error will be returned
	VolumesAreAttached(specs []*Spec, nodeName types.NodeName) (map[*Spec]bool, error)

	// WaitForAttach blocks until the device is attached to this
	// node. If it successfully attaches, the path to the device
	// is returned. Otherwise, if the device does not attach after
	// the given timeout period, an error will be returned.
	WaitForAttach(spec *Spec, devicePath string, timeout time.Duration) (string, error)

	// GetDeviceMountPath returns a path where the device should
	// be mounted after it is attached. This is a global mount
	// point which should be bind mounted for individual volumes.
	GetDeviceMountPath(spec *Spec) (string, error)

	// MountDevice mounts the disk to a global path which
	// individual pods can then bind mount
	MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
}

// BulkVolumeVerifier can verify, in bulk, which volumes are still attached to
// which nodes.
type BulkVolumeVerifier interface {
	// BulkVerifyVolumes checks whether the list of volumes still attached to the
	// clusters in the node. It returns a map which maps from the volume spec to the checking result.
	// If an error occurs during check - error should be returned and volume on nodes
	// should be assumed as still attached.
	BulkVerifyVolumes(volumesByNode map[types.NodeName][]*Spec) (map[types.NodeName]map[*Spec]bool, error)
}

// Detacher can detach a volume from a node.
type Detacher interface {
	// Detach the given device from the node with the given Name.
	Detach(deviceName string, nodeName types.NodeName) error

	// UnmountDevice unmounts the global mount of the disk. This
	// should only be called once all bind mounts have been
	// unmounted.
	UnmountDevice(deviceMountPath string) error
}
+
// NewDeletedVolumeInUseError returns an error signalling that a volume could
// not be deleted because it is still in use (e.g. still attached to a node).
func NewDeletedVolumeInUseError(message string) error {
	return deletedVolumeInUseError(message)
}

// deletedVolumeInUseError carries its message as the string value itself.
type deletedVolumeInUseError string

// Compile-time assertion that deletedVolumeInUseError satisfies error.
var _ error = deletedVolumeInUseError("")

// IsDeletedVolumeInUse returns true if an error returned from Delete() is
// deletedVolumeInUseError
func IsDeletedVolumeInUse(err error) bool {
	_, ok := err.(deletedVolumeInUseError)
	return ok
}

// Error implements the error interface.
func (err deletedVolumeInUseError) Error() string {
	return string(err)
}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go
new file mode 100644
index 000000000..ef1f45208
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go
@@ -0,0 +1,91 @@
+// +build linux
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "path/filepath"
+ "syscall"
+
+ "os"
+
+ "github.com/golang/glog"
+)
+
+const (
+	// rwMask is the permission mask OR-ed onto files on read-write volumes:
+	// read+write for owner and group.
+	rwMask = os.FileMode(0660)
+	// roMask is the permission mask OR-ed onto files on read-only volumes:
+	// read-only for owner and group.
+	roMask = os.FileMode(0440)
+)
+
+// SetVolumeOwnership modifies the given volume to be owned by
+// fsGroup, and sets SetGid so that newly created files are owned by
+// fsGroup. If fsGroup is nil nothing is done.
+//
+// Every regular file/directory under the mounter's path is chown-ed to
+// (existing uid, *fsGroup) and chmod-ed with rwMask (roMask if the mount is
+// read-only); directories additionally get the setgid bit. Chown/Chmod
+// failures are logged but deliberately not returned, so the walk continues
+// best-effort over the remaining entries; only errors from the walk itself
+// abort and are returned to the caller.
+func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
+
+	if fsGroup == nil {
+		return nil
+	}
+
+	return filepath.Walk(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// chown and chmod pass through to the underlying file for symlinks.
+		// Symlinks have a mode of 777 but this really doesn't mean anything.
+		// The permissions of the underlying file are what matter.
+		// However, if one reads the mode of a symlink then chmods the symlink
+		// with that mode, it changes the mode of the underlying file, overriding
+		// the defaultMode and permissions initialized by the volume plugin, which
+		// is not what we want; thus, we skip chown/chmod for symlinks.
+		if info.Mode()&os.ModeSymlink != 0 {
+			return nil
+		}
+
+		// Need the raw stat to preserve the existing owner uid; on non-Unix
+		// Sys() may not be a *syscall.Stat_t, in which case skip the entry.
+		stat, ok := info.Sys().(*syscall.Stat_t)
+		if !ok {
+			return nil
+		}
+
+		if stat == nil {
+			glog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path)
+			return nil
+		}
+
+		// Keep the original owner, change only the group to fsGroup.
+		err = os.Chown(path, int(stat.Uid), int(*fsGroup))
+		if err != nil {
+			// Best-effort: log and continue with the remaining entries.
+			glog.Errorf("Chown failed on %v: %v", path, err)
+		}
+
+		mask := rwMask
+		if mounter.GetAttributes().ReadOnly {
+			mask = roMask
+		}
+
+		// Setgid on directories so files created inside inherit fsGroup.
+		if info.IsDir() {
+			mask |= os.ModeSetgid
+		}
+
+		err = os.Chmod(path, info.Mode()|mask)
+		if err != nil {
+			// Best-effort: log and continue with the remaining entries.
+			glog.Errorf("Chmod failed on %v: %v", path, err)
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go
new file mode 100644
index 000000000..45a6cc5ca
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go
@@ -0,0 +1,23 @@
+// +build !linux
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+// SetVolumeOwnership is a no-op on non-Linux platforms (see the
+// "!linux" build tag above): fsGroup-based ownership and permission
+// management is only implemented for Linux.
+func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
+	return nil
+}