diff --git a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go index 4bbc60d3e68f..f9144220519a 100644 --- a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go +++ b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go @@ -506,6 +506,12 @@ func init() { Verbs: sets.NewString("create", "update", "patch"), Resources: sets.NewString("events"), }, + // PersistentVolumeController.syncClaim() -> provisionClaim() + { + APIGroups: []string{extensions.GroupName}, + Verbs: sets.NewString("list", "watch"), + Resources: sets.NewString("storageclasses"), + }, }, }, ) diff --git a/pkg/cmd/server/kubernetes/master.go b/pkg/cmd/server/kubernetes/master.go index b0f8c819a65a..a96165c232c5 100644 --- a/pkg/cmd/server/kubernetes/master.go +++ b/pkg/cmd/server/kubernetes/master.go @@ -154,20 +154,15 @@ func (c *MasterConfig) RunNamespaceController(kubeClient internalclientset.Inter func (c *MasterConfig) RunPersistentVolumeController(client *client.Client, namespace, recyclerImageName, recyclerServiceAccountName string) { s := c.ControllerManager - provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, s.VolumeConfiguration) - if err != nil { - glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.") - } - volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientadapter.FromUnversionedClient(client), s.PVClaimBinderSyncPeriod.Duration, - provisioner, - probeRecyclableVolumePlugins(s.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName), + probeControllerVolumePlugins(s.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName), c.CloudProvider, s.ClusterName, - nil, nil, nil, + nil, nil, nil, nil, s.VolumeConfiguration.EnableDynamicProvisioning, + "", ) volumeController.Run() @@ -187,8 +182,8 @@ func (c *MasterConfig) RunPersistentVolumeController(client *client.Client, name } } -// probeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list. -func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName string) []volume.VolumePlugin { +// probeControllerVolumePlugins collects all persistent volume plugins into an easy to use list. 
+func probeControllerVolumePlugins(config componentconfig.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName string) []volume.VolumePlugin { uid := int64(0) defaultScrubPod := volume.NewPersistentVolumeRecyclerPodTemplate() defaultScrubPod.Namespace = namespace @@ -214,6 +209,7 @@ func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, na RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath), RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath), RecyclerPodTemplate: defaultScrubPod, + ProvisioningEnabled: config.EnableHostPathProvisioning, } if err := kctrlmgr.AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil { glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err) diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 3d6b61a220a7..1bd04377979b 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -2660,6 +2660,14 @@ items: - create - patch - update + - apiGroups: + - extensions + attributeRestrictions: null + resources: + - storageclasses + verbs: + - list + - watch - apiVersion: v1 kind: ClusterRole metadata: diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go index 9947e453437b..b9dff24297cd 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go @@ -386,20 +386,17 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig } } - provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration) - if err != nil { - glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.") - } - volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - provisioner, - ProbeRecyclableVolumePlugins(s.VolumeConfiguration), + ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration), cloud, s.ClusterName, - nil, nil, nil, + // volumeSource, claimSource, classSource, eventRecorder + nil, nil, nil, nil, s.VolumeConfiguration.EnableDynamicProvisioning, + // deault storageClass + "", ) volumeController.Run() time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go index 6b11dd382265..d55b819a3f61 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go @@ -62,8 +62,9 @@ func ProbeAttachableVolumePlugins(config componentconfig.VolumeConfiguration) [] return allPlugins } -// ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list. 
-func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin { +// ProbeControllerVolumePlugins collects all persistent volume plugins into an easy to use list. +// Only provisioner/recycler/deleter volume plugins should be returned. +func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) []volume.VolumePlugin { allPlugins := []volume.VolumePlugin{} // The list of plugins to probe is decided by this binary, not @@ -79,6 +80,7 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) [] RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath), RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath), RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(), + ProvisioningEnabled: config.EnableHostPathProvisioning, } if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil { glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err) @@ -95,32 +97,18 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) [] } allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...) - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) - - return allPlugins -} - -// NewVolumeProvisioner returns a volume provisioner to use when running in a cloud or development environment. -// The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many. -// We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues. -// Not all cloudproviders have provisioning capability, which is the reason for the bool in the return to tell the caller to expect one or not. -func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) { switch { - case cloud == nil && config.EnableHostPathProvisioning: - return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{})) case cloud != nil && aws.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins()) + allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) case cloud != nil && gce.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins()) + allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) case cloud != nil && openstack.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins()) + allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) case cloud != nil && vsphere.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins()) + allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) 
} - return nil, nil + + return allPlugins } func getProvisionablePluginFromVolumePlugins(plugins []volume.VolumePlugin) (volume.ProvisionableVolumePlugin, error) { diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md index e7796576967e..8e293488db71 100644 --- a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md +++ b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md @@ -14,27 +14,49 @@ scripts that launch kube-controller-manager. ### Admin Configuration -No configuration is required by the admin! 3 cloud providers will be provided in the alpha version -of this feature: EBS, GCE, and Cinder. - -When Kubernetes is running in one of those clouds, there will be an implied provisioner. -There is no provisioner when running outside of any of those 3 cloud providers. +The admin must define `StorageClass` objects that describe named "classes" of storage offered in a cluster. Different classes might map to arbitrary levels or policies determined by the admin. When configuring a `StorageClass` object for persistent volume provisioning, the admin will need to describe the type of provisioner to use and the parameters that will be used by the provisioner when it provisions a `PersistentVolume` belonging to the class. + +The name of a StorageClass object is significant, and is how users can request a particular class, by specifying the name in their `PersistentVolumeClaim`. The `provisioner` field must be specified as it determines what volume plugin is used for provisioning PVs. 2 cloud providers will be provided in the beta version of this feature: EBS and GCE. The `parameters` field contains the parameters that describe volumes belonging to the storage class. Different parameters may be accepted depending on the `provisioner`. For example, the value `io1`, for the parameter `type`, and the parameter `iopsPerGB` are specific to EBS . When a parameter is omitted, some default is used. + +#### AWS + +```yaml +kind: StorageClass +apiVersion: extensions/v1beta1 +metadata: + name: slow +provisioner: kubernetes.io/aws-ebs +parameters: + type: io1 + zone: us-east-1d + iopsPerGB: "10" +``` -A fourth provisioner is included for testing and development only. It creates HostPath volumes, -which will never work outside of a single node cluster. It is not supported in any way except for -local for testing and development. +* `type`: `io1`, `gp2`, `sc1`, `st1`. See AWS docs for details. Default: `gp2`. +* `zone`: AWS zone +* `iopsPerGB`: only for `io1` volumes. I/O operations per second per GiB. AWS volume plugin multiplies this with size of requested volume to compute IOPS of the volume and caps it at 20 000 IOPS (maximum supported by AWS, see AWS docs). + +#### GCE + +```yaml +kind: StorageClass +apiVersion: extensions/v1beta1 +metadata: + name: slow +provisioner: kubernetes.io/gce-pd +parameters: + type: pd-standard + zone: us-central1-a +``` +* `type`: `pd-standard` or `pd-ssd`. Default: `pd-ssd` +* `zone`: GCE zone ### User provisioning requests Users request dynamically provisioned storage by including a storage class in their `PersistentVolumeClaim`. -The annotation `volume.alpha.kubernetes.io/storage-class` is used to access this experimental feature. -In the future, admins will be able to define many storage classes. 
-The storage class may remain in an annotation or become a field on the claim itself. - -> The value of the storage-class annotation does not matter in the alpha version of this feature. There is -a single implied provisioner per cloud (which creates 1 kind of volume in the provider). The full version of the feature -will require that this value matches what is configured by the administrator. +The annotation `volume.beta.kubernetes.io/storage-class` is used to access this experimental feature. It is required that this value matches the name of a `StorageClass` configured by the administrator. +In the future, the storage class may remain in an annotation or become a field on the claim itself. ``` { @@ -43,7 +65,7 @@ will require that this value matches what is configured by the administrator. "metadata": { "name": "claim1", "annotations": { - "volume.alpha.kubernetes.io/storage-class": "foo" + "volume.beta.kubernetes.io/storage-class": "slow" } }, "spec": { @@ -61,26 +83,28 @@ will require that this value matches what is configured by the administrator. ### Sample output -This example uses HostPath but any provisioner would follow the same flow. +This example uses gce but any provisioner would follow the same flow. -First we note there are no Persistent Volumes in the cluster. After creating a claim, we see a new PV is created +First we note there are no Persistent Volumes in the cluster. After creating a storage class and a claim including that storage class, we see a new PV is created and automatically bound to the claim requesting storage. ``` $ kubectl get pv +$ kubectl create -f examples/experimental/persistent-volume-provisioning/gce-pd.yaml +storageclass "slow" created + $ kubectl create -f examples/experimental/persistent-volume-provisioning/claim1.json -I1012 13:07:57.666759 22875 decoder.go:141] decoding stream as JSON persistentvolumeclaim "claim1" created $ kubectl get pv -NAME LABELS CAPACITY ACCESSMODES STATUS CLAIM REASON AGE -pv-hostpath-r6z5o createdby=hostpath-dynamic-provisioner 3Gi RWO Bound default/claim1 2s +NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE +pvc-bb6d2f0c-534c-11e6-9348-42010af00002 3Gi RWO Bound default/claim1 4s $ kubectl get pvc -NAME LABELS STATUS VOLUME CAPACITY ACCESSMODES AGE -claim1 Bound pv-hostpath-r6z5o 3Gi RWO 7s +NAME LABELS STATUS VOLUME CAPACITY ACCESSMODES AGE +claim1 Bound pvc-bb6d2f0c-534c-11e6-9348-42010af00002 3Gi RWO 7s # delete the claim to release the volume $ kubectl delete pvc claim1 diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/aws-ebs.yaml b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/aws-ebs.yaml new file mode 100644 index 000000000000..ee5b1e93a85b --- /dev/null +++ b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/aws-ebs.yaml @@ -0,0 +1,9 @@ +kind: StorageClass +apiVersion: extensions/v1beta1 +metadata: + name: slow +provisioner: kubernetes.io/aws-ebs +parameters: + type: io1 + zone: us-east-1d + iopsPerGB: "10" \ No newline at end of file diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim1.json b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim1.json index 48f28b3ca876..6dc3a0d41b14 100644 --- a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim1.json +++ b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim1.json @@ -4,7 +4,7 @@ "metadata": { "name": "claim1", 
"annotations": { - "volume.alpha.kubernetes.io/storage-class": "foo" + "volume.beta.kubernetes.io/storage-class": "slow" } }, "spec": { diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim2.json b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim2.json deleted file mode 100644 index 8ffd9c8e8f56..000000000000 --- a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/claim2.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "kind": "PersistentVolumeClaim", - "apiVersion": "v1", - "metadata": { - "name": "claim2", - "annotations": { - "volume.alpha.kubernetes.io/storage-class": "bar" - } - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "3Gi" - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/gce-pd.yaml b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/gce-pd.yaml new file mode 100644 index 000000000000..3afb7d352f95 --- /dev/null +++ b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/gce-pd.yaml @@ -0,0 +1,8 @@ +kind: StorageClass +apiVersion: extensions/v1beta1 +metadata: + name: slow +provisioner: kubernetes.io/gce-pd +parameters: + type: pd-standard + zone: us-central1-a \ No newline at end of file diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index a2f9836f302d..98a3adc3f7bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -28,6 +28,8 @@ import ( "sync" "time" + "gopkg.in/gcfg.v1" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -39,7 +41,6 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/golang/glog" - "gopkg.in/gcfg.v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/service" @@ -208,11 +209,29 @@ type EC2Metadata interface { GetMetadata(path string) (string, error) } +// AWS volume types +const ( + // Provisioned IOPS SSD + VolumeTypeIO1 = "io1" + // General Purpose SSD + VolumeTypeGP2 = "gp2" + // Cold HDD (sc1) + VolumeTypeSC1 = "sc1" + // Throughput Optimized HDD + VolumeTypeST1 = "st1" +) + // VolumeOptions specifies capacity and tags for a volume. type VolumeOptions struct { - CapacityGB int - Tags map[string]string - PVCName string + CapacityGB int + Tags map[string]string + PVCName string + VolumeType string + AvailabilityZone string + // IOPSPerGB x CapacityGB will give total IOPS of the volume to create. + // IPSPerGB must be bigger than zero and smaller or equal to 30. + // Calculated total IOPS will be capped at 20000 IOPS. 
+ IOPSPerGB int } // Volumes is an interface for managing cloud-provisioned volumes @@ -1459,14 +1478,47 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { return "", fmt.Errorf("error querying for all zones: %v", err) } - createAZ := volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName) + createAZ := volumeOptions.AvailabilityZone + if createAZ == "" { + createAZ = volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName) + } + + var createType string + var iops int64 + switch volumeOptions.VolumeType { + case VolumeTypeGP2, VolumeTypeSC1, VolumeTypeST1: + createType = volumeOptions.VolumeType + + case VolumeTypeIO1: + // See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html for IOPS constraints + if volumeOptions.IOPSPerGB <= 0 || volumeOptions.IOPSPerGB > 30 { + return "", fmt.Errorf("invalid iopsPerGB value %d, must be 0 < IOPSPerGB <= 30", volumeOptions.IOPSPerGB) + } + createType = volumeOptions.VolumeType + iops = int64(volumeOptions.CapacityGB * volumeOptions.IOPSPerGB) + if iops < 100 { + iops = 100 + } + if iops > 20000 { + iops = 20000 + } + + case "": + createType = DefaultVolumeType + + default: + return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType) + } // TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?) request := &ec2.CreateVolumeInput{} request.AvailabilityZone = &createAZ volSize := int64(volumeOptions.CapacityGB) request.Size = &volSize - request.VolumeType = aws.String(DefaultVolumeType) + request.VolumeType = &createType + if iops > 0 { + request.Iops = &iops + } response, err := c.ec2.CreateVolume(request) if err != nil { return "", err diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index 40f247451e93..607ac78e182b 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -101,6 +101,16 @@ type Config struct { } } +type DiskType string + +const ( + DiskTypeSSD = "pd-ssd" + DiskTypeStandard = "pd-standard" + + diskTypeDefault = DiskTypeStandard + diskTypeUriTemplate = "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/%s" +) + // Disks is interface for manipulation with GCE PDs. type Disks interface { // AttachDisk attaches given disk to given instance. Current instance @@ -116,7 +126,7 @@ type Disks interface { // CreateDisk creates a new PD with given properties. Tags are serialized // as JSON into Description field. - CreateDisk(name string, zone string, sizeGb int64, tags map[string]string) error + CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error // DeleteDisk deletes PD. DeleteDisk(diskToDelete string) error @@ -2226,18 +2236,29 @@ func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) { } // CreateDisk creates a new Persistent Disk, with the specified name & size, in -// the specified zone. It stores specified tags endoced in JSON in Description +// the specified zone. It stores specified tags encoded in JSON in Description // field. 
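// Illustrative note (not part of the patch; the project name is hypothetical):
// with diskType "pd-standard" and zone "us-central1-a", diskTypeUriTemplate
// expands to
// https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/diskTypes/pd-standard
// and is set as compute.Disk.Type below.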
-func (gce *GCECloud) CreateDisk(name string, zone string, sizeGb int64, tags map[string]string) error { +func (gce *GCECloud) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { tagsStr, err := gce.encodeDiskTags(tags) if err != nil { return err } + switch diskType { + case DiskTypeSSD, DiskTypeStandard: + // noop + case "": + diskType = diskTypeDefault + default: + return fmt.Errorf("invalid GCE disk type %q", diskType) + } + diskTypeUri := fmt.Sprintf(diskTypeUriTemplate, gce.projectID, zone, diskType) + diskToCreate := &compute.Disk{ Name: name, SizeGb: sizeGb, Description: tagsStr, + Type: diskTypeUri, } createOp, err := gce.service.Disks.Insert(gce.projectID, zone, diskToCreate).Do() diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go index 86ed41e3a72a..fd2e8614268e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go @@ -20,6 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" ) // Test single call to syncClaim and syncVolume methods. @@ -422,7 +423,7 @@ func TestSync(t *testing.T) { noevents, noerrors, testSyncVolume, }, } - runSyncTests(t, tests) + runSyncTests(t, tests, []*extensions.StorageClass{}, "") } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -469,5 +470,5 @@ func TestMultiSync(t *testing.T) { }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, []*extensions.StorageClass{}, "") } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go index 87dc695c275c..b4f0b52489d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/record" @@ -72,10 +73,19 @@ const annBindCompleted = "pv.kubernetes.io/bind-completed" // pre-bound). Value of this annotation does not matter. const annBoundByController = "pv.kubernetes.io/bound-by-controller" -// annClass annotation represents a new field which instructs dynamic -// provisioning to choose a particular storage class (aka profile). -// Value of this annotation should be empty. -const annClass = "volume.alpha.kubernetes.io/storage-class" +// annClass annotation represents the storage class associated with a resource: +// - in PersistentVolumeClaim it represents required class to match. +// Only PersistentVolumes with the same class (i.e. annotation with the same +// value) can be bound to the claim. In case no such volume exists, the +// controller will provision a new one using StorageClass instance with +// the same name as the annotation value. +// - in PersistentVolume it represents storage class to which the persistent +// volume belongs. +const annClass = "volume.beta.kubernetes.io/storage-class" + +// alphaAnnClass annotation represents the previous alpha storage class +// annotation. it's no longer used and held here for posterity. 
+const alphaAnnClass = "volume.alpha.kubernetes.io/storage-class" // This annotation is added to a PV that has been dynamically provisioned by // Kubernetes. Its value is name of volume plugin that created the volume. @@ -112,13 +122,16 @@ type PersistentVolumeController struct { claimController *framework.Controller claimControllerStopCh chan struct{} claimSource cache.ListerWatcher + classReflector *cache.Reflector + classReflectorStopCh chan struct{} + classSource cache.ListerWatcher kubeClient clientset.Interface eventRecorder record.EventRecorder cloud cloudprovider.Interface - recyclePluginMgr vol.VolumePluginMgr - provisioner vol.ProvisionableVolumePlugin + volumePluginMgr vol.VolumePluginMgr enableDynamicProvisioning bool clusterName string + defaultStorageClass string // Cache of the last known version of volumes and claims. This cache is // thread safe as long as the volumes/claims there are not modified, they @@ -127,6 +140,7 @@ type PersistentVolumeController struct { // it saves newer version to etcd. volumes persistentVolumeOrderedIndex claims cache.Store + classes cache.Store // Map of scheduled/running operations. runningOperations goroutinemap.GoRoutineMap @@ -885,7 +899,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) // Find a plugin. spec := vol.NewSpecFromPersistentVolume(volume, false) - plugin, err := ctrl.recyclePluginMgr.FindRecyclablePluginBySpec(spec) + plugin, err := ctrl.volumePluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { // No recycler found. Emit an event and mark the volume Failed. if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!"); err != nil { @@ -1039,13 +1053,32 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV // (it will be re-used in future provisioner error cases). func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVolume) error { glog.V(4).Infof("doDeleteVolume [%s]", volume.Name) - // Find a plugin. + var err error + + // Find a plugin. Try to find the same plugin that provisioned the volume + var plugin vol.DeletableVolumePlugin + if hasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) { + provisionPluginName := volume.Annotations[annDynamicallyProvisioned] + if provisionPluginName != "" { + plugin, err = ctrl.volumePluginMgr.FindDeletablePluginByName(provisionPluginName) + if err != nil { + glog.V(3).Infof("did not find a deleter plugin %q for volume %q: %v, will try to find a generic one", + provisionPluginName, volume.Name, err) + } + } + } + spec := vol.NewSpecFromPersistentVolume(volume, false) - plugin, err := ctrl.recyclePluginMgr.FindDeletablePluginBySpec(spec) - if err != nil { - // No deleter found. Emit an event and mark the volume Failed. - return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err) + if plugin == nil { + // The plugin that provisioned the volume was not found or the volume + // was not dynamically provisioned. Try a generic plugin. + plugin, err = ctrl.volumePluginMgr.FindDeletablePluginBySpec(spec) + if err != nil { + // No deleter found. Emit an event and mark the volume Failed. 
+ return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err) + } } + glog.V(5).Infof("found a deleter plugin %q for volume %q", plugin.GetPluginName(), volume.Name) // Plugin found deleter, err := plugin.NewDeleter(spec) @@ -1107,12 +1140,10 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa return } - // TODO: find provisionable plugin based on a class/profile - plugin := ctrl.provisioner - if plugin == nil { - // No provisioner found. Emit an event. - ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", "No provisioner plugin found for the claim!") - glog.V(2).Infof("no provisioner plugin found for claim %s!", claimToClaimKey(claim)) + plugin, storageClass, err := ctrl.findProvisionablePlugin(claim) + if err != nil { + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", err.Error()) + glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) // The controller will retry provisioning the volume in every // syncVolume() call. return @@ -1132,21 +1163,23 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa ClusterName: ctrl.clusterName, PVName: pvName, PVCName: claim.Name, + Parameters: storageClass.Parameters, + ProvisionerSelector: claim.Spec.Selector, } // Provision the volume provisioner, err := plugin.NewProvisioner(options) if err != nil { strerr := fmt.Sprintf("Failed to create provisioner: %v", err) - glog.V(2).Infof("failed to create provisioner for claim %q: %v", claimToClaimKey(claim), err) + glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) return } volume, err = provisioner.Provision() if err != nil { - strerr := fmt.Sprintf("Failed to provision volume: %v", err) - glog.V(2).Infof("failed to provision volume for claim %q: %v", claimToClaimKey(claim), err) + strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) + glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) return } @@ -1162,6 +1195,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa // Add annBoundByController (used in deleting the volume) setAnnotation(&volume.ObjectMeta, annBoundByController, "yes") setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName()) + setAnnotation(&volume.ObjectMeta, annClass, getClaimClass(claim)) // Try to create the PV object several times for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { @@ -1240,3 +1274,40 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, } } } + +func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *extensions.StorageClass, error) { + storageClass, err := ctrl.findStorageClass(claim) + if err != nil { + return nil, nil, err + } + + // Find a plugin for the class + plugin, err := ctrl.volumePluginMgr.FindProvisionablePluginByName(string(storageClass.Provisioner)) + if err != nil { + return nil, nil, err + } + return plugin, storageClass, nil +} + +func (ctrl *PersistentVolumeController) findStorageClass(claim *api.PersistentVolumeClaim) 
(*extensions.StorageClass, error) { + className := getClaimClass(claim) + if className == "" { + className = ctrl.defaultStorageClass + } + if className == "" { + return nil, fmt.Errorf("No default StorageClass configured") + } + + classObj, found, err := ctrl.classes.GetByKey(className) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("StorageClass %q not found", className) + } + class, ok := classObj.(*extensions.StorageClass) + if !ok { + return nil, fmt.Errorf("Cannot convert object to StorageClass: %+v", classObj) + } + return class, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go index 67a70b2f39e8..0f65e58acc14 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" @@ -47,13 +48,13 @@ import ( func NewPersistentVolumeController( kubeClient clientset.Interface, syncPeriod time.Duration, - provisioner vol.ProvisionableVolumePlugin, - recyclers []vol.VolumePlugin, + volumePlugins []vol.VolumePlugin, cloud cloudprovider.Interface, clusterName string, - volumeSource, claimSource cache.ListerWatcher, + volumeSource, claimSource, classSource cache.ListerWatcher, eventRecorder record.EventRecorder, enableDynamicProvisioning bool, + defaultStorageClass string, ) *PersistentVolumeController { if eventRecorder == nil { @@ -63,25 +64,20 @@ func NewPersistentVolumeController( } controller := &PersistentVolumeController{ - volumes: newPersistentVolumeOrderedIndex(), - claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc), - kubeClient: kubeClient, - eventRecorder: eventRecorder, - runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */), - cloud: cloud, - provisioner: provisioner, + volumes: newPersistentVolumeOrderedIndex(), + claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc), + kubeClient: kubeClient, + eventRecorder: eventRecorder, + runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */), + cloud: cloud, enableDynamicProvisioning: enableDynamicProvisioning, clusterName: clusterName, createProvisionedPVRetryCount: createProvisionedPVRetryCount, createProvisionedPVInterval: createProvisionedPVInterval, + defaultStorageClass: defaultStorageClass, } - controller.recyclePluginMgr.InitPlugins(recyclers, controller) - if controller.provisioner != nil { - if err := controller.provisioner.Init(controller); err != nil { - glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) - } - } + controller.volumePluginMgr.InitPlugins(volumePlugins, controller) if volumeSource == nil { volumeSource = &cache.ListWatch{ @@ -107,6 +103,18 @@ func NewPersistentVolumeController( } controller.claimSource = claimSource + if classSource == nil { + classSource = &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Extensions().StorageClasses().List(options) + }, 
+ WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Extensions().StorageClasses().Watch(options) + }, + } + } + controller.classSource = classSource + _, controller.volumeController = framework.NewIndexerInformer( volumeSource, &api.PersistentVolume{}, @@ -128,6 +136,16 @@ func NewPersistentVolumeController( DeleteFunc: controller.deleteClaim, }, ) + + // This is just a cache of StorageClass instances, no special actions are + // needed when a class is created/deleted/updated. + controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc) + controller.classReflector = cache.NewReflector( + classSource, + &extensions.StorageClass{}, + controller.classes, + syncPeriod, + ) return controller } @@ -433,6 +451,11 @@ func (ctrl *PersistentVolumeController) Run() { ctrl.claimControllerStopCh = make(chan struct{}) go ctrl.claimController.Run(ctrl.claimControllerStopCh) } + + if ctrl.classReflectorStopCh == nil { + ctrl.classReflectorStopCh = make(chan struct{}) + go ctrl.classReflector.RunUntil(ctrl.classReflectorStopCh) + } } // Stop gracefully shuts down this controller @@ -440,6 +463,7 @@ func (ctrl *PersistentVolumeController) Stop() { glog.V(4).Infof("stopping PersistentVolumeController") close(ctrl.volumeControllerStopCh) close(ctrl.claimControllerStopCh) + close(ctrl.classReflectorStopCh) } const ( @@ -578,3 +602,25 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo } return true, nil } + +// getVolumeClass returns value of annClass annotation or empty string in case +// the annotation does not exist. +// TODO: change to PersistentVolume.Spec.Class value when this attribute is +// introduced. +func getVolumeClass(volume *api.PersistentVolume) string { + if class, found := volume.Annotations[annClass]; found { + return class + } + return "" +} + +// getClaimClass returns value of annClass annotation or empty string in case +// the annotation does not exist. +// TODO: change to PersistentVolumeClaim.Spec.Class value when this attribute is +// introduced. 
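// Illustrative note (not part of the patch): a claim annotated with
// "volume.beta.kubernetes.io/storage-class": "gold" makes getClaimClass return
// "gold"; findStorageClass then looks the "gold" StorageClass up in
// ctrl.classes (falling back to defaultStorageClass when the annotation is
// empty), and findProvisionablePlugin resolves its Provisioner field to a
// registered volume plugin.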
+func getClaimClass(claim *api.PersistentVolumeClaim) string { + if class, found := claim.Annotations[annClass]; found { + return class + } + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_test.go index 4233e1b6bd10..b9169127c98e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_test.go @@ -164,7 +164,7 @@ func TestControllerSync(t *testing.T) { client := &fake.Clientset{} volumeSource := framework.NewFakePVControllerSource() claimSource := framework.NewFakePVCControllerSource() - ctrl := newTestController(client, volumeSource, claimSource, true) + ctrl := newTestController(client, volumeSource, claimSource, nil, true, "") reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) for _, claim := range test.initialClaims { claimSource.Add(claim) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/delete_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/delete_test.go index c486502b8bda..44d170038523 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/delete_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/delete_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" ) // Test single call to syncVolume, expecting recycling to happen. @@ -39,7 +40,7 @@ func TestDeleteSync(t *testing.T) { noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. - wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, { // delete volume bound by user @@ -51,7 +52,7 @@ func TestDeleteSync(t *testing.T) { noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. - wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, { // delete failure - plugin not found @@ -70,7 +71,7 @@ func TestDeleteSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedDelete"}, noerrors, - wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), }, { // delete failure - delete() returns error @@ -80,7 +81,7 @@ func TestDeleteSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedDelete"}, noerrors, - wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), }, { // delete success(?) 
- volume is deleted before doDelete() starts @@ -90,7 +91,7 @@ func TestDeleteSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Delete the volume before delete operation starts reactor.lock.Lock() delete(reactor.volumes, "volume8-6") @@ -107,7 +108,7 @@ func TestDeleteSync(t *testing.T) { noclaims, newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound), noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { reactor.lock.Lock() defer reactor.lock.Unlock() // Bind the volume to resurrected claim (this should never @@ -130,10 +131,10 @@ func TestDeleteSync(t *testing.T) { noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. - wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, } - runSyncTests(t, tests) + runSyncTests(t, tests, []*extensions.StorageClass{}, "") } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -161,9 +162,9 @@ func TestDeleteMultiSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedDelete"}, noerrors, - wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, []*extensions.StorageClass{}, "") } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go index c4b5752560cd..53ff11f6f306 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" @@ -555,7 +556,7 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, return reactor } -func newTestController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController { +func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool, defaultStorageClass string) *PersistentVolumeController { if volumeSource == nil { volumeSource = framework.NewFakePVControllerSource() } @@ -565,14 +566,15 @@ func newTestController(kubeClient 
clientset.Interface, volumeSource, claimSource ctrl := NewPersistentVolumeController( kubeClient, 5*time.Second, // sync period - nil, // provisioner []vol.VolumePlugin{}, // recyclers nil, // cloud "", volumeSource, claimSource, + classSource, record.NewFakeRecorder(1000), // event recorder enableDynamicProvisioning, + defaultStorageClass, ) // Speed up the test @@ -580,27 +582,6 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource return ctrl } -func addRecyclePlugin(ctrl *PersistentVolumeController, expectedRecycleCalls []error) { - plugin := &mockVolumePlugin{ - recycleCalls: expectedRecycleCalls, - } - ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) -} - -func addDeletePlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) { - plugin := &mockVolumePlugin{ - deleteCalls: expectedDeleteCalls, - } - ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) -} - -func addProvisionPlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) { - plugin := &mockVolumePlugin{ - provisionCalls: expectedDeleteCalls, - } - ctrl.provisioner = plugin -} - // newVolume returns a new volume with given attributes func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) *api.PersistentVolume { volume := api.PersistentVolume{ @@ -636,10 +617,13 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase a if len(annotations) > 0 { volume.Annotations = make(map[string]string) for _, a := range annotations { - if a != annDynamicallyProvisioned { - volume.Annotations[a] = "yes" - } else { + switch a { + case annDynamicallyProvisioned: volume.Annotations[a] = mockPluginName + case annClass: + volume.Annotations[a] = "gold" + default: + volume.Annotations[a] = "yes" } } } @@ -674,6 +658,17 @@ func withMessage(message string, volumes []*api.PersistentVolume) []*api.Persist return volumes } +// volumeWithClass saves given class into annClass annotation. +// Meant to be used to compose claims specified inline in a test. +func volumeWithClass(className string, volumes []*api.PersistentVolume) []*api.PersistentVolume { + if volumes[0].Annotations == nil { + volumes[0].Annotations = map[string]string{annClass: className} + } else { + volumes[0].Annotations[annClass] = className + } + return volumes +} + // newVolumeArray returns array with a single volume that would be returned by // newVolume() with the same parameters. func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) []*api.PersistentVolume { @@ -710,7 +705,12 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase api.Persiste if len(annotations) > 0 { claim.Annotations = make(map[string]string) for _, a := range annotations { - claim.Annotations[a] = "yes" + switch a { + case annClass: + claim.Annotations[a] = "gold" + default: + claim.Annotations[a] = "yes" + } } } return &claim @@ -724,6 +724,17 @@ func newClaimArray(name, claimUID, capacity, boundToVolume string, phase api.Per } } +// claimWithClass saves given class into annClass annotation. +// Meant to be used to compose claims specified inline in a test. 
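// Illustrative usage (not part of the patch):
//   claimWithClass("gold", newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending))
// returns the claim array with the first claim annotated
// "volume.beta.kubernetes.io/storage-class": "gold".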
+func claimWithClass(className string, claims []*api.PersistentVolumeClaim) []*api.PersistentVolumeClaim { + if claims[0].Annotations == nil { + claims[0].Annotations = map[string]string{annClass: className} + } else { + claims[0].Annotations[annClass] = className + } + return claims +} + func testSyncClaim(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { return ctrl.syncClaim(test.initialClaims[0]) } @@ -745,29 +756,45 @@ type operationType string const operationDelete = "Delete" const operationRecycle = "Recycle" -const operationProvision = "Provision" -// wrapTestWithControllerConfig returns a testCall that: -// - configures controller with recycler, deleter or provisioner which will -// return provided errors when a volume is deleted, recycled or provisioned +// wrapTestWithReclaimCalls returns a testCall that: +// - configures controller with a volume plugin that implements recycler, +// deleter and provisioner. The plugin retunrs provided errors when a volume +// is deleted, recycled or provisioned. // - calls given testCall -func wrapTestWithControllerConfig(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { - expected := expectedOperationCalls - +func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall { return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { - switch operation { - case operationDelete: - addDeletePlugin(ctrl, expected) - case operationRecycle: - addRecyclePlugin(ctrl, expected) - case operationProvision: - addProvisionPlugin(ctrl, expected) + plugin := &mockVolumePlugin{ + recycleCalls: expectedRecycleCalls, + deleteCalls: expectedDeleteCalls, + provisionCalls: expectedProvisionCalls, } + ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) return toWrap(ctrl, reactor, test) } } +// wrapTestWithReclaimCalls returns a testCall that: +// - configures controller with recycler or deleter which will return provided +// errors when a volume is deleted or recycled +// - calls given testCall +func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { + if operation == operationDelete { + return wrapTestWithPluginCalls(nil, expectedOperationCalls, nil, toWrap) + } else { + return wrapTestWithPluginCalls(expectedOperationCalls, nil, nil, toWrap) + } +} + +// wrapTestWithProvisionCalls returns a testCall that: +// - configures controller with a provisioner which will return provided errors +// when a claim is provisioned +// - calls given testCall +func wrapTestWithProvisionCalls(expectedProvisionCalls []provisionCall, toWrap testCall) testCall { + return wrapTestWithPluginCalls(nil, nil, expectedProvisionCalls, toWrap) +} + // wrapTestWithInjectedOperation returns a testCall that: // - starts the controller and lets it run original testCall until // scheduleOperation() call. It blocks the controller there and calls the @@ -825,13 +852,13 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto // 2. Call the tested function (syncClaim/syncVolume) via // controllerTest.testCall *once*. // 3. Compare resulting volumes and claims with expected volumes and claims. 
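// Illustrative note (not part of the patch): existing callers now pass the
// classes to inject and the default class name, e.g.
//   runSyncTests(t, tests, []*extensions.StorageClass{}, "")
// runs every case with an empty class cache and no default StorageClass.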
-func runSyncTests(t *testing.T, tests []controllerTest) { +func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*extensions.StorageClass, defaultStorageClass string) { for _, test := range tests { glog.V(4).Infof("starting test %q", test.name) // Initialize the controller client := &fake.Clientset{} - ctrl := newTestController(client, nil, nil, true) + ctrl := newTestController(client, nil, nil, nil, true, defaultStorageClass) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -842,6 +869,15 @@ func runSyncTests(t *testing.T, tests []controllerTest) { reactor.volumes[volume.Name] = volume } + // Convert classes to []interface{} and forcefully inject them into + // controller. + storageClassPtrs := make([]interface{}, len(storageClasses)) + for i, s := range storageClasses { + storageClassPtrs[i] = s + } + // 1 is the resource version + ctrl.classes.Replace(storageClassPtrs, "1") + // Run the tested functions err := test.test(ctrl, reactor, test) if err != nil { @@ -869,13 +905,22 @@ func runSyncTests(t *testing.T, tests []controllerTest) { // 5. When 3. does not do any changes, finish the tests and compare final set // of volumes/claims with expected claims/volumes and report differences. // Some limit of calls in enforced to prevent endless loops. -func runMultisyncTests(t *testing.T, tests []controllerTest) { +func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*extensions.StorageClass, defaultStorageClass string) { for _, test := range tests { glog.V(4).Infof("starting multisync test %q", test.name) // Initialize the controller client := &fake.Clientset{} - ctrl := newTestController(client, nil, nil, true) + ctrl := newTestController(client, nil, nil, nil, true, defaultStorageClass) + + // Convert classes to []interface{} and forcefully inject them into + // controller. + storageClassPtrs := make([]interface{}, len(storageClasses)) + for i, s := range storageClasses { + storageClassPtrs[i] = s + } + ctrl.classes.Replace(storageClassPtrs, "1") + reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -971,7 +1016,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { // Dummy volume plugin for provisioning, deletion and recycling. It contains // lists of expected return values to simulate errors. 
type mockVolumePlugin struct { - provisionCalls []error + provisionCalls []provisionCall provisionCallCounter int deleteCalls []error deleteCallCounter int @@ -980,6 +1025,11 @@ type mockVolumePlugin struct { provisionOptions vol.VolumeOptions } +type provisionCall struct { + expectedParameters map[string]string + ret error +} + var _ vol.VolumePlugin = &mockVolumePlugin{} var _ vol.RecyclableVolumePlugin = &mockVolumePlugin{} var _ vol.DeletableVolumePlugin = &mockVolumePlugin{} @@ -1032,8 +1082,12 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { } var pv *api.PersistentVolume - err := plugin.provisionCalls[plugin.provisionCallCounter] - if err == nil { + call := plugin.provisionCalls[plugin.provisionCallCounter] + if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) { + glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters) + return nil, fmt.Errorf("Mock plugin error: invalid provisioner call") + } + if call.ret == nil { // Create a fake PV with known GCE volume (to match expected volume) pv = &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ @@ -1053,8 +1107,8 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { } plugin.provisionCallCounter++ - glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, err) - return pv, err + glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret) + return pv, call.ret } // Deleter interfaces diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go index 1d4df66e4f30..fe9617de6bcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go @@ -18,12 +18,11 @@ package persistentvolume import ( "fmt" - "sort" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/labels" + "sort" ) // persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes @@ -126,11 +125,16 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo // filter out: // - volumes bound to another claim // - volumes whose labels don't match the claim's selector, if specified + // - volumes in Class that is not requested if volume.Spec.ClaimRef != nil { continue } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { continue } + claimClass := getClaimClass(claim) + if claimClass != "" && claimClass != getVolumeClass(volume) { + continue + } volumeQty := volume.Spec.Capacity[api.ResourceStorage] volumeSize := volumeQty.Value() @@ -142,17 +146,6 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo } } - // We want to provision volumes if the annotation is set even if there - // is matching PV. Therefore, do not look for available PV and let - // a new volume to be provisioned. - // - // When provisioner creates a new PV to this claim, an exact match - // pre-bound to the claim will be found by the checks above during - // subsequent claim sync. 
- if hasAnnotation(claim.ObjectMeta, annClass) { - return nil, nil - } - if smallestVolume != nil { // Found a matching volume return smallestVolume, nil diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go index 4fdb9c15ae5d..acaabf314149 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go @@ -164,7 +164,52 @@ func TestMatchVolume(t *testing.T) { AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, Resources: api.ResourceRequirements{ Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"), + api.ResourceName(api.ResourceStorage): resource.MustParse("20000G"), + }, + }, + }, + }, + }, + "successful-match-with-class": { + expectedMatch: "gce-pd-silver1", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{ + "should-exist": "true", + }, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + }, + }, + }, + }, + "successful-match-with-class-and-labels": { + expectedMatch: "gce-pd-silver2", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), }, }, }, @@ -561,6 +606,29 @@ func createTestVolumes() []*api.PersistentVolume { "should-exist": "true", }, }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("20000G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-silver1", + Name: "gce0023", + Labels: map[string]string{ + "should-exist": "true", + }, + Annotations: map[string]string{ + annClass: "silver", + }, + }, Spec: api.PersistentVolumeSpec{ Capacity: api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"), @@ -573,6 +641,46 @@ func createTestVolumes() []*api.PersistentVolume { }, }, }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-silver2", + Name: "gce0024", + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("100G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-gold", + Name: "gce0025", + Annotations: map[string]string{ + annClass: "gold", + }, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: 
api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("50G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + }, } } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/provision_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/provision_test.go index dc9c59ae05c4..14a5a6d294a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/provision_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/provision_test.go @@ -21,8 +21,59 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" ) +var class1Parameters = map[string]string{ + "param1": "value1", +} +var class2Parameters = map[string]string{ + "param2": "value2", +} +var storageClasses = []*extensions.StorageClass{ + { + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + + ObjectMeta: api.ObjectMeta{ + Name: "gold", + }, + + Provisioner: mockPluginName, + Parameters: class1Parameters, + }, + { + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: api.ObjectMeta{ + Name: "silver", + }, + Provisioner: mockPluginName, + Parameters: class2Parameters, + }, +} + +// call to storageClass 1, returning an error +var provision1Error = provisionCall{ + ret: errors.New("Mock provisioner error"), + expectedParameters: class1Parameters, +} + +// call to storageClass 1, returning a valid PV +var provision1Success = provisionCall{ + ret: nil, + expectedParameters: class1Parameters, +} + +// call to storageClass 2, returning a valid PV +var provision2Success = provisionCall{ + ret: nil, + expectedParameters: class2Parameters, +} + // Test single call to syncVolume, expecting provisioning to happen. // 1. Fill in the controller with initial data // 2. Call the syncVolume *once*.
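The fixtures above encode the lookup the reworked controller performs before provisioning: the claim's storage-class annotation names a StorageClass in the controller's class cache, and that class supplies both the provisioner plugin name and the Parameters that the mock plugin later checks against expectedParameters. The following is only an illustrative sketch of that resolution step, not the patch's actual provisionClaimOperation; it assumes the package's existing imports (fmt, client/cache, api, extensions) and reuses the getClaimClass helper introduced elsewhere in this change.

// classForClaim is an illustrative sketch: resolve a claim's requested
// StorageClass from the controller's class cache.
func classForClaim(classes cache.Store, claim *api.PersistentVolumeClaim) (*extensions.StorageClass, error) {
	// getClaimClass reads the claim's storage-class annotation (see index.go above).
	className := getClaimClass(claim)
	if className == "" {
		// No class requested; nothing to provision with.
		return nil, nil
	}
	// StorageClasses are cluster-scoped, so the cache key is just the name.
	obj, found, err := classes.GetByKey(className)
	if err != nil {
		return nil, err
	}
	if !found {
		// This is the situation test "11-14 - fail due to non-existing class" exercises.
		return nil, fmt.Errorf("storage class %q not found", className)
	}
	class := obj.(*extensions.StorageClass)
	// class.Provisioner selects the plugin (FindProvisionablePluginByName) and
	// class.Parameters become VolumeOptions.Parameters for that plugin.
	return class, nil
}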
@@ -30,14 +81,14 @@ import ( func TestProvisionSync(t *testing.T) { tests := []controllerTest{ { - // Provision a volume - "11-1 - successful provision", + // Provision a volume (with the default class) + "11-1 - successful provision with storage class 1", novolumes, - newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), - noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision failure - plugin not found @@ -57,7 +108,7 @@ func TestProvisionSync(t *testing.T) { newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), []string{"Warning ProvisioningFailed"}, noerrors, - wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), }, { // Provision failure - Provision returns error @@ -67,40 +118,35 @@ func TestProvisionSync(t *testing.T) { newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), []string{"Warning ProvisioningFailed"}, noerrors, - wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Error}, testSyncClaim), }, { - // Provision success - there is already a volume available, still - // we provision a new one when requested. + // No provisioning if there is a matching volume available "11-6 - provisioning when there is a volume available", - newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), - []*api.PersistentVolume{ - newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), - newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), - }, - newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), - // Binding will be completed in the next syncClaim + newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, annClass), + newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, annClass), newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", api.ClaimBound, annClass, annBoundByController, annBindCompleted), noevents, noerrors, // No provisioning plugin confingure - makes the test fail when // the controller errorneously tries to provision something - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision success? 
- claim is bound before provisioner creates // a volume. "11-7 - claim is bound before provisioning", novolumes, - newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), // The claim would be bound in next syncClaim newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Create a volume before provisionClaimOperation starts. // This similates a parallel controller provisioning the volume. reactor.lock.Lock() - volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned) + volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass) reactor.volumes[volume.Name] = volume reactor.lock.Unlock() }), @@ -110,7 +156,7 @@ func TestProvisionSync(t *testing.T) { // second retry succeeds "11-8 - cannot save provisioned volume", novolumes, - newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), @@ -121,7 +167,7 @@ func TestProvisionSync(t *testing.T) { // will succeed. {"create", "persistentvolumes", errors.New("Mock creation error")}, }, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision success? 
- cannot save provisioned PV five times, @@ -141,8 +187,12 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error4")}, {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, - wrapTestWithControllerConfig(operationDelete, []error{nil}, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)), + wrapTestWithPluginCalls( + nil, // recycle calls + []error{nil}, // delete calls + []provisionCall{provision1Success}, // provision calls + testSyncClaim, + ), }, { // Provision failure - cannot save provisioned PV five times, @@ -163,7 +213,7 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, // No deleteCalls are configured, which results into no deleter plugin available for the volume - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision failure - cannot save provisioned PV five times, @@ -183,16 +233,17 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error4")}, {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, - wrapTestWithControllerConfig( - operationDelete, []error{ + wrapTestWithPluginCalls( + nil, // recycle calls + []error{ // delete calls errors.New("Mock deletion error1"), errors.New("Mock deletion error2"), errors.New("Mock deletion error3"), errors.New("Mock deletion error4"), errors.New("Mock deletion error5"), }, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), - ), + []provisionCall{provision1Success}, // provision calls + testSyncClaim), }, { // Provision failure - cannot save provisioned PV five times, @@ -212,16 +263,37 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error4")}, {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, - wrapTestWithControllerConfig( - operationDelete, []error{ + wrapTestWithPluginCalls( + nil, // recycle calls + []error{ // delete calls errors.New("Mock deletion error1"), nil, - }, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, // provision calls + []provisionCall{provision1Success}, + testSyncClaim, ), }, + { + // Provision a volume (with non-default class) + "11-13 - successful provision with storage class 2", + novolumes, + volumeWithClass("silver", newVolumeArray("pvc-uid11-13", "1Gi", "uid11-13", "claim11-13", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)), + claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", api.ClaimPending)), + // Binding will be completed in the next syncClaim + claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", api.ClaimPending)), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision2Success}, testSyncClaim), + }, + { + // Provision error - non-existing class + "11-14 - fail due to non-existing class", + novolumes, + novolumes, + claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", api.ClaimPending)), + claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", api.ClaimPending)), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), + }, } - runSyncTests(t, tests) + runSyncTests(t, tests, storageClasses, storageClasses[0].Name) } // 
Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -244,20 +316,20 @@ func TestProvisionMultiSync(t *testing.T) { // Provision a volume with binding "12-1 - successful provision", novolumes, - newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), - noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name) } // When provisioning is disabled, provisioning a claim should instantly return nil func TestDisablingDynamicProvisioner(t *testing.T) { - ctrl := newTestController(nil, nil, nil, false) + ctrl := newTestController(nil, nil, nil, nil, false, "") retVal := ctrl.provisionClaim(nil) if retVal != nil { t.Errorf("Expected nil return but got %v", retVal) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/recycle_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/recycle_test.go index 7832bf048656..03832a941ef2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/recycle_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/recycle_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" ) // Test single call to syncVolume, expecting recycling to happen. @@ -39,7 +40,7 @@ func TestRecycleSync(t *testing.T) { noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. - wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, { // recycle volume bound by user @@ -51,7 +52,7 @@ func TestRecycleSync(t *testing.T) { noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. - wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, { // recycle failure - plugin not found @@ -70,7 +71,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedRecycle"}, noerrors, - wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), }, { // recycle failure - recycle returns error @@ -80,7 +81,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedRecycle"}, noerrors, - wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), }, { // recycle success(?) 
- volume is deleted before doRecycle() starts @@ -90,7 +91,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Delete the volume before recycle operation starts reactor.lock.Lock() delete(reactor.volumes, "volume6-6") @@ -107,7 +108,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Mark the volume as Available before the recycler starts reactor.lock.Lock() volume := reactor.volumes["volume6-7"] @@ -128,7 +129,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Mark the volume as Available before the recycler starts reactor.lock.Lock() volume := reactor.volumes["volume6-8"] @@ -148,7 +149,7 @@ func TestRecycleSync(t *testing.T) { noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. 
- wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, { // volume has unknown reclaim policy - failure expected @@ -160,7 +161,7 @@ func TestRecycleSync(t *testing.T) { []string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume, }, } - runSyncTests(t, tests) + runSyncTests(t, tests, []*extensions.StorageClass{}, "") } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -188,9 +189,9 @@ func TestRecycleMultiSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedRecycle"}, noerrors, - wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume), }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, []*extensions.StorageClass{}, "") } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go index e2ae42d8049c..c86e64436e35 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go @@ -450,6 +450,7 @@ var roleColumns = []string{"NAME", "AGE"} var roleBindingColumns = []string{"NAME", "AGE"} var clusterRoleColumns = []string{"NAME", "AGE"} var clusterRoleBindingColumns = []string{"NAME", "AGE"} +var storageClassColumns = []string{"NAME", "TYPE"} // TODO: consider having 'KIND' for third party resource data var thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} @@ -530,6 +531,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(clusterRoleBindingColumns, printClusterRoleBindingList) h.Handler(securityContextConstraintsColumns, printSecurityContextConstraints) h.Handler(securityContextConstraintsColumns, printSecurityContextConstraintsList) + h.Handler(storageClassColumns, printStorageClass) + h.Handler(storageClassColumns, printStorageClassList) } func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { @@ -1884,6 +1887,32 @@ func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, opt return nil } +func printStorageClass(sc *extensions.StorageClass, w io.Writer, options PrintOptions) error { + name := sc.Name + provtype := sc.Provisioner + + if _, err := fmt.Fprintf(w, "%s\t%s\t", name, provtype); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(sc.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, sc.Labels)); err != nil { + return err + } + + return nil +} + +func printStorageClassList(scList *extensions.StorageClassList, w io.Writer, options PrintOptions) error { + for _, sc := range scList.Items { + if err := printStorageClass(&sc, w, options); err != nil { + return err + } + } + return nil +} + func AppendLabels(itemLabels map[string]string, columnLabels []string) string { var buffer bytes.Buffer diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go index 76ebe72adc5a..96730fdd57ab 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go @@ -19,6 +19,8 @@ package aws_ebs import ( "fmt" "os" + "strconv" + "strings" "time" "github.com/golang/glog" @@ -84,6 +86,28 @@ func (util *AWSDiskUtil) CreateVolume(c 
*awsElasticBlockStoreProvisioner) (strin Tags: tags, PVCName: c.options.PVCName, } + // Apply Parameters (case-insensitive). We leave validation of + // the values to the cloud provider. + for k, v := range c.options.Parameters { + switch strings.ToLower(k) { + case "type": + volumeOptions.VolumeType = v + case "zone": + volumeOptions.AvailabilityZone = v + case "iopspergb": + volumeOptions.IOPSPerGB, err = strconv.Atoi(v) + if err != nil { + return "", 0, nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err) + } + default: + return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName()) + } + } + + // TODO: implement c.options.ProvisionerSelector parsing + if c.options.ProvisionerSelector != nil { + return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS") + } name, err := cloud.CreateDisk(volumeOptions) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go index c3c4b596b4f2..8e7597c27af8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go @@ -351,7 +351,7 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err return expected.isAttached, expected.ret } -func (testcase *testcase) CreateDisk(name string, zone string, sizeGb int64, tags map[string]string) error { +func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { return errors.New("Not implemented") } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go index 717b9b0df100..d07d6a08be3b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go @@ -80,17 +80,37 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin // GCE works with gigabytes, convert to GiB with rounding up requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024) - // The disk will be created in the zone in which this code is currently running - // TODO: We should support auto-provisioning volumes in multiple/specified zones - zones, err := cloud.GetAllZones() - if err != nil { - glog.V(2).Infof("error getting zone information from GCE: %v", err) - return "", 0, nil, err + // Apply Parameters (case-insensitive). We leave validation of + // the values to the cloud provider. + diskType := "" + zone := "" + for k, v := range c.options.Parameters { + switch strings.ToLower(k) { + case "type": + diskType = v + case "zone": + zone = v + default: + return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName()) + } } - zone := volume.ChooseZoneForVolume(zones, c.options.PVCName) + if c.options.ProvisionerSelector != nil { + return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE") + } + + if zone == "" { + // No zone specified, choose one randomly in the same region as the + // node is running. 
+ zones, err := cloud.GetAllZones() + if err != nil { + glog.V(2).Infof("error getting zone information from GCE: %v", err) + return "", 0, nil, err + } + zone = volume.ChooseZoneForVolume(zones, c.options.PVCName) + } - err = cloud.CreateDisk(name, zone, int64(requestGB), *c.options.CloudTags) + err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags) if err != nil { glog.V(2).Infof("Error creating GCE PD volume: %v", err) return "", 0, nil, err diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go index 7f0b26a4c298..c681e6743c25 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go @@ -43,17 +43,6 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin } } -func ProbeRecyclableVolumePlugins(recyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error), volumeConfig volume.VolumeConfig) []volume.VolumePlugin { - return []volume.VolumePlugin{ - &hostPathPlugin{ - host: nil, - newRecyclerFunc: recyclerFunc, - newProvisionerFunc: newProvisioner, - config: volumeConfig, - }, - } -} - type hostPathPlugin struct { host volume.VolumeHost // decouple creating Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing. @@ -132,6 +121,9 @@ func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, err } func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + if !plugin.config.ProvisioningEnabled { + return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName()) + } if len(options.AccessModes) == 0 { options.AccessModes = plugin.GetAccessModes() } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go index 77946b02010d..2be5a5f0629f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go @@ -153,7 +153,8 @@ func TestProvisioner(t *testing.T) { err := os.MkdirAll(tempPath, 0750) plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)) + plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}), + volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}} plug, err := plugMgr.FindCreatablePluginBySpec(spec) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 6a58b68812de..87816cf95703 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/types" @@ -55,6 +56,10 @@ type VolumeOptions struct { ClusterName string // Tags to attach to 
the real volume in the cloud provider - e.g. AWS EBS CloudTags *map[string]string + // Volume provisioning parameters from StorageClass + Parameters map[string]string + // Volume selector from PersistentVolumeClaim + ProvisionerSelector *unversioned.LabelSelector } // VolumePlugin is an interface to volume plugins that can be used on a @@ -283,6 +288,10 @@ type VolumeConfig struct { // the system and only understood by the binary hosting the plugin and the // plugin itself. OtherAttributes map[string]string + + // ProvisioningEnabled configures whether provisioning of this plugin is + // enabled or not. Currently used only in host_path plugin. + ProvisioningEnabled bool } // NewSpecFromVolume creates an Spec from an api.Volume @@ -419,7 +428,20 @@ func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVol return nil, fmt.Errorf("no recyclable volume plugin matched") } -// FindDeletablePluginByName fetches a persistent volume plugin by name. If +// FindProvisionablePluginByName fetches a persistent volume plugin by name. If +// no plugin is found, returns error. +func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (ProvisionableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok { + return provisionableVolumePlugin, nil + } + return nil, fmt.Errorf("no provisionable volume plugin matched") +} + +// FindDeletablePluginBySpec fetches a persistent volume plugin by spec. If +// no plugin is found, returns error. func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) @@ -432,6 +454,19 @@ func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolum if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok { return deletableVolumePlugin, nil } return nil, fmt.Errorf("no deletable volume plugin matched") } +// FindDeletablePluginByName fetches a persistent volume plugin by name. If +// no plugin is found, returns error. +func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok { + return deletableVolumePlugin, nil + } + return nil, fmt.Errorf("no deletable volume plugin matched") +} + // FindCreatablePluginBySpec fetches a persistent volume plugin by name. If // no plugin is found, returns error.
func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) { diff --git a/vendor/k8s.io/kubernetes/test/e2e/pd.go b/vendor/k8s.io/kubernetes/test/e2e/pd.go index 28613fa30cfb..d74198d1529c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/pd.go +++ b/vendor/k8s.io/kubernetes/test/e2e/pd.go @@ -469,7 +469,7 @@ func createPD() (string, error) { } tags := map[string]string{} - err = gceCloud.CreateDisk(pdName, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags) + err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags) if err != nil { return "", err } diff --git a/vendor/k8s.io/kubernetes/test/e2e/volume_provisioning.go b/vendor/k8s.io/kubernetes/test/e2e/volume_provisioning.go index 30bfa7bf465d..a10c602e16ed 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/volume_provisioning.go +++ b/vendor/k8s.io/kubernetes/test/e2e/volume_provisioning.go @@ -22,6 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/e2e/framework" @@ -52,6 +53,12 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() { framework.KubeDescribe("DynamicProvisioner", func() { It("should create and delete persistent volumes", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke") + + By("creating a StorageClass") + class := createStorageClass() + c.Extensions().StorageClasses().Create(class) + defer c.Extensions().StorageClasses().Delete(class.Name) + By("creating a claim with a dynamic provisioning annotation") claim := createClaim(ns) defer func() { @@ -130,7 +137,7 @@ func createClaim(ns string) *api.PersistentVolumeClaim { GenerateName: "pvc-", Namespace: ns, Annotations: map[string]string{ - "volume.alpha.kubernetes.io/storage-class": "", + "volume.beta.kubernetes.io/storage-class": "fast", }, }, Spec: api.PersistentVolumeClaimSpec{ @@ -192,3 +199,26 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) { framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace)) } + +func createStorageClass() *extensions.StorageClass { + var pluginName string + + switch { + case framework.ProviderIs("gke"), framework.ProviderIs("gce"): + pluginName = "kubernetes.io/gce-pd" + case framework.ProviderIs("aws"): + pluginName = "kubernetes.io/aws-ebs" + case framework.ProviderIs("openstack"): + pluginName = "kubernetes.io/cinder" + } + + return &extensions.StorageClass{ + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: api.ObjectMeta{ + Name: "fast", + }, + Provisioner: pluginName, + } +}
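To show how the new pieces fit together end to end (StorageClass.Parameters flowing through VolumeOptions.Parameters into the cloud-specific CreateVolume switches above), a class that asks the GCE provisioner for SSD-backed disks in a fixed zone could look roughly like the sketch below. The object is a hypothetical usage example rather than part of the patch; the parameter values themselves are validated only by the cloud provider.

// fastSSD is a hypothetical StorageClass for the GCE provisioner. The keys
// "type" and "zone" are the ones handled case-insensitively by
// GCEDiskUtil.CreateVolume above; any other key makes provisioning fail.
var fastSSD = &extensions.StorageClass{
	TypeMeta:   unversioned.TypeMeta{Kind: "StorageClass"},
	ObjectMeta: api.ObjectMeta{Name: "fast-ssd"},
	// Provisioner names the plugin resolved via FindProvisionablePluginByName.
	Provisioner: "kubernetes.io/gce-pd",
	Parameters: map[string]string{
		"type": "pd-ssd",        // forwarded to cloud.CreateDisk as the diskType argument
		"zone": "us-central1-a", // skips the GetAllZones/ChooseZoneForVolume fallback
	},
}

A claim would then select this class with the beta annotation shown in volume_provisioning.go above, volume.beta.kubernetes.io/storage-class: fast-ssd.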