- """NetApp Dynamic SVM Cinder Driver"""
- from oslo_config import cfg
- from oslo_log import log as logging
- from cinder.volume.drivers.netapp.dataontap.block_cmode import NetAppBlockStorageCmodeLibrary
- from cinder.volume.drivers.netapp.dataontap.client.client_cmode_rest import RestClient as RestNaServer
+ """Metadata-based backend config."""
+
from cinder import exception
- from cinder.volume.drivers.netapp import options
from cinder.volume import driver as volume_driver
+ from cinder.volume.drivers.netapp import options
+ from cinder.volume.drivers.netapp.dataontap.block_cmode import (
+     NetAppBlockStorageCmodeLibrary,
+ )
+ from cinder.volume.drivers.netapp.dataontap.client.client_cmode_rest import (
+     RestClient as RestNaServer,
+ )
+ from oslo_config import cfg
+ from oslo_log import log as logging

- #Dev: from remote_pdb import RemotePdb
+ # Dev: from remote_pdb import RemotePdb

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Register necessary config options under a unique group name 'dynamic_netapp'
- CONF.register_opts(options.netapp_connection_opts, group='dynamic_netapp')
- CONF.register_opts(options.netapp_transport_opts, group='dynamic_netapp')
- CONF.register_opts(options.netapp_basicauth_opts, group='dynamic_netapp')
- CONF.register_opts(options.netapp_provisioning_opts, group='dynamic_netapp')
- CONF.register_opts(options.netapp_cluster_opts, group='dynamic_netapp')
- CONF.register_opts(options.netapp_san_opts, group='dynamic_netapp')
- CONF.register_opts(volume_driver.volume_opts, group='dynamic_netapp')
+ CONF.register_opts(options.netapp_connection_opts, group="dynamic_netapp")
+ CONF.register_opts(options.netapp_transport_opts, group="dynamic_netapp")
+ CONF.register_opts(options.netapp_basicauth_opts, group="dynamic_netapp")
+ CONF.register_opts(options.netapp_provisioning_opts, group="dynamic_netapp")
+ CONF.register_opts(options.netapp_cluster_opts, group="dynamic_netapp")
+ CONF.register_opts(options.netapp_san_opts, group="dynamic_netapp")
+ CONF.register_opts(volume_driver.volume_opts, group="dynamic_netapp")
+
# CONF.set_override("storage_protocol", "NVMe", group="dynamic_netapp")
# CONF.set_override("netapp_storage_protocol", "NVMe", group="dynamic_netapp")
# Upstream NetApp driver registers this option with choices=["iSCSI", "FC"]
# So "NVMe" will raise a ValueError at boot. Instead, we handle this per-volume below.
+
+
class NetappCinderDynamicDriver(NetAppBlockStorageCmodeLibrary):
-     """metadata-based backend config"""
+     """Metadata-based backend config."""

    def __init__(self, *args, **kwargs):
        # NetApp driver requires 'driver_name' and 'driver_protocol'
        # These are mandatory for the superclass constructor
-         driver_name = kwargs.pop('driver_name', 'NetappDynamicCmode')
-         driver_protocol = kwargs.pop('driver_protocol', 'NVMe')
-         super(NetappCinderDynamicDriver, self).__init__(
-             driver_name='NetApp_Dynamic',
-             driver_protocol='dynamic',
-             *args, **kwargs
+         driver_name = kwargs.pop("driver_name", "NetappDynamicCmode")  # noqa: F841
+         driver_protocol = kwargs.pop("driver_protocol", "NVMe")  # noqa: F841
+         super().__init__(
+             *args,
+             driver_name="NetApp_Dynamic",
+             driver_protocol="dynamic",
+             **kwargs,
        )
        self.init_capabilities()  # Needed by scheduler via get_volume_stats()
        self.initialized = False  # Required by set_initialized()
@@ -52,42 +61,48 @@ def get_version(self):
    def init_capabilities(self):
        # Required by Cinder schedulers — called from get_volume_stats()
        # If removed, scheduling filters based on capabilities may fail
+         max_over_subscription_ratio = self.configuration.max_over_subscription_ratio
        self._capabilities = {
-             'thin_provisioning_support': True,
-             'thick_provisioning_support': True,
-             'multiattach': True,
-             'snapshot_support': True,
-             'max_over_subscription_ratio': self.configuration.max_over_subscription_ratio,
+             "thin_provisioning_support": True,
+             "thick_provisioning_support": True,
+             "multiattach": True,
+             "snapshot_support": True,
+             "max_over_subscription_ratio": max_over_subscription_ratio,
        }

    def set_initialized(self):
        # Called by Cinder VolumeManager at the end of init_host()
        # If not defined, VolumeManager may assume the driver is not ready
        self.initialized = True

-     # NetAppBlockStorageCmodeLibrary, which expects self.ssc_library to be initialized during setup.
+     # NetAppBlockStorageCmodeLibrary expects self.ssc_library to be initialized
+     # during setup.
    # In the normal NetApp driver, this is done in do_setup().
-     #Cinder expects drivers to return a dict with a specific schema from get_volume_stats().
+     # Cinder expects drivers to return a dict with a specific
+     # schema from get_volume_stats().
    # This expected schema is:
-     # Defined in cinder.volume.driver.BaseVD.get_volume_stats() ( the base driver class)
+     # Defined in cinder.volume.driver.BaseVD.get_volume_stats() (the base driver class)
    # And used later by scheduler and service capability reporting
-     #cinder/voulme/drivery.py
-     #get_voulme_state() inside BAseVD
-     #_update_volume_stats - contains the keys
-     #_update_pools_and_stats
+     # cinder/volume/driver.py
+     # get_volume_stats() inside BaseVD
+     # _update_volume_stats - contains the keys
+     # _update_pools_and_stats
+
    def get_volume_stats(self, refresh=False):
        # Called from VolumeManager._report_driver_status()
        # Scheduler and Service report use this to advertise backend capabilities
+         # "storage_protocol": "NVMe" is used only for reporting, not actual volume logic
        return {
            "volume_backend_name": "DynamicSVM",
            "vendor_name": "NetApp",
            "driver_version": "1.0",
-             "storage_protocol": "NVMe",  # <- Used only for reporting, not actual volume logic
-             "pools": [self._get_dynamic_pool_stats()]
+             "storage_protocol": "NVMe",
+             "pools": [self._get_dynamic_pool_stats()],
        }

    def _get_dynamic_pool_stats(self):
-         # Used internally by get_volume_stats(). The keys listed here are standard and expected by Cinder's scheduler filters.
+         # Used internally by get_volume_stats(). The keys listed here are standard
+         # and expected by Cinder's scheduler filters.
        # Reference: https://docs.openstack.org/cinder/latest/contributor/drivers.html#reporting-pool-information
        return {
            "pool_name": "dynamic_pool",
@@ -106,41 +121,48 @@ def _get_dynamic_pool_stats(self):

    def get_filter_function(self):
        # Required for Cinder's scheduler. If not present, Cinder logs an AttributeError
-         return self.configuration.safe_get('filter_function') or None
+         return self.configuration.safe_get("filter_function") or None

    def get_goodness_function(self):
-         # paired with get_filter_function for scoring
-         return self.configuration.safe_get('goodness_function') or None
+         # Paired with get_filter_function for scoring
+         return self.configuration.safe_get("goodness_function") or None

    def do_setup(self, context):
        # Required by VolumeDriver base class.
-         # In our case, all backend config is injected per volume, so we do not need static setup.
-         self.ssc_library = ''  # Set to avoid crash in _get_pool_stats()
+         # In our case, all backend config is injected per volume,
+         # so we do not need static setup.
+         self.ssc_library = ""  # Set to avoid crash in _get_pool_stats()

    def check_for_setup_error(self):
        # Called after do_setup() — used to validate static config.
        # In our case, there's no static setup, so it's a no-op.
-         LOG.info("NetApp Dynamic Driver: No setup error check. Validating at volume runtime.")
+         LOG.info(
+             "NetApp Dynamic Driver: No setup error check. Validating at volume runtime."
+         )

    def update_provider_info(self, *args, **kwargs):
        # Called during _sync_provider_info() in VolumeManager.
        # If not implemented, Cinder raises a TypeError during service startup.
-         # wrote this logic because it was registerd with 3 and was called using two args
-         # there is issue with in built drivers callinng logic
+         # Wrote this logic because the method was defined with 3 parameters but
+         # called with 2 args; built-in drivers' calling conventions differ.
        if len(args) == 2:
            volumes, snapshots = args
        elif len(args) >= 3:
            _, volumes, snapshots = args[:3]
        else:
-             raise TypeError("update_provider_info() expects at least volumes and snapshots.")
+             raise TypeError(
+                 "update_provider_info() expects at least volumes and snapshots."
+             )
        return {}, {}

    def set_throttle(self):
-         # got attri error
+         # Got an AttributeError without this no-op override
        pass

-     # Required if inheriting from block_cmode. Default uses ZAPI to delete old QoS groups.
+     # Required if inheriting from block_cmode.
+     # Default uses ZAPI to delete old QoS groups.
    # Since we're using REST and dynamic config, we override this to avoid ZAPI use.
+
    def _mark_qos_policy_group_for_deletion(self, *args, **kwargs):
        LOG.debug("Skipping ZAPI-based QoS deletion in dynamic REST driver.")

@@ -152,21 +174,23 @@ def _init_rest_client(self, hostname, username, password, vserver):
            username=username,
            password=password,
            vserver=vserver,
-             api_trace_pattern='(.*)',
+             api_trace_pattern="(.*)",
            private_key_file=None,
            certificate_file=None,
            ca_certificate_file=None,
            certificate_host_validation=False,
-             transport_type='https',
+             transport_type="https",
            ssl_cert_path=None,
            ssl_cert_password=None,
-             port=443
+             port=443,
        )

    def clean_volume_file_locks(self, volume):
-         # got this, when volume was created and mocked the netApp connection.
-         # when creation failed it started its cleanup process and error oout for this method.
-         # In our case, REST-based NetApp doesn’t need this, but must be present to avoid errors.
+         # Got this when a volume was created with a mocked NetApp connection.
+         # When creation failed, the cleanup path errored out calling this method.
+         # The REST-based NetApp flow doesn't need it, but the method must be
+         # present to avoid errors.
        LOG.debug("No-op clean_volume_file_locks in dynamic driver")

    def create_volume(self, volume):
@@ -183,16 +207,16 @@ def create_volume(self, volume):
        if not all([hostname, username, password, vserver]):
            raise exception.VolumeBackendAPIException(data="Missing NetApp metadata")

-         client = self._init_rest_client(hostname, username, password, vserver)
+         client = self._init_rest_client(hostname, username, password, vserver)  # noqa: F841

        if protocol == "iscsi":
            LOG.info("Provisioning via iSCSI")
        elif protocol == "NVMe":
            LOG.info("Provisioning via NVMe")
-             #todo: inherti these from client_cmode
-             #call create or get NVMe subsystem
-             #add host initiator to subsy,
-             # create name backed by flex vol,
-             # map namespace to subsystem
+             # TODO: Inherit these from client_cmode
+             # Call create or get NVMe subsystem
+             # Add host initiator to subsystem
+             # Create namespace backed by FlexVol
+             # Map namespace to subsystem
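+             # A possible REST sequence for the steps above (sketch only; these
+             # method names are assumptions to verify against RestNaServer):
+             #   subsystem = client.get_nvme_subsystem(host_nqn) or \
+             #       client.create_nvme_subsystem(subsystem_name, os_type)
+             #   client.create_namespace(flexvol_name, namespace_name, size_gb)
+             #   client.map_namespace(namespace_path, subsystem_name)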
        else:
            LOG.info(" .WIP. ")