1 | /* |
2 | * Scsi Host Layer for MPT (Message Passing Technology) based controllers |
3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c |
5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) |
8 | * |
9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License |
11 | * as published by the Free Software Foundation; either version 2 |
12 | * of the License, or (at your option) any later version. |
13 | * |
14 | * This program is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. |
18 | * |
19 | * NO WARRANTY |
20 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR |
21 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT |
22 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, |
23 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is |
24 | * solely responsible for determining the appropriateness of using and |
25 | * distributing the Program and assumes all risks associated with its |
26 | * exercise of rights under this Agreement, including but not limited to |
27 | * the risks and costs of program errors, damage to or loss of data, |
28 | * programs or equipment, and unavailability or interruption of operations. |
29 | |
30 | * DISCLAIMER OF LIABILITY |
31 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY |
32 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
33 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND |
34 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR |
35 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE |
36 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED |
37 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES |
38 | |
39 | * You should have received a copy of the GNU General Public License |
40 | * along with this program; if not, write to the Free Software |
41 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, |
42 | * USA. |
43 | */ |
44 | |
45 | #include <linux/module.h> |
46 | #include <linux/kernel.h> |
47 | #include <linux/init.h> |
48 | #include <linux/errno.h> |
49 | #include <linux/blkdev.h> |
50 | #include <linux/sched.h> |
51 | #include <linux/workqueue.h> |
52 | #include <linux/delay.h> |
53 | #include <linux/pci.h> |
54 | #include <linux/interrupt.h> |
55 | #include <linux/raid_class.h> |
56 | #include <linux/unaligned.h> |
57 | |
58 | #include "mpt3sas_base.h" |
59 | |
60 | #define RAID_CHANNEL 1 |
61 | |
62 | #define PCIE_CHANNEL 2 |
63 | |
64 | /* forward proto's */ |
65 | static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, |
66 | struct _sas_node *sas_expander); |
67 | static void _firmware_event_work(struct work_struct *work); |
68 | |
69 | static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, |
70 | struct _sas_device *sas_device); |
71 | static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
72 | u8 retry_count, u8 is_pd); |
73 | static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); |
74 | static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, |
75 | struct _pcie_device *pcie_device); |
76 | static void |
77 | _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); |
78 | static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); |
79 | static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc); |
80 | |
81 | /* global parameters */ |
82 | LIST_HEAD(mpt3sas_ioc_list); |
83 | /* global ioc lock for list operations */ |
84 | DEFINE_SPINLOCK(gioc_lock); |
85 | |
86 | MODULE_AUTHOR(MPT3SAS_AUTHOR); |
87 | MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION); |
88 | MODULE_LICENSE("GPL"); |
89 | MODULE_VERSION(MPT3SAS_DRIVER_VERSION); |
90 | MODULE_ALIAS("mpt2sas"); |
91 | |
92 | /* local parameters */ |
93 | static u8 scsi_io_cb_idx = -1; |
94 | static u8 tm_cb_idx = -1; |
95 | static u8 ctl_cb_idx = -1; |
96 | static u8 base_cb_idx = -1; |
97 | static u8 port_enable_cb_idx = -1; |
98 | static u8 transport_cb_idx = -1; |
99 | static u8 scsih_cb_idx = -1; |
100 | static u8 config_cb_idx = -1; |
101 | static int mpt2_ids; |
102 | static int mpt3_ids; |
103 | |
104 | static u8 tm_tr_cb_idx = -1 ; |
105 | static u8 tm_tr_volume_cb_idx = -1 ; |
106 | static u8 tm_sas_control_cb_idx = -1; |
107 | |
108 | /* command line options */ |
109 | static u32 logging_level; |
110 | MODULE_PARM_DESC(logging_level, |
111 | " bits for enabling additional logging info (default=0)"); |
112 | |
113 | |
114 | static ushort max_sectors = 0xFFFF; |
115 | module_param(max_sectors, ushort, 0444); |
116 | MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); |
117 | |
118 | |
119 | static int missing_delay[2] = {-1, -1}; |
120 | module_param_array(missing_delay, int, NULL, 0444); |
121 | MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); |
122 | |
/* the SCSI mid-layer global parameter is max_report_luns, which is 511 */
124 | #define MPT3SAS_MAX_LUN (16895) |
125 | static u64 max_lun = MPT3SAS_MAX_LUN; |
126 | module_param(max_lun, ullong, 0444); |
127 | MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); |
128 | |
129 | static ushort hbas_to_enumerate; |
130 | module_param(hbas_to_enumerate, ushort, 0444); |
131 | MODULE_PARM_DESC(hbas_to_enumerate, |
132 | " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \ |
133 | 1 - enumerates only SAS 2.0 generation HBAs\n \ |
134 | 2 - enumerates only SAS 3.0 generation HBAs (default=0)"); |
135 | |
/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Any combination of these bits can be set.
 */
143 | static int diag_buffer_enable = -1; |
144 | module_param(diag_buffer_enable, int, 0444); |
145 | MODULE_PARM_DESC(diag_buffer_enable, |
146 | " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); |
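/*
 * The diag_buffer_enable bits above may be combined; for example, loading
 * the driver with diag_buffer_enable=3 requests both the TRACE (bit 0) and
 * SNAPSHOT (bit 1) buffers, while 7 requests all three buffer types.
 */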
147 | static int disable_discovery = -1; |
148 | module_param(disable_discovery, int, 0444); |
149 | MODULE_PARM_DESC(disable_discovery, " disable discovery "); |
150 | |
151 | |
152 | /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ |
153 | static int prot_mask = -1; |
154 | module_param(prot_mask, int, 0444); |
155 | MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); |
156 | |
157 | static bool enable_sdev_max_qd; |
158 | module_param(enable_sdev_max_qd, bool, 0444); |
159 | MODULE_PARM_DESC(enable_sdev_max_qd, |
160 | "Enable sdev max qd as can_queue, def=disabled(0)"); |
161 | |
162 | static int multipath_on_hba = -1; |
163 | module_param(multipath_on_hba, int, 0); |
164 | MODULE_PARM_DESC(multipath_on_hba, |
165 | "Multipath support to add same target device\n\t\t" |
166 | "as many times as it is visible to HBA from various paths\n\t\t" |
167 | "(by default:\n\t\t" |
168 | "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t" |
169 | "\t SAS 3.5 HBA - This will be enabled)"); |
170 | |
171 | static int host_tagset_enable = 1; |
172 | module_param(host_tagset_enable, int, 0444); |
173 | MODULE_PARM_DESC(host_tagset_enable, |
174 | "Shared host tagset enable/disable Default: enable(1)"); |
175 | |
176 | /* raid transport support */ |
177 | static struct raid_template *mpt3sas_raid_template; |
178 | static struct raid_template *mpt2sas_raid_template; |
179 | |
180 | |
181 | /** |
182 | * struct sense_info - common structure for obtaining sense keys |
183 | * @skey: sense key |
184 | * @asc: additional sense code |
185 | * @ascq: additional sense code qualifier |
186 | */ |
187 | struct sense_info { |
188 | u8 skey; |
189 | u8 asc; |
190 | u8 ascq; |
191 | }; |
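/*
 * Illustrative sketch (not called by the driver): assuming fixed-format
 * sense data, a struct sense_info can be filled from a raw sense buffer as
 * shown below. The helper name is hypothetical; the driver performs the
 * equivalent parsing internally when it decodes sense data.
 */
static inline void _example_fill_sense_info(struct sense_info *data,
	const u8 *sense_buf)
{
	data->skey = sense_buf[2] & 0x0F;	/* sense key */
	data->asc = sense_buf[12];		/* additional sense code */
	data->ascq = sense_buf[13];		/* additional sense code qualifier */
}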
192 | |
193 | #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) |
194 | #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC) |
195 | #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) |
196 | #define MPT3SAS_ABRT_TASK_SET (0xFFFE) |
197 | #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) |
198 | /** |
199 | * struct fw_event_work - firmware event struct |
200 | * @list: link list framework |
201 | * @work: work object (ioc->fault_reset_work_q) |
202 | * @ioc: per adapter object |
203 | * @device_handle: device handle |
204 | * @VF_ID: virtual function id |
205 | * @VP_ID: virtual port id |
206 | * @ignore: flag meaning this event has been marked to ignore |
207 | * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h |
208 | * @refcount: kref for this event |
209 | * @event_data: reply event data payload follows |
210 | * |
211 | * This object stored on ioc->fw_event_list. |
212 | */ |
213 | struct fw_event_work { |
214 | struct list_head list; |
215 | struct work_struct work; |
216 | |
217 | struct MPT3SAS_ADAPTER *ioc; |
218 | u16 device_handle; |
219 | u8 VF_ID; |
220 | u8 VP_ID; |
221 | u8 ignore; |
222 | u16 event; |
223 | struct kref refcount; |
224 | char event_data[] __aligned(4); |
225 | }; |
226 | |
static void fw_event_work_free(struct kref *r)
{
	kfree(container_of(r, struct fw_event_work, refcount));
}

static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}

static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}

static struct fw_event_work *alloc_fw_event_work(int len)
{
	struct fw_event_work *fw_event;

	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
	if (!fw_event)
		return NULL;

	kref_init(&fw_event->refcount);
	return fw_event;
}
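
/*
 * Illustrative sketch (not called by the driver): typical lifecycle of a
 * fw_event_work object using the helpers above. The payload length and the
 * helper name are hypothetical; real callers also queue fw_event->work on
 * the adapter's firmware event workqueue.
 */
static inline struct fw_event_work *_example_fw_event_lifecycle(void)
{
	struct fw_event_work *fw_event;

	/* allocated with its refcount initialized to 1 */
	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return NULL;

	fw_event_work_get(fw_event);	/* take an additional reference */
	fw_event_work_put(fw_event);	/* and drop it again */

	return fw_event;	/* caller still holds the initial reference */
}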
253 | |
254 | /** |
255 | * struct _scsi_io_transfer - scsi io transfer |
256 | * @handle: sas device handle (assigned by firmware) |
257 | * @is_raid: flag set for hidden raid components |
 * @dir: DMA data direction (DMA_TO_DEVICE or DMA_FROM_DEVICE)
259 | * @data_length: data transfer length |
260 | * @data_dma: dma pointer to data |
261 | * @sense: sense data |
262 | * @lun: lun number |
263 | * @cdb_length: cdb length |
264 | * @cdb: cdb contents |
265 | * @timeout: timeout for this command |
266 | * @VF_ID: virtual function id |
267 | * @VP_ID: virtual port id |
268 | * @valid_reply: flag set for reply message |
269 | * @sense_length: sense length |
270 | * @ioc_status: ioc status |
271 | * @scsi_state: scsi state |
 * @scsi_status: scsi status
273 | * @log_info: log information |
274 | * @transfer_length: data length transfer when there is a reply message |
275 | * |
276 | * Used for sending internal scsi commands to devices within this module. |
277 | * Refer to _scsi_send_scsi_io(). |
278 | */ |
279 | struct _scsi_io_transfer { |
280 | u16 handle; |
281 | u8 is_raid; |
282 | enum dma_data_direction dir; |
283 | u32 data_length; |
284 | dma_addr_t data_dma; |
285 | u8 sense[SCSI_SENSE_BUFFERSIZE]; |
286 | u32 lun; |
287 | u8 cdb_length; |
288 | u8 cdb[32]; |
289 | u8 timeout; |
290 | u8 VF_ID; |
291 | u8 VP_ID; |
292 | u8 valid_reply; |
293 | /* the following bits are only valid when 'valid_reply = 1' */ |
294 | u32 sense_length; |
295 | u16 ioc_status; |
296 | u8 scsi_state; |
297 | u8 scsi_status; |
298 | u32 log_info; |
299 | u32 transfer_length; |
300 | }; |
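
/*
 * Illustrative sketch (not called by the driver): preparing a
 * _scsi_io_transfer for an internal 6-byte INQUIRY before handing it to the
 * module's internal I/O path (see _scsi_send_scsi_io()). The helper name and
 * the timeout value are hypothetical; data_dma must point to a DMA-mapped
 * buffer of at least data_length bytes.
 */
static inline void _example_init_inquiry(struct _scsi_io_transfer *transfer,
	u16 handle, dma_addr_t data_dma, u32 data_length)
{
	memset(transfer, 0, sizeof(*transfer));
	transfer->handle = handle;		/* firmware assigned device handle */
	transfer->dir = DMA_FROM_DEVICE;	/* INQUIRY returns data */
	transfer->data_dma = data_dma;
	transfer->data_length = data_length;
	transfer->cdb_length = 6;
	transfer->cdb[0] = 0x12;		/* INQUIRY opcode */
	transfer->cdb[4] = data_length & 0xff;	/* allocation length */
	transfer->timeout = 10;			/* seconds */
}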
301 | |
302 | /** |
303 | * _scsih_set_debug_level - global setting of ioc->logging_level. |
304 | * @val: value of the parameter to be set |
305 | * @kp: pointer to kernel_param structure |
306 | * |
307 | * Note: The logging levels are defined in mpt3sas_debug.h. |
308 | */ |
static int
_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	pr_info("setting logging_level(0x%08x)\n", logging_level);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->logging_level = logging_level;
	spin_unlock(&gioc_lock);
	return 0;
}
325 | module_param_call(logging_level, _scsih_set_debug_level, param_get_int, |
326 | &logging_level, 0644); |
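/*
 * Because logging_level is registered with 0644 permissions via
 * module_param_call(), it can also be updated at runtime through
 * /sys/module/mpt3sas/parameters/logging_level; _scsih_set_debug_level()
 * then propagates the new value to every IOC on mpt3sas_ioc_list.
 */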
327 | |
328 | /** |
329 | * _scsih_srch_boot_sas_address - search based on sas_address |
330 | * @sas_address: sas address |
331 | * @boot_device: boot device object from bios page 2 |
332 | * |
333 | * Return: 1 when there's a match, 0 means no match. |
334 | */ |
335 | static inline int |
336 | _scsih_srch_boot_sas_address(u64 sas_address, |
337 | Mpi2BootDeviceSasWwid_t *boot_device) |
338 | { |
339 | return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; |
340 | } |
341 | |
342 | /** |
343 | * _scsih_srch_boot_device_name - search based on device name |
 * @device_name: device name specified in the IDENTIFY frame
345 | * @boot_device: boot device object from bios page 2 |
346 | * |
347 | * Return: 1 when there's a match, 0 means no match. |
348 | */ |
349 | static inline int |
350 | _scsih_srch_boot_device_name(u64 device_name, |
351 | Mpi2BootDeviceDeviceName_t *boot_device) |
352 | { |
353 | return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; |
354 | } |
355 | |
356 | /** |
357 | * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot |
358 | * @enclosure_logical_id: enclosure logical id |
359 | * @slot_number: slot number |
360 | * @boot_device: boot device object from bios page 2 |
361 | * |
362 | * Return: 1 when there's a match, 0 means no match. |
363 | */ |
364 | static inline int |
365 | _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, |
366 | Mpi2BootDeviceEnclosureSlot_t *boot_device) |
367 | { |
368 | return (enclosure_logical_id == le64_to_cpu(boot_device-> |
369 | EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> |
370 | SlotNumber)) ? 1 : 0; |
371 | } |
372 | |
373 | /** |
374 | * mpt3sas_get_port_by_id - get hba port entry corresponding to provided |
375 | * port number from port list |
376 | * @ioc: per adapter object |
377 | * @port_id: port number |
 * @bypass_dirty_port_flag: when set, return the matching hba port entry
 * even if that entry is marked as dirty.
380 | * |
381 | * Search for hba port entry corresponding to provided port number, |
382 | * if available return port object otherwise return NULL. |
383 | */ |
384 | struct hba_port * |
385 | mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, |
386 | u8 port_id, u8 bypass_dirty_port_flag) |
387 | { |
388 | struct hba_port *port, *port_next; |
389 | |
390 | /* |
391 | * When multipath_on_hba is disabled then |
392 | * search the hba_port entry using default |
393 | * port id i.e. 255 |
394 | */ |
395 | if (!ioc->multipath_on_hba) |
396 | port_id = MULTIPATH_DISABLED_PORT_ID; |
397 | |
398 | list_for_each_entry_safe(port, port_next, |
399 | &ioc->port_table_list, list) { |
400 | if (port->port_id != port_id) |
401 | continue; |
402 | if (bypass_dirty_port_flag) |
403 | return port; |
404 | if (port->flags & HBA_PORT_FLAG_DIRTY_PORT) |
405 | continue; |
406 | return port; |
407 | } |
408 | |
409 | /* |
410 | * Allocate hba_port object for default port id (i.e. 255) |
411 | * when multipath_on_hba is disabled for the HBA. |
412 | * And add this object to port_table_list. |
413 | */ |
414 | if (!ioc->multipath_on_hba) { |
415 | port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); |
416 | if (!port) |
417 | return NULL; |
418 | |
419 | port->port_id = port_id; |
420 | ioc_info(ioc, |
421 | "hba_port entry: %p, port: %d is added to hba_port list\n", |
422 | port, port->port_id); |
		list_add_tail(&port->list,
		    &ioc->port_table_list);
425 | return port; |
426 | } |
427 | return NULL; |
428 | } |
429 | |
430 | /** |
431 | * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number |
432 | * @ioc: per adapter object |
433 | * @port: hba_port object |
434 | * @phy: phy number |
435 | * |
436 | * Return virtual_phy object corresponding to phy number. |
437 | */ |
438 | struct virtual_phy * |
439 | mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, |
440 | struct hba_port *port, u32 phy) |
441 | { |
442 | struct virtual_phy *vphy, *vphy_next; |
443 | |
444 | if (!port->vphys_mask) |
445 | return NULL; |
446 | |
447 | list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { |
448 | if (vphy->phy_mask & (1 << phy)) |
449 | return vphy; |
450 | } |
451 | return NULL; |
452 | } |
453 | |
454 | /** |
455 | * _scsih_is_boot_device - search for matching boot device. |
456 | * @sas_address: sas address |
 * @device_name: device name specified in the IDENTIFY frame
458 | * @enclosure_logical_id: enclosure logical id |
459 | * @slot: slot number |
460 | * @form: specifies boot device form |
461 | * @boot_device: boot device object from bios page 2 |
462 | * |
463 | * Return: 1 when there's a match, 0 means no match. |
464 | */ |
465 | static int |
466 | _scsih_is_boot_device(u64 sas_address, u64 device_name, |
467 | u64 enclosure_logical_id, u16 slot, u8 form, |
468 | Mpi2BiosPage2BootDevice_t *boot_device) |
469 | { |
470 | int rc = 0; |
471 | |
472 | switch (form) { |
473 | case MPI2_BIOSPAGE2_FORM_SAS_WWID: |
474 | if (!sas_address) |
475 | break; |
		rc = _scsih_srch_boot_sas_address(
		    sas_address, &boot_device->SasWwid);
		break;
	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
		if (!enclosure_logical_id)
			break;
		rc = _scsih_srch_boot_encl_slot(
		    enclosure_logical_id,
		    slot, &boot_device->EnclosureSlot);
		break;
	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
		if (!device_name)
			break;
		rc = _scsih_srch_boot_device_name(
		    device_name, &boot_device->DeviceName);
491 | break; |
492 | case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: |
493 | break; |
494 | } |
495 | |
496 | return rc; |
497 | } |
498 | |
499 | /** |
 * _scsih_get_sas_address - obtain the sas_address for a given device handle
 * @ioc: per adapter object
502 | * @handle: device handle |
503 | * @sas_address: sas address |
504 | * |
505 | * Return: 0 success, non-zero when failure |
506 | */ |
507 | static int |
508 | _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
509 | u64 *sas_address) |
510 | { |
511 | Mpi2SasDevicePage0_t sas_device_pg0; |
512 | Mpi2ConfigReply_t mpi_reply; |
513 | u32 ioc_status; |
514 | |
515 | *sas_address = 0; |
516 | |
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
519 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
520 | __FILE__, __LINE__, __func__); |
521 | return -ENXIO; |
522 | } |
523 | |
524 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
525 | if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { |
526 | /* For HBA, vSES doesn't return HBA SAS address. Instead return |
527 | * vSES's sas address. |
528 | */ |
529 | if ((handle <= ioc->sas_hba.num_phys) && |
530 | (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & |
531 | MPI2_SAS_DEVICE_INFO_SEP))) |
532 | *sas_address = ioc->sas_hba.sas_address; |
533 | else |
534 | *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); |
535 | return 0; |
536 | } |
537 | |
538 | /* we hit this because the given parent handle doesn't exist */ |
539 | if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) |
540 | return -ENXIO; |
541 | |
542 | /* else error case */ |
543 | ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", |
544 | handle, ioc_status, __FILE__, __LINE__, __func__); |
545 | return -EIO; |
546 | } |
547 | |
548 | /** |
549 | * _scsih_determine_boot_device - determine boot device. |
550 | * @ioc: per adapter object |
551 | * @device: sas_device or pcie_device object |
552 | * @channel: SAS or PCIe channel |
553 | * |
 * Determines whether this device should be the first device reported to
 * scsi-ml or the sas transport, for the purpose of persistent boot device
 * support. There are primary, alternate, and current entries in bios page 2.
 * The order of priority is primary, alternate, then current. This routine
 * saves the corresponding device object; the saved data is used later in
 * _scsih_probe_boot_devices().
560 | */ |
561 | static void |
562 | _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, |
563 | u32 channel) |
564 | { |
565 | struct _sas_device *sas_device; |
566 | struct _pcie_device *pcie_device; |
567 | struct _raid_device *raid_device; |
568 | u64 sas_address; |
569 | u64 device_name; |
570 | u64 enclosure_logical_id; |
571 | u16 slot; |
572 | |
573 | /* only process this function when driver loads */ |
574 | if (!ioc->is_driver_loading) |
575 | return; |
576 | |
577 | /* no Bios, return immediately */ |
578 | if (!ioc->bios_pg3.BiosVersion) |
579 | return; |
580 | |
581 | if (channel == RAID_CHANNEL) { |
582 | raid_device = device; |
583 | sas_address = raid_device->wwid; |
584 | device_name = 0; |
585 | enclosure_logical_id = 0; |
586 | slot = 0; |
587 | } else if (channel == PCIE_CHANNEL) { |
588 | pcie_device = device; |
589 | sas_address = pcie_device->wwid; |
590 | device_name = 0; |
591 | enclosure_logical_id = 0; |
592 | slot = 0; |
593 | } else { |
594 | sas_device = device; |
595 | sas_address = sas_device->sas_address; |
596 | device_name = sas_device->device_name; |
597 | enclosure_logical_id = sas_device->enclosure_logical_id; |
598 | slot = sas_device->slot; |
599 | } |
600 | |
601 | if (!ioc->req_boot_device.device) { |
602 | if (_scsih_is_boot_device(sas_address, device_name, |
603 | enclosure_logical_id, slot, |
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
607 | dinitprintk(ioc, |
608 | ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n", |
609 | __func__, (u64)sas_address)); |
610 | ioc->req_boot_device.device = device; |
611 | ioc->req_boot_device.channel = channel; |
612 | } |
613 | } |
614 | |
615 | if (!ioc->req_alt_boot_device.device) { |
616 | if (_scsih_is_boot_device(sas_address, device_name, |
617 | enclosure_logical_id, slot, |
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
621 | dinitprintk(ioc, |
622 | ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n", |
623 | __func__, (u64)sas_address)); |
624 | ioc->req_alt_boot_device.device = device; |
625 | ioc->req_alt_boot_device.channel = channel; |
626 | } |
627 | } |
628 | |
629 | if (!ioc->current_boot_device.device) { |
630 | if (_scsih_is_boot_device(sas_address, device_name, |
631 | enclosure_logical_id, slot, |
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
635 | dinitprintk(ioc, |
636 | ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n", |
637 | __func__, (u64)sas_address)); |
638 | ioc->current_boot_device.device = device; |
639 | ioc->current_boot_device.channel = channel; |
640 | } |
641 | } |
642 | } |
643 | |
644 | static struct _sas_device * |
645 | __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, |
646 | struct MPT3SAS_TARGET *tgt_priv) |
647 | { |
648 | struct _sas_device *ret; |
649 | |
650 | assert_spin_locked(&ioc->sas_device_lock); |
651 | |
652 | ret = tgt_priv->sas_dev; |
653 | if (ret) |
		sas_device_get(ret);
655 | |
656 | return ret; |
657 | } |
658 | |
659 | static struct _sas_device * |
660 | mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, |
661 | struct MPT3SAS_TARGET *tgt_priv) |
662 | { |
663 | struct _sas_device *ret; |
664 | unsigned long flags; |
665 | |
666 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
667 | ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv); |
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
669 | |
670 | return ret; |
671 | } |
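
/*
 * Illustrative sketch (not called by the driver): callers of the lookup
 * helpers above receive a reference-counted sas_device and are expected to
 * drop that reference with sas_device_put() once they are done with it.
 * The helper name is hypothetical.
 */
static inline void _example_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_from_target(ioc, tgt_priv);
	if (!sas_device)
		return;

	/* ... use sas_device fields here ... */

	sas_device_put(sas_device);	/* drop the reference taken by the lookup */
}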
672 | |
673 | static struct _pcie_device * |
674 | __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, |
675 | struct MPT3SAS_TARGET *tgt_priv) |
676 | { |
677 | struct _pcie_device *ret; |
678 | |
679 | assert_spin_locked(&ioc->pcie_device_lock); |
680 | |
681 | ret = tgt_priv->pcie_dev; |
682 | if (ret) |
		pcie_device_get(ret);
684 | |
685 | return ret; |
686 | } |
687 | |
688 | /** |
689 | * mpt3sas_get_pdev_from_target - pcie device search |
690 | * @ioc: per adapter object |
691 | * @tgt_priv: starget private object |
692 | * |
693 | * Context: This function will acquire ioc->pcie_device_lock and will release |
694 | * before returning the pcie_device object. |
695 | * |
696 | * This searches for pcie_device from target, then return pcie_device object. |
697 | */ |
698 | static struct _pcie_device * |
699 | mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, |
700 | struct MPT3SAS_TARGET *tgt_priv) |
701 | { |
702 | struct _pcie_device *ret; |
703 | unsigned long flags; |
704 | |
705 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
706 | ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv); |
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
708 | |
709 | return ret; |
710 | } |
711 | |
712 | |
713 | /** |
714 | * __mpt3sas_get_sdev_by_rphy - sas device search |
715 | * @ioc: per adapter object |
716 | * @rphy: sas_rphy pointer |
717 | * |
718 | * Context: This function will acquire ioc->sas_device_lock and will release |
719 | * before returning the sas_device object. |
720 | * |
721 | * This searches for sas_device from rphy object |
722 | * then return sas_device object. |
723 | */ |
724 | struct _sas_device * |
725 | __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, |
726 | struct sas_rphy *rphy) |
727 | { |
728 | struct _sas_device *sas_device; |
729 | |
730 | assert_spin_locked(&ioc->sas_device_lock); |
731 | |
732 | list_for_each_entry(sas_device, &ioc->sas_device_list, list) { |
733 | if (sas_device->rphy != rphy) |
734 | continue; |
		sas_device_get(sas_device);
736 | return sas_device; |
737 | } |
738 | |
739 | sas_device = NULL; |
740 | list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { |
741 | if (sas_device->rphy != rphy) |
742 | continue; |
		sas_device_get(sas_device);
744 | return sas_device; |
745 | } |
746 | |
747 | return NULL; |
748 | } |
749 | |
750 | /** |
 * __mpt3sas_get_sdev_by_addr - get the _sas_device object corresponding to
 * the provided sas address from the sas device lists
753 | * @ioc: per adapter object |
754 | * @sas_address: device sas address |
755 | * @port: port number |
756 | * |
757 | * Search for _sas_device object corresponding to provided sas address, |
758 | * if available return _sas_device object address otherwise return NULL. |
759 | */ |
760 | struct _sas_device * |
761 | __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, |
762 | u64 sas_address, struct hba_port *port) |
763 | { |
764 | struct _sas_device *sas_device; |
765 | |
766 | if (!port) |
767 | return NULL; |
768 | |
769 | assert_spin_locked(&ioc->sas_device_lock); |
770 | |
771 | list_for_each_entry(sas_device, &ioc->sas_device_list, list) { |
772 | if (sas_device->sas_address != sas_address) |
773 | continue; |
774 | if (sas_device->port != port) |
775 | continue; |
		sas_device_get(sas_device);
777 | return sas_device; |
778 | } |
779 | |
780 | list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { |
781 | if (sas_device->sas_address != sas_address) |
782 | continue; |
783 | if (sas_device->port != port) |
784 | continue; |
		sas_device_get(sas_device);
786 | return sas_device; |
787 | } |
788 | |
789 | return NULL; |
790 | } |
791 | |
792 | /** |
793 | * mpt3sas_get_sdev_by_addr - sas device search |
794 | * @ioc: per adapter object |
795 | * @sas_address: sas address |
796 | * @port: hba port entry |
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning.
798 | * |
799 | * This searches for sas_device based on sas_address & port number, |
800 | * then return sas_device object. |
801 | */ |
802 | struct _sas_device * |
803 | mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, |
804 | u64 sas_address, struct hba_port *port) |
805 | { |
806 | struct _sas_device *sas_device; |
807 | unsigned long flags; |
808 | |
809 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
810 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, |
811 | sas_address, port); |
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
813 | |
814 | return sas_device; |
815 | } |
816 | |
817 | static struct _sas_device * |
818 | __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
819 | { |
820 | struct _sas_device *sas_device; |
821 | |
822 | assert_spin_locked(&ioc->sas_device_lock); |
823 | |
824 | list_for_each_entry(sas_device, &ioc->sas_device_list, list) |
825 | if (sas_device->handle == handle) |
826 | goto found_device; |
827 | |
828 | list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) |
829 | if (sas_device->handle == handle) |
830 | goto found_device; |
831 | |
832 | return NULL; |
833 | |
834 | found_device: |
	sas_device_get(sas_device);
836 | return sas_device; |
837 | } |
838 | |
839 | /** |
840 | * mpt3sas_get_sdev_by_handle - sas device search |
841 | * @ioc: per adapter object |
842 | * @handle: sas device handle (assigned by firmware) |
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning.
 *
 * This searches for sas_device based on handle, then returns the sas_device
 * object.
847 | */ |
848 | struct _sas_device * |
849 | mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
850 | { |
851 | struct _sas_device *sas_device; |
852 | unsigned long flags; |
853 | |
854 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
855 | sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); |
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
857 | |
858 | return sas_device; |
859 | } |
860 | |
861 | /** |
862 | * _scsih_display_enclosure_chassis_info - display device location info |
863 | * @ioc: per adapter object |
864 | * @sas_device: per sas device object |
865 | * @sdev: scsi device struct |
866 | * @starget: scsi target struct |
867 | */ |
868 | static void |
869 | _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, |
870 | struct _sas_device *sas_device, struct scsi_device *sdev, |
871 | struct scsi_target *starget) |
872 | { |
873 | if (sdev) { |
874 | if (sas_device->enclosure_handle != 0) |
875 | sdev_printk(KERN_INFO, sdev, |
876 | "enclosure logical id (0x%016llx), slot(%d) \n", |
877 | (unsigned long long) |
878 | sas_device->enclosure_logical_id, |
879 | sas_device->slot); |
880 | if (sas_device->connector_name[0] != '\0') |
881 | sdev_printk(KERN_INFO, sdev, |
882 | "enclosure level(0x%04x), connector name( %s)\n", |
883 | sas_device->enclosure_level, |
884 | sas_device->connector_name); |
885 | if (sas_device->is_chassis_slot_valid) |
886 | sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", |
887 | sas_device->chassis_slot); |
888 | } else if (starget) { |
889 | if (sas_device->enclosure_handle != 0) |
890 | starget_printk(KERN_INFO, starget, |
891 | "enclosure logical id(0x%016llx), slot(%d) \n", |
892 | (unsigned long long) |
893 | sas_device->enclosure_logical_id, |
894 | sas_device->slot); |
895 | if (sas_device->connector_name[0] != '\0') |
896 | starget_printk(KERN_INFO, starget, |
897 | "enclosure level(0x%04x), connector name( %s)\n", |
898 | sas_device->enclosure_level, |
899 | sas_device->connector_name); |
900 | if (sas_device->is_chassis_slot_valid) |
901 | starget_printk(KERN_INFO, starget, |
902 | "chassis slot(0x%04x)\n", |
903 | sas_device->chassis_slot); |
904 | } else { |
905 | if (sas_device->enclosure_handle != 0) |
906 | ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n", |
907 | (u64)sas_device->enclosure_logical_id, |
908 | sas_device->slot); |
909 | if (sas_device->connector_name[0] != '\0') |
910 | ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n", |
911 | sas_device->enclosure_level, |
912 | sas_device->connector_name); |
913 | if (sas_device->is_chassis_slot_valid) |
914 | ioc_info(ioc, "chassis slot(0x%04x)\n", |
915 | sas_device->chassis_slot); |
916 | } |
917 | } |
918 | |
919 | /** |
920 | * _scsih_sas_device_remove - remove sas_device from list. |
921 | * @ioc: per adapter object |
922 | * @sas_device: the sas_device object |
923 | * Context: This function will acquire ioc->sas_device_lock. |
924 | * |
925 | * If sas_device is on the list, remove it and decrement its reference count. |
926 | */ |
927 | static void |
928 | _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, |
929 | struct _sas_device *sas_device) |
930 | { |
931 | unsigned long flags; |
932 | |
933 | if (!sas_device) |
934 | return; |
935 | ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", |
936 | sas_device->handle, (u64)sas_device->sas_address); |
937 | |
938 | _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); |
939 | |
940 | /* |
941 | * The lock serializes access to the list, but we still need to verify |
942 | * that nobody removed the entry while we were waiting on the lock. |
943 | */ |
944 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
950 | } |
951 | |
952 | /** |
953 | * _scsih_device_remove_by_handle - removing device object by handle |
954 | * @ioc: per adapter object |
955 | * @handle: device handle |
956 | */ |
957 | static void |
958 | _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
959 | { |
960 | struct _sas_device *sas_device; |
961 | unsigned long flags; |
962 | |
963 | if (ioc->shost_recovery) |
964 | return; |
965 | |
966 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
967 | sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); |
968 | if (sas_device) { |
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
976 | } |
977 | } |
978 | |
979 | /** |
980 | * mpt3sas_device_remove_by_sas_address - removing device object by |
981 | * sas address & port number |
982 | * @ioc: per adapter object |
983 | * @sas_address: device sas_address |
984 | * @port: hba port entry |
985 | * |
986 | * Return nothing. |
987 | */ |
988 | void |
989 | mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, |
990 | u64 sas_address, struct hba_port *port) |
991 | { |
992 | struct _sas_device *sas_device; |
993 | unsigned long flags; |
994 | |
995 | if (ioc->shost_recovery) |
996 | return; |
997 | |
998 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
999 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port); |
1000 | if (sas_device) { |
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
1008 | } |
1009 | } |
1010 | |
1011 | /** |
1012 | * _scsih_sas_device_add - insert sas_device to the list. |
1013 | * @ioc: per adapter object |
1014 | * @sas_device: the sas_device object |
1015 | * Context: This function will acquire ioc->sas_device_lock. |
1016 | * |
1017 | * Adding new object to the ioc->sas_device_list. |
1018 | */ |
1019 | static void |
1020 | _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, |
1021 | struct _sas_device *sas_device) |
1022 | { |
1023 | unsigned long flags; |
1024 | |
1025 | dewtprintk(ioc, |
1026 | ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", |
1027 | __func__, sas_device->handle, |
1028 | (u64)sas_device->sas_address)); |
1029 | |
1030 | dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, |
1031 | NULL, NULL)); |
1032 | |
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When async scanning is enabled, it's not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
1061 | } |
1062 | |
1063 | /** |
1064 | * _scsih_sas_device_init_add - insert sas_device to the list. |
1065 | * @ioc: per adapter object |
1066 | * @sas_device: the sas_device object |
1067 | * Context: This function will acquire ioc->sas_device_lock. |
1068 | * |
1069 | * Adding new object at driver load time to the ioc->sas_device_init_list. |
1070 | */ |
1071 | static void |
1072 | _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc, |
1073 | struct _sas_device *sas_device) |
1074 | { |
1075 | unsigned long flags; |
1076 | |
1077 | dewtprintk(ioc, |
1078 | ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", |
1079 | __func__, sas_device->handle, |
1080 | (u64)sas_device->sas_address)); |
1081 | |
1082 | dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, |
1083 | NULL, NULL)); |
1084 | |
1085 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1090 | } |
1091 | |
1092 | |
1093 | static struct _pcie_device * |
1094 | __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) |
1095 | { |
1096 | struct _pcie_device *pcie_device; |
1097 | |
1098 | assert_spin_locked(&ioc->pcie_device_lock); |
1099 | |
1100 | list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) |
1101 | if (pcie_device->wwid == wwid) |
1102 | goto found_device; |
1103 | |
1104 | list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) |
1105 | if (pcie_device->wwid == wwid) |
1106 | goto found_device; |
1107 | |
1108 | return NULL; |
1109 | |
1110 | found_device: |
	pcie_device_get(pcie_device);
1112 | return pcie_device; |
1113 | } |
1114 | |
1115 | |
1116 | /** |
1117 | * mpt3sas_get_pdev_by_wwid - pcie device search |
1118 | * @ioc: per adapter object |
1119 | * @wwid: wwid |
1120 | * |
1121 | * Context: This function will acquire ioc->pcie_device_lock and will release |
1122 | * before returning the pcie_device object. |
1123 | * |
1124 | * This searches for pcie_device based on wwid, then return pcie_device object. |
1125 | */ |
1126 | static struct _pcie_device * |
1127 | mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) |
1128 | { |
1129 | struct _pcie_device *pcie_device; |
1130 | unsigned long flags; |
1131 | |
1132 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
1133 | pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); |
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1135 | |
1136 | return pcie_device; |
1137 | } |
1138 | |
1139 | |
1140 | static struct _pcie_device * |
1141 | __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, |
1142 | int channel) |
1143 | { |
1144 | struct _pcie_device *pcie_device; |
1145 | |
1146 | assert_spin_locked(&ioc->pcie_device_lock); |
1147 | |
1148 | list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) |
1149 | if (pcie_device->id == id && pcie_device->channel == channel) |
1150 | goto found_device; |
1151 | |
1152 | list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) |
1153 | if (pcie_device->id == id && pcie_device->channel == channel) |
1154 | goto found_device; |
1155 | |
1156 | return NULL; |
1157 | |
1158 | found_device: |
	pcie_device_get(pcie_device);
1160 | return pcie_device; |
1161 | } |
1162 | |
1163 | static struct _pcie_device * |
1164 | __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
1165 | { |
1166 | struct _pcie_device *pcie_device; |
1167 | |
1168 | assert_spin_locked(&ioc->pcie_device_lock); |
1169 | |
1170 | list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) |
1171 | if (pcie_device->handle == handle) |
1172 | goto found_device; |
1173 | |
1174 | list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) |
1175 | if (pcie_device->handle == handle) |
1176 | goto found_device; |
1177 | |
1178 | return NULL; |
1179 | |
1180 | found_device: |
	pcie_device_get(pcie_device);
1182 | return pcie_device; |
1183 | } |
1184 | |
1185 | |
1186 | /** |
1187 | * mpt3sas_get_pdev_by_handle - pcie device search |
1188 | * @ioc: per adapter object |
1189 | * @handle: Firmware device handle |
1190 | * |
1191 | * Context: This function will acquire ioc->pcie_device_lock and will release |
1192 | * before returning the pcie_device object. |
1193 | * |
1194 | * This searches for pcie_device based on handle, then return pcie_device |
1195 | * object. |
1196 | */ |
1197 | struct _pcie_device * |
1198 | mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
1199 | { |
1200 | struct _pcie_device *pcie_device; |
1201 | unsigned long flags; |
1202 | |
1203 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
1204 | pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); |
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1206 | |
1207 | return pcie_device; |
1208 | } |
1209 | |
1210 | /** |
1211 | * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency. |
1212 | * @ioc: per adapter object |
1213 | * Context: This function will acquire ioc->pcie_device_lock |
1214 | * |
 * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
 * reported among all available NVMe drives.
1217 | * Minimum max_shutdown_latency will be six seconds. |
1218 | */ |
1219 | static void |
1220 | _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc) |
1221 | { |
1222 | struct _pcie_device *pcie_device; |
1223 | unsigned long flags; |
1224 | u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; |
1225 | |
1226 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
1227 | list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { |
1228 | if (pcie_device->shutdown_latency) { |
1229 | if (shutdown_latency < pcie_device->shutdown_latency) |
1230 | shutdown_latency = |
1231 | pcie_device->shutdown_latency; |
1232 | } |
1233 | } |
1234 | ioc->max_shutdown_latency = shutdown_latency; |
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1236 | } |
1237 | |
1238 | /** |
1239 | * _scsih_pcie_device_remove - remove pcie_device from list. |
1240 | * @ioc: per adapter object |
1241 | * @pcie_device: the pcie_device object |
1242 | * Context: This function will acquire ioc->pcie_device_lock. |
1243 | * |
1244 | * If pcie_device is on the list, remove it and decrement its reference count. |
1245 | */ |
1246 | static void |
1247 | _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, |
1248 | struct _pcie_device *pcie_device) |
1249 | { |
1250 | unsigned long flags; |
1251 | int was_on_pcie_device_list = 0; |
1252 | u8 update_latency = 0; |
1253 | |
1254 | if (!pcie_device) |
1255 | return; |
1256 | ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", |
1257 | pcie_device->handle, (u64)pcie_device->wwid); |
1258 | if (pcie_device->enclosure_handle != 0) |
1259 | ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n", |
1260 | (u64)pcie_device->enclosure_logical_id, |
1261 | pcie_device->slot); |
1262 | if (pcie_device->connector_name[0] != '\0') |
1263 | ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n", |
1264 | pcie_device->enclosure_level, |
1265 | pcie_device->connector_name); |
1266 | |
1267 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		pcie_device_put(pcie_device);
1278 | } |
1279 | |
1280 | /* |
1281 | * This device's RTD3 Entry Latency matches IOC's |
1282 | * max_shutdown_latency. Recalculate IOC's max_shutdown_latency |
1283 | * from the available drives as current drive is getting removed. |
1284 | */ |
1285 | if (update_latency) |
1286 | _scsih_set_nvme_max_shutdown_latency(ioc); |
1287 | } |
1288 | |
1289 | |
1290 | /** |
1291 | * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle |
1292 | * @ioc: per adapter object |
1293 | * @handle: device handle |
1294 | */ |
1295 | static void |
1296 | _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
1297 | { |
1298 | struct _pcie_device *pcie_device; |
1299 | unsigned long flags; |
1300 | int was_on_pcie_device_list = 0; |
1301 | u8 update_latency = 0; |
1302 | |
1303 | if (ioc->shost_recovery) |
1304 | return; |
1305 | |
1306 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
1307 | pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); |
1308 | if (pcie_device) { |
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
1321 | } |
1322 | |
1323 | /* |
1324 | * This device's RTD3 Entry Latency matches IOC's |
1325 | * max_shutdown_latency. Recalculate IOC's max_shutdown_latency |
1326 | * from the available drives as current drive is getting removed. |
1327 | */ |
1328 | if (update_latency) |
1329 | _scsih_set_nvme_max_shutdown_latency(ioc); |
1330 | } |
1331 | |
1332 | /** |
1333 | * _scsih_pcie_device_add - add pcie_device object |
1334 | * @ioc: per adapter object |
1335 | * @pcie_device: pcie_device object |
1336 | * |
1337 | * This is added to the pcie_device_list link list. |
1338 | */ |
1339 | static void |
1340 | _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc, |
1341 | struct _pcie_device *pcie_device) |
1342 | { |
1343 | unsigned long flags; |
1344 | |
1345 | dewtprintk(ioc, |
1346 | ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", |
1347 | __func__, |
1348 | pcie_device->handle, (u64)pcie_device->wwid)); |
1349 | if (pcie_device->enclosure_handle != 0) |
1350 | dewtprintk(ioc, |
1351 | ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", |
1352 | __func__, |
1353 | (u64)pcie_device->enclosure_logical_id, |
1354 | pcie_device->slot)); |
1355 | if (pcie_device->connector_name[0] != '\0') |
1356 | dewtprintk(ioc, |
1357 | ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", |
1358 | __func__, pcie_device->enclosure_level, |
1359 | pcie_device->connector_name)); |
1360 | |
1361 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/* TODO: need to find out whether this condition will occur or not */
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1380 | } |
1381 | |
/**
1383 | * _scsih_pcie_device_init_add - insert pcie_device to the init list. |
1384 | * @ioc: per adapter object |
1385 | * @pcie_device: the pcie_device object |
1386 | * Context: This function will acquire ioc->pcie_device_lock. |
1387 | * |
1388 | * Adding new object at driver load time to the ioc->pcie_device_init_list. |
1389 | */ |
1390 | static void |
1391 | _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc, |
1392 | struct _pcie_device *pcie_device) |
1393 | { |
1394 | unsigned long flags; |
1395 | |
1396 | dewtprintk(ioc, |
1397 | ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", |
1398 | __func__, |
1399 | pcie_device->handle, (u64)pcie_device->wwid)); |
1400 | if (pcie_device->enclosure_handle != 0) |
1401 | dewtprintk(ioc, |
1402 | ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", |
1403 | __func__, |
1404 | (u64)pcie_device->enclosure_logical_id, |
1405 | pcie_device->slot)); |
1406 | if (pcie_device->connector_name[0] != '\0') |
1407 | dewtprintk(ioc, |
1408 | ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", |
1409 | __func__, pcie_device->enclosure_level, |
1410 | pcie_device->connector_name)); |
1411 | |
1412 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1419 | } |
1420 | /** |
1421 | * _scsih_raid_device_find_by_id - raid device search |
1422 | * @ioc: per adapter object |
1423 | * @id: sas device target id |
1424 | * @channel: sas device channel |
1425 | * Context: Calling function should acquire ioc->raid_device_lock |
1426 | * |
1427 | * This searches for raid_device based on target id, then return raid_device |
1428 | * object. |
1429 | */ |
1430 | static struct _raid_device * |
1431 | _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) |
1432 | { |
1433 | struct _raid_device *raid_device, *r; |
1434 | |
1435 | r = NULL; |
1436 | list_for_each_entry(raid_device, &ioc->raid_device_list, list) { |
1437 | if (raid_device->id == id && raid_device->channel == channel) { |
1438 | r = raid_device; |
1439 | goto out; |
1440 | } |
1441 | } |
1442 | |
1443 | out: |
1444 | return r; |
1445 | } |
1446 | |
1447 | /** |
1448 | * mpt3sas_raid_device_find_by_handle - raid device search |
1449 | * @ioc: per adapter object |
1450 | * @handle: sas device handle (assigned by firmware) |
1451 | * Context: Calling function should acquire ioc->raid_device_lock |
1452 | * |
1453 | * This searches for raid_device based on handle, then return raid_device |
1454 | * object. |
1455 | */ |
1456 | struct _raid_device * |
1457 | mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
1458 | { |
1459 | struct _raid_device *raid_device, *r; |
1460 | |
1461 | r = NULL; |
1462 | list_for_each_entry(raid_device, &ioc->raid_device_list, list) { |
1463 | if (raid_device->handle != handle) |
1464 | continue; |
1465 | r = raid_device; |
1466 | goto out; |
1467 | } |
1468 | |
1469 | out: |
1470 | return r; |
1471 | } |
1472 | |
1473 | /** |
1474 | * _scsih_raid_device_find_by_wwid - raid device search |
1475 | * @ioc: per adapter object |
1476 | * @wwid: ? |
1477 | * Context: Calling function should acquire ioc->raid_device_lock |
1478 | * |
1479 | * This searches for raid_device based on wwid, then return raid_device |
1480 | * object. |
1481 | */ |
1482 | static struct _raid_device * |
1483 | _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) |
1484 | { |
1485 | struct _raid_device *raid_device, *r; |
1486 | |
1487 | r = NULL; |
1488 | list_for_each_entry(raid_device, &ioc->raid_device_list, list) { |
1489 | if (raid_device->wwid != wwid) |
1490 | continue; |
1491 | r = raid_device; |
1492 | goto out; |
1493 | } |
1494 | |
1495 | out: |
1496 | return r; |
1497 | } |
1498 | |
1499 | /** |
1500 | * _scsih_raid_device_add - add raid_device object |
1501 | * @ioc: per adapter object |
1502 | * @raid_device: raid_device object |
1503 | * |
1504 | * This is added to the raid_device_list link list. |
1505 | */ |
1506 | static void |
1507 | _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, |
1508 | struct _raid_device *raid_device) |
1509 | { |
1510 | unsigned long flags; |
1511 | |
1512 | dewtprintk(ioc, |
1513 | ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", |
1514 | __func__, |
1515 | raid_device->handle, (u64)raid_device->wwid)); |
1516 | |
1517 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1520 | } |
1521 | |
1522 | /** |
1523 | * _scsih_raid_device_remove - delete raid_device object |
1524 | * @ioc: per adapter object |
1525 | * @raid_device: raid_device object |
1526 | * |
1527 | */ |
1528 | static void |
1529 | _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, |
1530 | struct _raid_device *raid_device) |
1531 | { |
1532 | unsigned long flags; |
1533 | |
1534 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
	list_del(&raid_device->list);
	kfree(raid_device);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1538 | } |
1539 | |
1540 | /** |
1541 | * mpt3sas_scsih_expander_find_by_handle - expander device search |
1542 | * @ioc: per adapter object |
1543 | * @handle: expander handle (assigned by firmware) |
1544 | * Context: Calling function should acquire ioc->sas_device_lock |
 * Context: Calling function should acquire ioc->sas_node_lock
1546 | * This searches for expander device based on handle, then returns the |
1547 | * sas_node object. |
1548 | */ |
1549 | struct _sas_node * |
1550 | mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
1551 | { |
1552 | struct _sas_node *sas_expander, *r; |
1553 | |
1554 | r = NULL; |
1555 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { |
1556 | if (sas_expander->handle != handle) |
1557 | continue; |
1558 | r = sas_expander; |
1559 | goto out; |
1560 | } |
1561 | out: |
1562 | return r; |
1563 | } |
1564 | |
1565 | /** |
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1567 | * @ioc: per adapter object |
1568 | * @handle: enclosure handle (assigned by firmware) |
1569 | * Context: Calling function should acquire ioc->sas_device_lock |
1570 | * |
1571 | * This searches for enclosure device based on handle, then returns the |
1572 | * enclosure object. |
1573 | */ |
1574 | static struct _enclosure_node * |
1575 | mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
1576 | { |
1577 | struct _enclosure_node *enclosure_dev, *r; |
1578 | |
1579 | r = NULL; |
1580 | list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { |
1581 | if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) |
1582 | continue; |
1583 | r = enclosure_dev; |
1584 | goto out; |
1585 | } |
1586 | out: |
1587 | return r; |
1588 | } |
1589 | /** |
1590 | * mpt3sas_scsih_expander_find_by_sas_address - expander device search |
1591 | * @ioc: per adapter object |
1592 | * @sas_address: sas address |
1593 | * @port: hba port entry |
1594 | * Context: Calling function should acquire ioc->sas_node_lock. |
1595 | * |
1596 | * This searches for expander device based on sas_address & port number, |
1597 | * then returns the sas_node object. |
1598 | */ |
1599 | struct _sas_node * |
1600 | mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, |
1601 | u64 sas_address, struct hba_port *port) |
1602 | { |
1603 | struct _sas_node *sas_expander, *r = NULL; |
1604 | |
1605 | if (!port) |
1606 | return r; |
1607 | |
1608 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { |
1609 | if (sas_expander->sas_address != sas_address) |
1610 | continue; |
1611 | if (sas_expander->port != port) |
1612 | continue; |
1613 | r = sas_expander; |
1614 | goto out; |
1615 | } |
1616 | out: |
1617 | return r; |
1618 | } |
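
/*
 * Illustrative sketch (not part of the driver): expanders are keyed by
 * both SAS address and hba_port, and the list walk above assumes
 * sas_node_lock is held, e.g.
 *
 *	unsigned long flags;
 *	struct _sas_node *expander;
 *
 *	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 *	expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
 *	    sas_address, port);
 *	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 */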
1619 | |
1620 | /** |
1621 | * _scsih_expander_node_add - insert expander device to the list. |
1622 | * @ioc: per adapter object |
1623 | * @sas_expander: the sas_device object |
1624 | * Context: This function will acquire ioc->sas_node_lock. |
1625 | * |
1626 | * Adding new object to the ioc->sas_expander_list. |
1627 | */ |
1628 | static void |
1629 | _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, |
1630 | struct _sas_node *sas_expander) |
1631 | { |
1632 | unsigned long flags; |
1633 | |
1634 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
1635 | list_add_tail(&sas_expander->list, &ioc->sas_expander_list); |
1636 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
1637 | } |
1638 | |
1639 | /** |
1640 | * _scsih_is_end_device - determines if device is an end device |
1641 | * @device_info: bitfield providing information about the device. |
1642 | * Context: none |
1643 | * |
1644 | * Return: 1 if end device. |
1645 | */ |
1646 | static int |
1647 | _scsih_is_end_device(u32 device_info) |
1648 | { |
1649 | if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && |
1650 | ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | |
1651 | (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | |
1652 | (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) |
1653 | return 1; |
1654 | else |
1655 | return 0; |
1656 | } |
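
/*
 * Worked example (illustrative only):
 *
 *	u32 info = MPI2_SAS_DEVICE_INFO_END_DEVICE |
 *		   MPI2_SAS_DEVICE_INFO_SSP_TARGET;
 *
 *	_scsih_is_end_device(info) returns 1, while a DeviceInfo without
 *	the END_DEVICE bit, or an end device with none of the
 *	SSP/STP/SATA target bits set, returns 0.
 */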
1657 | |
1658 | /** |
1659 | * _scsih_is_nvme_pciescsi_device - determines if the device is a PCIe |
1660 | * NVMe or SCSI device |
1661 | * @device_info: bitfield providing information about the device. |
1662 | * Context: none |
1663 | * |
1664 | * Return: 1 if the device is a PCIe NVMe or SCSI device, else 0. |
1665 | */ |
1666 | static int |
1667 | _scsih_is_nvme_pciescsi_device(u32 device_info) |
1668 | { |
1669 | if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) |
1670 | == MPI26_PCIE_DEVINFO_NVME) || |
1671 | ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) |
1672 | == MPI26_PCIE_DEVINFO_SCSI)) |
1673 | return 1; |
1674 | else |
1675 | return 0; |
1676 | } |
1677 | |
1678 | /** |
1679 | * _scsih_scsi_lookup_find_by_target - search for matching channel:id |
1680 | * @ioc: per adapter object |
1681 | * @id: target id |
1682 | * @channel: channel |
1683 | * Context: This function will acquire ioc->scsi_lookup_lock. |
1684 | * |
1685 | * This will search for a matching channel:id in the scsi_lookup array, |
1686 | * returning 1 if found. |
1687 | */ |
1688 | static u8 |
1689 | _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, |
1690 | int channel) |
1691 | { |
1692 | int smid; |
1693 | struct scsi_cmnd *scmd; |
1694 | |
1695 | for (smid = 1; |
1696 | smid <= ioc->shost->can_queue; smid++) { |
1697 | scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); |
1698 | if (!scmd) |
1699 | continue; |
1700 | if (scmd->device->id == id && |
1701 | scmd->device->channel == channel) |
1702 | return 1; |
1703 | } |
1704 | return 0; |
1705 | } |
1706 | |
1707 | /** |
1708 | * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun |
1709 | * @ioc: per adapter object |
1710 | * @id: target id |
1711 | * @lun: lun number |
1712 | * @channel: channel |
1713 | * Context: This function will acquire ioc->scsi_lookup_lock. |
1714 | * |
1715 | * This will search for a matching channel:id:lun in the scsi_lookup array, |
1716 | * returning 1 if found. |
1717 | */ |
1718 | static u8 |
1719 | _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, |
1720 | unsigned int lun, int channel) |
1721 | { |
1722 | int smid; |
1723 | struct scsi_cmnd *scmd; |
1724 | |
1725 | for (smid = 1; smid <= ioc->shost->can_queue; smid++) { |
1726 | |
1727 | scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); |
1728 | if (!scmd) |
1729 | continue; |
1730 | if (scmd->device->id == id && |
1731 | scmd->device->channel == channel && |
1732 | scmd->device->lun == lun) |
1733 | return 1; |
1734 | } |
1735 | return 0; |
1736 | } |
1737 | |
1738 | /** |
1739 | * mpt3sas_scsih_scsi_lookup_get - returns scmd entry |
1740 | * @ioc: per adapter object |
1741 | * @smid: system request message index |
1742 | * |
1743 | * Return: the scmd pointer stored for this smid, or NULL if the smid is |
1744 | * not associated with an outstanding SCSI IO. |
1745 | */ |
1746 | struct scsi_cmnd * |
1747 | mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
1748 | { |
1749 | struct scsi_cmnd *scmd = NULL; |
1750 | struct scsiio_tracker *st; |
1751 | Mpi25SCSIIORequest_t *mpi_request; |
1752 | u16 tag = smid - 1; |
1753 | |
1754 | if (smid > 0 && |
1755 | smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { |
1756 | u32 unique_tag = |
1757 | ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; |
1758 | |
1759 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
1760 | |
1761 | /* |
1762 | * If SCSI IO request is outstanding at driver level then |
1763 | * DevHandle field must be non-zero. If DevHandle is zero |
1764 | * then it means that this smid is free at driver level, |
1765 | * so return NULL. |
1766 | */ |
1767 | if (!mpi_request->DevHandle) |
1768 | return scmd; |
1769 | |
1770 | scmd = scsi_host_find_tag(ioc->shost, unique_tag); |
1771 | if (scmd) { |
1772 | st = scsi_cmd_priv(scmd); |
1773 | if (st->cb_idx == 0xFF || st->smid == 0) |
1774 | scmd = NULL; |
1775 | } |
1776 | } |
1777 | return scmd; |
1778 | } |
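
/*
 * Worked example (illustrative only): a driver smid maps to the block
 * layer tag as tag = smid - 1, and the hardware queue number recorded in
 * ioc->io_queue_num[] is folded in the same way blk_mq_unique_tag()
 * does, i.e. (hwq << BLK_MQ_UNIQUE_TAG_BITS) | tag.  Taking
 * BLK_MQ_UNIQUE_TAG_BITS as 16, smid = 5 on hardware queue 2 gives
 *
 *	tag        = 5 - 1         = 4
 *	unique_tag = (2 << 16) | 4 = 0x00020004
 *
 * which is the value passed to scsi_host_find_tag() above.
 */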
1779 | |
1780 | /** |
1781 | * scsih_change_queue_depth - setting device queue depth |
1782 | * @sdev: scsi device struct |
1783 | * @qdepth: requested queue depth |
1784 | * |
1785 | * Return: queue depth. |
1786 | */ |
1787 | static int |
1788 | scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) |
1789 | { |
1790 | struct Scsi_Host *shost = sdev->host; |
1791 | int max_depth; |
1792 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
1793 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
1794 | struct MPT3SAS_TARGET *sas_target_priv_data; |
1795 | struct _sas_device *sas_device; |
1796 | unsigned long flags; |
1797 | |
1798 | max_depth = shost->can_queue; |
1799 | |
1800 | /* |
1801 | * limit max device queue for SATA to 32 if enable_sdev_max_qd |
1802 | * is disabled. |
1803 | */ |
1804 | if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc) |
1805 | goto not_sata; |
1806 | |
1807 | sas_device_priv_data = sdev->hostdata; |
1808 | if (!sas_device_priv_data) |
1809 | goto not_sata; |
1810 | sas_target_priv_data = sas_device_priv_data->sas_target; |
1811 | if (!sas_target_priv_data) |
1812 | goto not_sata; |
1813 | if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) |
1814 | goto not_sata; |
1815 | |
1816 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
1817 | sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); |
1818 | if (sas_device) { |
1819 | if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) |
1820 | max_depth = MPT3SAS_SATA_QUEUE_DEPTH; |
1821 |  |
1822 | sas_device_put(sas_device); |
1823 | } |
1824 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
1825 | |
1826 | not_sata: |
1827 | |
1828 | if (!sdev->tagged_supported) |
1829 | max_depth = 1; |
1830 | if (qdepth > max_depth) |
1831 | qdepth = max_depth; |
1832 | scsi_change_queue_depth(sdev, qdepth); |
1833 | sdev_printk(KERN_INFO, sdev, |
1834 | "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", |
1835 | sdev->queue_depth, sdev->tagged_supported, |
1836 | sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); |
1837 | return sdev->queue_depth; |
1838 | } |
1839 | |
1840 | /** |
1841 | * mpt3sas_scsih_change_queue_depth - setting device queue depth |
1842 | * @sdev: scsi device struct |
1843 | * @qdepth: requested queue depth |
1844 | * |
1845 | * Returns nothing. |
1846 | */ |
1847 | void |
1848 | mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) |
1849 | { |
1850 | struct Scsi_Host *shost = sdev->host; |
1851 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
1852 | |
1853 | if (ioc->enable_sdev_max_qd) |
1854 | qdepth = shost->can_queue; |
1855 | |
1856 | scsih_change_queue_depth(sdev, qdepth); |
1857 | } |
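
/*
 * Worked example (illustrative only), with enable_sdev_max_qd disabled
 * on a non-gen3.5 IOC:
 *
 *	SATA disk, requested qdepth 254 -> clamped to
 *					   MPT3SAS_SATA_QUEUE_DEPTH (32)
 *	SSP disk, requested qdepth 254  -> clamped only to shost->can_queue
 *	untagged device                 -> qdepth forced to 1
 *
 * With enable_sdev_max_qd set, mpt3sas_scsih_change_queue_depth() first
 * raises the request to shost->can_queue and then applies the same
 * clamping in scsih_change_queue_depth().
 */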
1858 | |
1859 | /** |
1860 | * scsih_target_alloc - target add routine |
1861 | * @starget: scsi target struct |
1862 | * |
1863 | * Return: 0 if ok. Any other return is assumed to be an error and |
1864 | * the device is ignored. |
1865 | */ |
1866 | static int |
1867 | scsih_target_alloc(struct scsi_target *starget) |
1868 | { |
1869 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); |
1870 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
1871 | struct MPT3SAS_TARGET *sas_target_priv_data; |
1872 | struct _sas_device *sas_device; |
1873 | struct _raid_device *raid_device; |
1874 | struct _pcie_device *pcie_device; |
1875 | unsigned long flags; |
1876 | struct sas_rphy *rphy; |
1877 | |
1878 | sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), |
1879 | GFP_KERNEL); |
1880 | if (!sas_target_priv_data) |
1881 | return -ENOMEM; |
1882 | |
1883 | starget->hostdata = sas_target_priv_data; |
1884 | sas_target_priv_data->starget = starget; |
1885 | sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; |
1886 | |
1887 | /* RAID volumes */ |
1888 | if (starget->channel == RAID_CHANNEL) { |
1889 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
1890 | raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, |
1891 | starget->channel); |
1892 | if (raid_device) { |
1893 | sas_target_priv_data->handle = raid_device->handle; |
1894 | sas_target_priv_data->sas_address = raid_device->wwid; |
1895 | sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; |
1896 | if (ioc->is_warpdrive) |
1897 | sas_target_priv_data->raid_device = raid_device; |
1898 | raid_device->starget = starget; |
1899 | } |
1900 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
1901 | return 0; |
1902 | } |
1903 | |
1904 | /* PCIe devices */ |
1905 | if (starget->channel == PCIE_CHANNEL) { |
1906 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
1907 | pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id, |
1908 | starget->channel); |
1909 | if (pcie_device) { |
1910 | sas_target_priv_data->handle = pcie_device->handle; |
1911 | sas_target_priv_data->sas_address = pcie_device->wwid; |
1912 | sas_target_priv_data->port = NULL; |
1913 | sas_target_priv_data->pcie_dev = pcie_device; |
1914 | pcie_device->starget = starget; |
1915 | pcie_device->id = starget->id; |
1916 | pcie_device->channel = starget->channel; |
1917 | sas_target_priv_data->flags |= |
1918 | MPT_TARGET_FLAGS_PCIE_DEVICE; |
1919 | if (pcie_device->fast_path) |
1920 | sas_target_priv_data->flags |= |
1921 | MPT_TARGET_FASTPATH_IO; |
1922 | } |
1923 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
1924 | return 0; |
1925 | } |
1926 | |
1927 | /* sas/sata devices */ |
1928 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
1929 | rphy = dev_to_rphy(starget->dev.parent); |
1930 | sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); |
1931 | |
1932 | if (sas_device) { |
1933 | sas_target_priv_data->handle = sas_device->handle; |
1934 | sas_target_priv_data->sas_address = sas_device->sas_address; |
1935 | sas_target_priv_data->port = sas_device->port; |
1936 | sas_target_priv_data->sas_dev = sas_device; |
1937 | sas_device->starget = starget; |
1938 | sas_device->id = starget->id; |
1939 | sas_device->channel = starget->channel; |
1940 | if (test_bit(sas_device->handle, ioc->pd_handles)) |
1941 | sas_target_priv_data->flags |= |
1942 | MPT_TARGET_FLAGS_RAID_COMPONENT; |
1943 | if (sas_device->fast_path) |
1944 | sas_target_priv_data->flags |= |
1945 | MPT_TARGET_FASTPATH_IO; |
1946 | } |
1947 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
1948 | |
1949 | return 0; |
1950 | } |
1951 | |
1952 | /** |
1953 | * scsih_target_destroy - target destroy routine |
1954 | * @starget: scsi target struct |
1955 | */ |
1956 | static void |
1957 | scsih_target_destroy(struct scsi_target *starget) |
1958 | { |
1959 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); |
1960 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
1961 | struct MPT3SAS_TARGET *sas_target_priv_data; |
1962 | struct _sas_device *sas_device; |
1963 | struct _raid_device *raid_device; |
1964 | struct _pcie_device *pcie_device; |
1965 | unsigned long flags; |
1966 | |
1967 | sas_target_priv_data = starget->hostdata; |
1968 | if (!sas_target_priv_data) |
1969 | return; |
1970 | |
1971 | if (starget->channel == RAID_CHANNEL) { |
1972 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
1973 | raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, |
1974 | starget->channel); |
1975 | if (raid_device) { |
1976 | raid_device->starget = NULL; |
1977 | raid_device->sdev = NULL; |
1978 | } |
1979 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
1980 | goto out; |
1981 | } |
1982 | |
1983 | if (starget->channel == PCIE_CHANNEL) { |
1984 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
1985 | pcie_device = __mpt3sas_get_pdev_from_target(ioc, |
1986 | sas_target_priv_data); |
1987 | if (pcie_device && (pcie_device->starget == starget) && |
1988 | (pcie_device->id == starget->id) && |
1989 | (pcie_device->channel == starget->channel)) |
1990 | pcie_device->starget = NULL; |
1991 | |
1992 | if (pcie_device) { |
1993 | /* |
1994 | * Corresponding get() is in _scsih_target_alloc() |
1995 | */ |
1996 | sas_target_priv_data->pcie_dev = NULL; |
1997 | pcie_device_put(pcie_device); |
1998 | pcie_device_put(pcie_device); |
1999 | } |
2000 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
2001 | goto out; |
2002 | } |
2003 | |
2004 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
2005 | sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); |
2006 | if (sas_device && (sas_device->starget == starget) && |
2007 | (sas_device->id == starget->id) && |
2008 | (sas_device->channel == starget->channel)) |
2009 | sas_device->starget = NULL; |
2010 | |
2011 | if (sas_device) { |
2012 | /* |
2013 | * Corresponding get() is in _scsih_target_alloc() |
2014 | */ |
2015 | sas_target_priv_data->sas_dev = NULL; |
2016 | sas_device_put(sas_device); |
2017 |  |
2018 | sas_device_put(sas_device); |
2019 | } |
2020 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
2021 |  |
2022 | out: |
2023 | kfree(sas_target_priv_data); |
2024 | starget->hostdata = NULL; |
2025 | } |
2026 | |
2027 | /** |
2028 | * scsih_sdev_init - device add routine |
2029 | * @sdev: scsi device struct |
2030 | * |
2031 | * Return: 0 if ok. Any other return is assumed to be an error and |
2032 | * the device is ignored. |
2033 | */ |
2034 | static int |
2035 | scsih_sdev_init(struct scsi_device *sdev) |
2036 | { |
2037 | struct Scsi_Host *shost; |
2038 | struct MPT3SAS_ADAPTER *ioc; |
2039 | struct MPT3SAS_TARGET *sas_target_priv_data; |
2040 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
2041 | struct scsi_target *starget; |
2042 | struct _raid_device *raid_device; |
2043 | struct _sas_device *sas_device; |
2044 | struct _pcie_device *pcie_device; |
2045 | unsigned long flags; |
2046 | |
2047 | sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), |
2048 | GFP_KERNEL); |
2049 | if (!sas_device_priv_data) |
2050 | return -ENOMEM; |
2051 | |
2052 | sas_device_priv_data->lun = sdev->lun; |
2053 | sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT; |
2054 | |
2055 | starget = scsi_target(sdev); |
2056 | sas_target_priv_data = starget->hostdata; |
2057 | sas_target_priv_data->num_luns++; |
2058 | sas_device_priv_data->sas_target = sas_target_priv_data; |
2059 | sdev->hostdata = sas_device_priv_data; |
2060 | if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT)) |
2061 | sdev->no_uld_attach = 1; |
2062 | |
2063 | shost = dev_to_shost(&starget->dev); |
2064 | ioc = shost_priv(shost); |
2065 | if (starget->channel == RAID_CHANNEL) { |
2066 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
2067 | raid_device = _scsih_raid_device_find_by_id(ioc, |
2068 | starget->id, starget->channel); |
2069 | if (raid_device) |
2070 | raid_device->sdev = sdev; /* raid is single lun */ |
2071 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
2072 | } |
2073 | if (starget->channel == PCIE_CHANNEL) { |
2074 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
2075 | pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, |
2076 | sas_target_priv_data->sas_address); |
2077 | if (pcie_device && (pcie_device->starget == NULL)) { |
2078 | sdev_printk(KERN_INFO, sdev, |
2079 | "%s : pcie_device->starget set to starget @ %d\n", |
2080 | __func__, __LINE__); |
2081 | pcie_device->starget = starget; |
2082 | } |
2083 | |
2084 | if (pcie_device) |
2085 | pcie_device_put(pcie_device); |
2086 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
2087 | |
2088 | } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { |
2089 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
2090 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, |
2091 | sas_target_priv_data->sas_address, |
2092 | sas_target_priv_data->port); |
2093 | if (sas_device && (sas_device->starget == NULL)) { |
2094 | sdev_printk(KERN_INFO, sdev, |
2095 | "%s : sas_device->starget set to starget @ %d\n", |
2096 | __func__, __LINE__); |
2097 | sas_device->starget = starget; |
2098 | } |
2099 | |
2100 | if (sas_device) |
2101 | sas_device_put(sas_device); |
2102 |  |
2103 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
2104 | } |
2105 | |
2106 | return 0; |
2107 | } |
2108 | |
2109 | /** |
2110 | * scsih_sdev_destroy - device destroy routine |
2111 | * @sdev: scsi device struct |
2112 | */ |
2113 | static void |
2114 | scsih_sdev_destroy(struct scsi_device *sdev) |
2115 | { |
2116 | struct MPT3SAS_TARGET *sas_target_priv_data; |
2117 | struct scsi_target *starget; |
2118 | struct Scsi_Host *shost; |
2119 | struct MPT3SAS_ADAPTER *ioc; |
2120 | struct _sas_device *sas_device; |
2121 | struct _pcie_device *pcie_device; |
2122 | unsigned long flags; |
2123 | |
2124 | if (!sdev->hostdata) |
2125 | return; |
2126 | |
2127 | starget = scsi_target(sdev); |
2128 | sas_target_priv_data = starget->hostdata; |
2129 | sas_target_priv_data->num_luns--; |
2130 | |
2131 | shost = dev_to_shost(&starget->dev); |
2132 | ioc = shost_priv(shost); |
2133 | |
2134 | if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { |
2135 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
2136 | pcie_device = __mpt3sas_get_pdev_from_target(ioc, |
2137 | sas_target_priv_data); |
2138 | if (pcie_device && !sas_target_priv_data->num_luns) |
2139 | pcie_device->starget = NULL; |
2140 | |
2141 | if (pcie_device) |
2142 | pcie_device_put(pcie_device); |
2143 |  |
2144 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
2145 | |
2146 | } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { |
2147 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
2148 | sas_device = __mpt3sas_get_sdev_from_target(ioc, |
2149 | sas_target_priv_data); |
2150 | if (sas_device && !sas_target_priv_data->num_luns) |
2151 | sas_device->starget = NULL; |
2152 | |
2153 | if (sas_device) |
2154 | sas_device_put(sas_device); |
2155 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
2156 | } |
2157 |  |
2158 | kfree(sdev->hostdata); |
2159 | sdev->hostdata = NULL; |
2160 | } |
2161 | |
2162 | /** |
2163 | * _scsih_display_sata_capabilities - sata capabilities |
2164 | * @ioc: per adapter object |
2165 | * @handle: device handle |
2166 | * @sdev: scsi device struct |
2167 | */ |
2168 | static void |
2169 | _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, |
2170 | u16 handle, struct scsi_device *sdev) |
2171 | { |
2172 | Mpi2ConfigReply_t mpi_reply; |
2173 | Mpi2SasDevicePage0_t sas_device_pg0; |
2174 | u32 ioc_status; |
2175 | u16 flags; |
2176 | u32 device_info; |
2177 | |
2178 | if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
2179 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { |
2180 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
2181 | __FILE__, __LINE__, __func__); |
2182 | return; |
2183 | } |
2184 | |
2185 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
2186 | MPI2_IOCSTATUS_MASK; |
2187 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
2188 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
2189 | __FILE__, __LINE__, __func__); |
2190 | return; |
2191 | } |
2192 | |
2193 | flags = le16_to_cpu(sas_device_pg0.Flags); |
2194 | device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); |
2195 | |
2196 | sdev_printk(KERN_INFO, sdev, |
2197 | "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " |
2198 | "sw_preserve(%s)\n", |
2199 | (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y": "n", |
2200 | (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y": "n", |
2201 | (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y": |
2202 | "n", |
2203 | (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y": "n", |
2204 | (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y": "n", |
2205 | (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y": "n"); |
2206 | } |
2207 | |
2208 | /* |
2209 | * raid transport support - |
2210 | * Enabled for SLES11 and newer. In older kernels the driver will panic when |
2211 | * unloading the driver followed by a load - I believe that the subroutine |
2212 | * raid_class_release() is not cleaning up properly. |
2213 | */ |
2214 | |
2215 | /** |
2216 | * scsih_is_raid - return boolean indicating device is raid volume |
2217 | * @dev: the device struct object |
2218 | */ |
2219 | static int |
2220 | scsih_is_raid(struct device *dev) |
2221 | { |
2222 | struct scsi_device *sdev = to_scsi_device(dev); |
2223 | struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); |
2224 | |
2225 | if (ioc->is_warpdrive) |
2226 | return 0; |
2227 | return (sdev->channel == RAID_CHANNEL) ? 1 : 0; |
2228 | } |
2229 | |
2230 | static int |
2231 | scsih_is_nvme(struct device *dev) |
2232 | { |
2233 | struct scsi_device *sdev = to_scsi_device(dev); |
2234 | |
2235 | return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; |
2236 | } |
2237 | |
2238 | /** |
2239 | * scsih_get_resync - get raid volume resync percent complete |
2240 | * @dev: the device struct object |
2241 | */ |
2242 | static void |
2243 | scsih_get_resync(struct device *dev) |
2244 | { |
2245 | struct scsi_device *sdev = to_scsi_device(dev); |
2246 | struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); |
2247 | static struct _raid_device *raid_device; |
2248 | unsigned long flags; |
2249 | Mpi2RaidVolPage0_t vol_pg0; |
2250 | Mpi2ConfigReply_t mpi_reply; |
2251 | u32 volume_status_flags; |
2252 | u8 percent_complete; |
2253 | u16 handle; |
2254 | |
2255 | percent_complete = 0; |
2256 | handle = 0; |
2257 | if (ioc->is_warpdrive) |
2258 | goto out; |
2259 | |
2260 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
2261 | raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, |
2262 | sdev->channel); |
2263 | if (raid_device) { |
2264 | handle = raid_device->handle; |
2265 | percent_complete = raid_device->percent_complete; |
2266 | } |
2267 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
2268 | |
2269 | if (!handle) |
2270 | goto out; |
2271 | |
2272 | if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, |
2273 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, |
2274 | sizeof(Mpi2RaidVolPage0_t))) { |
2275 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
2276 | __FILE__, __LINE__, __func__); |
2277 | percent_complete = 0; |
2278 | goto out; |
2279 | } |
2280 | |
2281 | volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); |
2282 | if (!(volume_status_flags & |
2283 | MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) |
2284 | percent_complete = 0; |
2285 | |
2286 | out: |
2287 | |
2288 | switch (ioc->hba_mpi_version_belonged) { |
2289 | case MPI2_VERSION: |
2290 | raid_set_resync(mpt2sas_raid_template, dev, percent_complete); |
2291 | break; |
2292 | case MPI25_VERSION: |
2293 | case MPI26_VERSION: |
2294 | raid_set_resync(mpt3sas_raid_template, dev, percent_complete); |
2295 | break; |
2296 | } |
2297 | } |
2298 | |
2299 | /** |
2300 | * scsih_get_state - get raid volume level |
2301 | * @dev: the device struct object |
2302 | */ |
2303 | static void |
2304 | scsih_get_state(struct device *dev) |
2305 | { |
2306 | struct scsi_device *sdev = to_scsi_device(dev); |
2307 | struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); |
2308 | static struct _raid_device *raid_device; |
2309 | unsigned long flags; |
2310 | Mpi2RaidVolPage0_t vol_pg0; |
2311 | Mpi2ConfigReply_t mpi_reply; |
2312 | u32 volstate; |
2313 | enum raid_state state = RAID_STATE_UNKNOWN; |
2314 | u16 handle = 0; |
2315 | |
2316 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
2317 | raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, |
2318 | sdev->channel); |
2319 | if (raid_device) |
2320 | handle = raid_device->handle; |
2321 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
2322 | |
2323 | if (!raid_device) |
2324 | goto out; |
2325 | |
2326 | if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, |
2327 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, |
2328 | sizeof(Mpi2RaidVolPage0_t))) { |
2329 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
2330 | __FILE__, __LINE__, __func__); |
2331 | goto out; |
2332 | } |
2333 | |
2334 | volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); |
2335 | if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { |
2336 | state = RAID_STATE_RESYNCING; |
2337 | goto out; |
2338 | } |
2339 | |
2340 | switch (vol_pg0.VolumeState) { |
2341 | case MPI2_RAID_VOL_STATE_OPTIMAL: |
2342 | case MPI2_RAID_VOL_STATE_ONLINE: |
2343 | state = RAID_STATE_ACTIVE; |
2344 | break; |
2345 | case MPI2_RAID_VOL_STATE_DEGRADED: |
2346 | state = RAID_STATE_DEGRADED; |
2347 | break; |
2348 | case MPI2_RAID_VOL_STATE_FAILED: |
2349 | case MPI2_RAID_VOL_STATE_MISSING: |
2350 | state = RAID_STATE_OFFLINE; |
2351 | break; |
2352 | } |
2353 | out: |
2354 | switch (ioc->hba_mpi_version_belonged) { |
2355 | case MPI2_VERSION: |
2356 | raid_set_state(mpt2sas_raid_template, dev, state); |
2357 | break; |
2358 | case MPI25_VERSION: |
2359 | case MPI26_VERSION: |
2360 | raid_set_state(mpt3sas_raid_template, dev, state); |
2361 | break; |
2362 | } |
2363 | } |
2364 | |
2365 | /** |
2366 | * _scsih_set_level - set raid level |
2367 | * @ioc: per adapter object |
2368 | * @sdev: scsi device struct |
2369 | * @volume_type: volume type |
2370 | */ |
2371 | static void |
2372 | _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, |
2373 | struct scsi_device *sdev, u8 volume_type) |
2374 | { |
2375 | enum raid_level level = RAID_LEVEL_UNKNOWN; |
2376 | |
2377 | switch (volume_type) { |
2378 | case MPI2_RAID_VOL_TYPE_RAID0: |
2379 | level = RAID_LEVEL_0; |
2380 | break; |
2381 | case MPI2_RAID_VOL_TYPE_RAID10: |
2382 | level = RAID_LEVEL_10; |
2383 | break; |
2384 | case MPI2_RAID_VOL_TYPE_RAID1E: |
2385 | level = RAID_LEVEL_1E; |
2386 | break; |
2387 | case MPI2_RAID_VOL_TYPE_RAID1: |
2388 | level = RAID_LEVEL_1; |
2389 | break; |
2390 | } |
2391 | |
2392 | switch (ioc->hba_mpi_version_belonged) { |
2393 | case MPI2_VERSION: |
2394 | raid_set_level(mpt2sas_raid_template, |
2395 | &sdev->sdev_gendev, level); |
2396 | break; |
2397 | case MPI25_VERSION: |
2398 | case MPI26_VERSION: |
2399 | raid_set_level(mpt3sas_raid_template, |
2400 | &sdev->sdev_gendev, level); |
2401 | break; |
2402 | } |
2403 | } |
2404 | |
2405 | |
2406 | /** |
2407 | * _scsih_get_volume_capabilities - volume capabilities |
2408 | * @ioc: per adapter object |
2409 | * @raid_device: the raid_device object |
2410 | * |
2411 | * Return: 0 for success, else 1 |
2412 | */ |
2413 | static int |
2414 | _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, |
2415 | struct _raid_device *raid_device) |
2416 | { |
2417 | Mpi2RaidVolPage0_t *vol_pg0; |
2418 | Mpi2RaidPhysDiskPage0_t pd_pg0; |
2419 | Mpi2SasDevicePage0_t sas_device_pg0; |
2420 | Mpi2ConfigReply_t mpi_reply; |
2421 | u16 sz; |
2422 | u8 num_pds; |
2423 | |
2424 | if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, |
2425 | &num_pds)) || !num_pds) { |
2426 | dfailprintk(ioc, |
2427 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2428 | __FILE__, __LINE__, __func__)); |
2429 | return 1; |
2430 | } |
2431 | |
2432 | raid_device->num_pds = num_pds; |
2433 | sz = struct_size(vol_pg0, PhysDisk, num_pds); |
2434 | vol_pg0 = kzalloc(sz, GFP_KERNEL); |
2435 | if (!vol_pg0) { |
2436 | dfailprintk(ioc, |
2437 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2438 | __FILE__, __LINE__, __func__)); |
2439 | return 1; |
2440 | } |
2441 | |
2442 | if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, |
2443 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { |
2444 | dfailprintk(ioc, |
2445 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2446 | __FILE__, __LINE__, __func__)); |
2447 | kfree(vol_pg0); |
2448 | return 1; |
2449 | } |
2450 | |
2451 | raid_device->volume_type = vol_pg0->VolumeType; |
2452 | |
2453 | /* figure out what the underlying devices are by |
2454 | * obtaining the device_info bits for the 1st device |
2455 | */ |
2456 | if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, |
2457 | &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, |
2458 | vol_pg0->PhysDisk[0].PhysDiskNum))) { |
2459 | if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, |
2460 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, |
2461 | le16_to_cpu(pd_pg0.DevHandle)))) { |
2462 | raid_device->device_info = |
2463 | le32_to_cpu(sas_device_pg0.DeviceInfo); |
2464 | } |
2465 | } |
2466 | |
2467 | kfree(vol_pg0); |
2468 | return 0; |
2469 | } |
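
/*
 * Sizing note (illustrative only): struct_size() above is roughly the
 * overflow-checked equivalent of
 *
 *	sz = sizeof(*vol_pg0) + num_pds * sizeof(vol_pg0->PhysDisk[0]);
 *
 * so vol_pg0 is large enough for the volume page header plus one
 * PhysDisk entry per member drive.
 */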
2470 | |
2471 | /** |
2472 | * _scsih_enable_tlr - setting TLR flags |
2473 | * @ioc: per adapter object |
2474 | * @sdev: scsi device struct |
2475 | * |
2476 | * Enabling Transaction Layer Retries for tape devices when |
2477 | * vpd page 0x90 is present |
2478 | * |
2479 | */ |
2480 | static void |
2481 | _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) |
2482 | { |
2483 | |
2484 | /* only for TAPE */ |
2485 | if (sdev->type != TYPE_TAPE) |
2486 | return; |
2487 | |
2488 | if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) |
2489 | return; |
2490 | |
2491 | sas_enable_tlr(sdev); |
2492 | sdev_printk(KERN_INFO, sdev, "TLR %s\n", |
2493 | sas_is_tlr_enabled(sdev) ? "Enabled": "Disabled"); |
2494 | return; |
2495 | |
2496 | } |
2497 | |
2498 | /** |
2499 | * scsih_sdev_configure - device configure routine. |
2500 | * @sdev: scsi device struct |
2501 | * @lim: queue limits |
2502 | * |
2503 | * Return: 0 if ok. Any other return is assumed to be an error and |
2504 | * the device is ignored. |
2505 | */ |
2506 | static int |
2507 | scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) |
2508 | { |
2509 | struct Scsi_Host *shost = sdev->host; |
2510 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
2511 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
2512 | struct MPT3SAS_TARGET *sas_target_priv_data; |
2513 | struct _sas_device *sas_device; |
2514 | struct _pcie_device *pcie_device; |
2515 | struct _raid_device *raid_device; |
2516 | unsigned long flags; |
2517 | int qdepth; |
2518 | u8 ssp_target = 0; |
2519 | char *ds = ""; |
2520 | char *r_level = ""; |
2521 | u16 handle, volume_handle = 0; |
2522 | u64 volume_wwid = 0; |
2523 | |
2524 | qdepth = 1; |
2525 | sas_device_priv_data = sdev->hostdata; |
2526 | sas_device_priv_data->configured_lun = 1; |
2527 | sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; |
2528 | sas_target_priv_data = sas_device_priv_data->sas_target; |
2529 | handle = sas_target_priv_data->handle; |
2530 | |
2531 | /* raid volume handling */ |
2532 | if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { |
2533 | |
2534 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
2535 | raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); |
2536 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
2537 | if (!raid_device) { |
2538 | dfailprintk(ioc, |
2539 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2540 | __FILE__, __LINE__, __func__)); |
2541 | return 1; |
2542 | } |
2543 | |
2544 | if (_scsih_get_volume_capabilities(ioc, raid_device)) { |
2545 | dfailprintk(ioc, |
2546 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2547 | __FILE__, __LINE__, __func__)); |
2548 | return 1; |
2549 | } |
2550 | |
2551 | /* |
2552 | * WARPDRIVE: Initialize the required data for Direct IO |
2553 | */ |
2554 | mpt3sas_init_warpdrive_properties(ioc, raid_device); |
2555 | |
2556 | /* RAID Queue Depth Support |
2557 | * IS volume = underlying qdepth of drive type, either |
2558 | * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH |
2559 | * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) |
2560 | */ |
2561 | if (raid_device->device_info & |
2562 | MPI2_SAS_DEVICE_INFO_SSP_TARGET) { |
2563 | qdepth = MPT3SAS_SAS_QUEUE_DEPTH; |
2564 | ds = "SSP"; |
2565 | } else { |
2566 | qdepth = MPT3SAS_SATA_QUEUE_DEPTH; |
2567 | if (raid_device->device_info & |
2568 | MPI2_SAS_DEVICE_INFO_SATA_DEVICE) |
2569 | ds = "SATA"; |
2570 | else |
2571 | ds = "STP"; |
2572 | } |
2573 | |
2574 | switch (raid_device->volume_type) { |
2575 | case MPI2_RAID_VOL_TYPE_RAID0: |
2576 | r_level = "RAID0"; |
2577 | break; |
2578 | case MPI2_RAID_VOL_TYPE_RAID1E: |
2579 | qdepth = MPT3SAS_RAID_QUEUE_DEPTH; |
2580 | if (ioc->manu_pg10.OEMIdentifier && |
2581 | (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & |
2582 | MFG10_GF0_R10_DISPLAY) && |
2583 | !(raid_device->num_pds % 2)) |
2584 | r_level = "RAID10"; |
2585 | else |
2586 | r_level = "RAID1E"; |
2587 | break; |
2588 | case MPI2_RAID_VOL_TYPE_RAID1: |
2589 | qdepth = MPT3SAS_RAID_QUEUE_DEPTH; |
2590 | r_level = "RAID1"; |
2591 | break; |
2592 | case MPI2_RAID_VOL_TYPE_RAID10: |
2593 | qdepth = MPT3SAS_RAID_QUEUE_DEPTH; |
2594 | r_level = "RAID10"; |
2595 | break; |
2596 | case MPI2_RAID_VOL_TYPE_UNKNOWN: |
2597 | default: |
2598 | qdepth = MPT3SAS_RAID_QUEUE_DEPTH; |
2599 | r_level = "RAIDX"; |
2600 | break; |
2601 | } |
2602 | |
2603 | if (!ioc->hide_ir_msg) |
2604 | sdev_printk(KERN_INFO, sdev, |
2605 | "%s: handle(0x%04x), wwid(0x%016llx)," |
2606 | " pd_count(%d), type(%s)\n", |
2607 | r_level, raid_device->handle, |
2608 | (unsigned long long)raid_device->wwid, |
2609 | raid_device->num_pds, ds); |
2610 | |
2611 | if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { |
2612 | lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS; |
2613 | sdev_printk(KERN_INFO, sdev, |
2614 | "Set queue's max_sector to: %u\n", |
2615 | MPT3SAS_RAID_MAX_SECTORS); |
2616 | } |
2617 | |
2618 | mpt3sas_scsih_change_queue_depth(sdev, qdepth); |
2619 | |
2620 | /* raid transport support */ |
2621 | if (!ioc->is_warpdrive) |
2622 | _scsih_set_level(ioc, sdev, raid_device->volume_type); |
2623 | return 0; |
2624 | } |
2625 | |
2626 | /* non-raid handling */ |
2627 | if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { |
2628 | if (mpt3sas_config_get_volume_handle(ioc, handle, |
2629 | &volume_handle)) { |
2630 | dfailprintk(ioc, |
2631 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2632 | __FILE__, __LINE__, __func__)); |
2633 | return 1; |
2634 | } |
2635 | if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, |
2636 | volume_handle, &volume_wwid)) { |
2637 | dfailprintk(ioc, |
2638 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2639 | __FILE__, __LINE__, __func__)); |
2640 | return 1; |
2641 | } |
2642 | } |
2643 | |
2644 | /* PCIe handling */ |
2645 | if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { |
2646 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
2647 | pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, |
2648 | sas_device_priv_data->sas_target->sas_address); |
2649 | if (!pcie_device) { |
2650 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
2651 | dfailprintk(ioc, |
2652 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2653 | __FILE__, __LINE__, __func__)); |
2654 | return 1; |
2655 | } |
2656 | |
2657 | qdepth = ioc->max_nvme_qd; |
2658 | ds = "NVMe"; |
2659 | sdev_printk(KERN_INFO, sdev, |
2660 | "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", |
2661 | ds, handle, (unsigned long long)pcie_device->wwid, |
2662 | pcie_device->port_num); |
2663 | if (pcie_device->enclosure_handle != 0) |
2664 | sdev_printk(KERN_INFO, sdev, |
2665 | "%s: enclosure logical id(0x%016llx), slot(%d)\n", |
2666 | ds, |
2667 | (unsigned long long)pcie_device->enclosure_logical_id, |
2668 | pcie_device->slot); |
2669 | if (pcie_device->connector_name[0] != '\0') |
2670 | sdev_printk(KERN_INFO, sdev, |
2671 | "%s: enclosure level(0x%04x)," |
2672 | "connector name( %s)\n", ds, |
2673 | pcie_device->enclosure_level, |
2674 | pcie_device->connector_name); |
2675 | |
2676 | if (pcie_device->nvme_mdts) |
2677 | lim->max_hw_sectors = pcie_device->nvme_mdts / 512; |
2678 | |
2679 | pcie_device_put(pcie_device); |
2680 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
2681 | mpt3sas_scsih_change_queue_depth(sdev, qdepth); |
2682 | lim->virt_boundary_mask = ioc->page_size - 1; |
2683 | return 0; |
2684 | } |
2685 | |
2686 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
2687 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, |
2688 | sas_device_priv_data->sas_target->sas_address, |
2689 | sas_device_priv_data->sas_target->port); |
2690 | if (!sas_device) { |
2691 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
2692 | dfailprintk(ioc, |
2693 | ioc_warn(ioc, "failure at %s:%d/%s()!\n", |
2694 | __FILE__, __LINE__, __func__)); |
2695 | return 1; |
2696 | } |
2697 | |
2698 | sas_device->volume_handle = volume_handle; |
2699 | sas_device->volume_wwid = volume_wwid; |
2700 | if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { |
2701 | qdepth = (sas_device->port_type > 1) ? |
2702 | ioc->max_wideport_qd : ioc->max_narrowport_qd; |
2703 | ssp_target = 1; |
2704 | if (sas_device->device_info & |
2705 | MPI2_SAS_DEVICE_INFO_SEP) { |
2706 | sdev_printk(KERN_INFO, sdev, |
2707 | "set ignore_delay_remove for handle(0x%04x)\n", |
2708 | sas_device_priv_data->sas_target->handle); |
2709 | sas_device_priv_data->ignore_delay_remove = 1; |
2710 | ds = "SES"; |
2711 | } else |
2712 | ds = "SSP"; |
2713 | } else { |
2714 | qdepth = ioc->max_sata_qd; |
2715 | if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
2716 | ds = "STP"; |
2717 | else if (sas_device->device_info & |
2718 | MPI2_SAS_DEVICE_INFO_SATA_DEVICE) |
2719 | ds = "SATA"; |
2720 | } |
2721 | |
2722 | sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "\ |
2723 | "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", |
2724 | ds, handle, (unsigned long long)sas_device->sas_address, |
2725 | sas_device->phy, (unsigned long long)sas_device->device_name); |
2726 | |
2727 | _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); |
2728 | |
2729 | sas_device_put(sas_device); |
2730 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
2731 | |
2732 | if (!ssp_target) |
2733 | _scsih_display_sata_capabilities(ioc, handle, sdev); |
2734 | |
2735 | |
2736 | mpt3sas_scsih_change_queue_depth(sdev, qdepth); |
2737 | |
2738 | if (ssp_target) { |
2739 | sas_read_port_mode_page(sdev); |
2740 | _scsih_enable_tlr(ioc, sdev); |
2741 | } |
2742 | |
2743 | return 0; |
2744 | } |
2745 | |
2746 | /** |
2747 | * scsih_bios_param - fetch head, sector, cylinder info for a disk |
2748 | * @sdev: scsi device struct |
2749 | * @bdev: pointer to block device context |
2750 | * @capacity: device size (in 512 byte sectors) |
2751 | * @params: three element array to place output: |
2752 | * params[0] number of heads (max 255) |
2753 | * params[1] number of sectors (max 63) |
2754 | * params[2] number of cylinders |
2755 | */ |
2756 | static int |
2757 | scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, |
2758 | sector_t capacity, int params[]) |
2759 | { |
2760 | int heads; |
2761 | int sectors; |
2762 | sector_t cylinders; |
2763 | ulong dummy; |
2764 | |
2765 | heads = 64; |
2766 | sectors = 32; |
2767 | |
2768 | dummy = heads * sectors; |
2769 | cylinders = capacity; |
2770 | sector_div(cylinders, dummy); |
2771 | |
2772 | /* |
2773 | * Handle extended translation size for logical drives |
2774 | * > 1Gb |
2775 | */ |
2776 | if ((ulong)capacity >= 0x200000) { |
2777 | heads = 255; |
2778 | sectors = 63; |
2779 | dummy = heads * sectors; |
2780 | cylinders = capacity; |
2781 | sector_div(cylinders, dummy); |
2782 | } |
2783 | |
2784 | /* return result */ |
2785 | params[0] = heads; |
2786 | params[1] = sectors; |
2787 | params[2] = cylinders; |
2788 | |
2789 | return 0; |
2790 | } |
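
/*
 * Worked example (illustrative only): a 1 TiB disk reports
 * capacity = 2147483648 512-byte sectors, which is above the 0x200000
 * (1 GiB) threshold, so the extended translation is used:
 *
 *	heads     = 255
 *	sectors   = 63
 *	cylinders = 2147483648 / (255 * 63) = 133674
 *
 * Smaller devices keep the default 64 heads x 32 sectors geometry.
 */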
2791 | |
2792 | /** |
2793 | * _scsih_response_code - translation of device response code |
2794 | * @ioc: per adapter object |
2795 | * @response_code: response code returned by the device |
2796 | */ |
2797 | static void |
2798 | _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) |
2799 | { |
2800 | char *desc; |
2801 | |
2802 | switch (response_code) { |
2803 | case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: |
2804 | desc = "task management request completed"; |
2805 | break; |
2806 | case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: |
2807 | desc = "invalid frame"; |
2808 | break; |
2809 | case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: |
2810 | desc = "task management request not supported"; |
2811 | break; |
2812 | case MPI2_SCSITASKMGMT_RSP_TM_FAILED: |
2813 | desc = "task management request failed"; |
2814 | break; |
2815 | case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: |
2816 | desc = "task management request succeeded"; |
2817 | break; |
2818 | case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: |
2819 | desc = "invalid lun"; |
2820 | break; |
2821 | case 0xA: |
2822 | desc = "overlapped tag attempted"; |
2823 | break; |
2824 | case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: |
2825 | desc = "task queued, however not sent to target"; |
2826 | break; |
2827 | default: |
2828 | desc = "unknown"; |
2829 | break; |
2830 | } |
2831 | ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); |
2832 | } |
2833 | |
2834 | /** |
2835 | * _scsih_tm_done - tm completion routine |
2836 | * @ioc: per adapter object |
2837 | * @smid: system request message index |
2838 | * @msix_index: MSIX table index supplied by the OS |
2839 | * @reply: reply message frame(lower 32bit addr) |
2840 | * Context: none. |
2841 | * |
2842 | * The callback handler when using scsih_issue_tm. |
2843 | * |
2844 | * Return: 1 meaning mf should be freed from _base_interrupt |
2845 | * 0 means the mf is freed from this function. |
2846 | */ |
2847 | static u8 |
2848 | _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
2849 | { |
2850 | MPI2DefaultReply_t *mpi_reply; |
2851 | |
2852 | if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED) |
2853 | return 1; |
2854 | if (ioc->tm_cmds.smid != smid) |
2855 | return 1; |
2856 | ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; |
2857 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); |
2858 | if (mpi_reply) { |
2859 | memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); |
2860 | ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID; |
2861 | } |
2862 | ioc->tm_cmds.status &= ~MPT3_CMD_PENDING; |
2863 | complete(&ioc->tm_cmds.done); |
2864 | return 1; |
2865 | } |
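
/*
 * Note (illustrative summary): this completion routine pairs with
 * mpt3sas_scsih_issue_tm() below, which sets tm_cmds.status to
 * MPT3_CMD_PENDING, posts the request and then sleeps in
 * wait_for_completion_timeout(&ioc->tm_cmds.done, ...).  The
 * MPT3_CMD_COMPLETE / MPT3_CMD_REPLY_VALID bits set here are what the
 * issuer checks after waking up.
 */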
2866 | |
2867 | /** |
2868 | * mpt3sas_scsih_set_tm_flag - set per target tm_busy |
2869 | * @ioc: per adapter object |
2870 | * @handle: device handle |
2871 | * |
2872 | * During a task management request, we need to freeze the device queue. |
2873 | */ |
2874 | void |
2875 | mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
2876 | { |
2877 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
2878 | struct scsi_device *sdev; |
2879 | u8 skip = 0; |
2880 | |
2881 | shost_for_each_device(sdev, ioc->shost) { |
2882 | if (skip) |
2883 | continue; |
2884 | sas_device_priv_data = sdev->hostdata; |
2885 | if (!sas_device_priv_data) |
2886 | continue; |
2887 | if (sas_device_priv_data->sas_target->handle == handle) { |
2888 | sas_device_priv_data->sas_target->tm_busy = 1; |
2889 | skip = 1; |
2890 | ioc->ignore_loginfos = 1; |
2891 | } |
2892 | } |
2893 | } |
2894 | |
2895 | /** |
2896 | * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy |
2897 | * @ioc: per adapter object |
2898 | * @handle: device handle |
2899 | * |
2900 | * During a task management request, we need to freeze the device queue. |
2901 | */ |
2902 | void |
2903 | mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
2904 | { |
2905 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
2906 | struct scsi_device *sdev; |
2907 | u8 skip = 0; |
2908 | |
2909 | shost_for_each_device(sdev, ioc->shost) { |
2910 | if (skip) |
2911 | continue; |
2912 | sas_device_priv_data = sdev->hostdata; |
2913 | if (!sas_device_priv_data) |
2914 | continue; |
2915 | if (sas_device_priv_data->sas_target->handle == handle) { |
2916 | sas_device_priv_data->sas_target->tm_busy = 0; |
2917 | skip = 1; |
2918 | ioc->ignore_loginfos = 0; |
2919 | } |
2920 | } |
2921 | } |
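
/*
 * Usage sketch (illustrative only): the set/clear pair above brackets a
 * task management request, as done in mpt3sas_scsih_issue_tm():
 *
 *	mpt3sas_scsih_set_tm_flag(ioc, handle);
 *	... build and post the SCSI_TASK_MGMT request, wait for it ...
 *	mpt3sas_scsih_clear_tm_flag(ioc, handle);
 */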
2922 | |
2923 | /** |
2924 | * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status |
2925 | * @ioc: per adapter object |
2926 | * @channel: the channel assigned by the OS |
2927 | * @id: the id assigned by the OS |
2928 | * @lun: lun number |
2929 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) |
2930 | * @smid_task: smid assigned to the task |
2931 | * |
2932 | * Look up whether TM has aborted the timed out SCSI command; if TM has |
2933 | * aborted the IO then return SUCCESS, else return FAILED. |
2934 | */ |
2935 | static int |
2936 | scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel, |
2937 | uint id, uint lun, u8 type, u16 smid_task) |
2938 | { |
2939 | |
2940 | if (smid_task <= ioc->shost->can_queue) { |
2941 | switch (type) { |
2942 | case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: |
2943 | if (!(_scsih_scsi_lookup_find_by_target(ioc, |
2944 | id, channel))) |
2945 | return SUCCESS; |
2946 | break; |
2947 | case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: |
2948 | case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: |
2949 | if (!(_scsih_scsi_lookup_find_by_lun(ioc, id, |
2950 | lun, channel))) |
2951 | return SUCCESS; |
2952 | break; |
2953 | default: |
2954 | return SUCCESS; |
2955 | } |
2956 | } else if (smid_task == ioc->scsih_cmds.smid) { |
2957 | if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) || |
2958 | (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED)) |
2959 | return SUCCESS; |
2960 | } else if (smid_task == ioc->ctl_cmds.smid) { |
2961 | if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) || |
2962 | (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED)) |
2963 | return SUCCESS; |
2964 | } |
2965 | |
2966 | return FAILED; |
2967 | } |
2968 | |
2969 | /** |
2970 | * scsih_tm_post_processing - post processing of target & LUN reset |
2971 | * @ioc: per adapter object |
2972 | * @handle: device handle |
2973 | * @channel: the channel assigned by the OS |
2974 | * @id: the id assigned by the OS |
2975 | * @lun: lun number |
2976 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) |
2977 | * @smid_task: smid assigned to the task |
2978 | * |
2979 | * Post processing of target & LUN reset. Due to interrupt latency |
2980 | * issues, it is possible that the interrupt for the aborted IO has not |
2981 | * been received yet. So before returning a failure status, poll the |
2982 | * reply descriptor pools for the reply of the timed out SCSI command. |
2983 | * Return FAILED status if the reply for the timed out IO is not received, |
2984 | * otherwise return SUCCESS. |
2985 | */ |
2986 | static int |
2987 | scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
2988 | uint channel, uint id, uint lun, u8 type, u16 smid_task) |
2989 | { |
2990 | int rc; |
2991 | |
2992 | rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); |
2993 | if (rc == SUCCESS) |
2994 | return rc; |
2995 | |
2996 | ioc_info(ioc, |
2997 | "Poll ReplyDescriptor queues for completion of" |
2998 | " smid(%d), task_type(0x%02x), handle(0x%04x)\n", |
2999 | smid_task, type, handle); |
3000 | |
3001 | /* |
3002 | * Due to interrupt latency issues, driver may receive interrupt for |
3003 | * TM first and then for aborted SCSI IO command. So, poll all the |
3004 | * ReplyDescriptor pools before returning the FAILED status to SML. |
3005 | */ |
3006 | mpt3sas_base_mask_interrupts(ioc); |
3007 | mpt3sas_base_sync_reply_irqs(ioc, 1); |
3008 | mpt3sas_base_unmask_interrupts(ioc); |
3009 | |
3010 | return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); |
3011 | } |
3012 | |
3013 | /** |
3014 | * mpt3sas_scsih_issue_tm - main routine for sending tm requests |
3015 | * @ioc: per adapter struct |
3016 | * @handle: device handle |
3017 | * @channel: the channel assigned by the OS |
3018 | * @id: the id assigned by the OS |
3019 | * @lun: lun number |
3020 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) |
3021 | * @smid_task: smid assigned to the task |
3022 | * @msix_task: MSIX table index supplied by the OS |
3023 | * @timeout: timeout in seconds |
3024 | * @tr_method: Target Reset Method |
3025 | * Context: user |
3026 | * |
3027 | * A generic API for sending task management requests to firmware. |
3028 | * |
3029 | * The callback index is set inside `ioc->tm_cb_idx`. |
3030 | * The caller is responsible to check for outstanding commands. |
3031 | * |
3032 | * Return: SUCCESS or FAILED. |
3033 | */ |
3034 | int |
3035 | mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, |
3036 | uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task, |
3037 | u8 timeout, u8 tr_method) |
3038 | { |
3039 | Mpi2SCSITaskManagementRequest_t *mpi_request; |
3040 | Mpi2SCSITaskManagementReply_t *mpi_reply; |
3041 | Mpi25SCSIIORequest_t *request; |
3042 | u16 smid = 0; |
3043 | u32 ioc_state; |
3044 | int rc; |
3045 | u8 issue_reset = 0; |
3046 | |
3047 | lockdep_assert_held(&ioc->tm_cmds.mutex); |
3048 | |
3049 | if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { |
3050 | ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__); |
3051 | return FAILED; |
3052 | } |
3053 | |
3054 | if (ioc->shost_recovery || ioc->remove_host || |
3055 | ioc->pci_error_recovery) { |
3056 | ioc_info(ioc, "%s: host reset in progress!\n", __func__); |
3057 | return FAILED; |
3058 | } |
3059 | |
3060 | ioc_state = mpt3sas_base_get_iocstate(ioc, 0); |
3061 | if (ioc_state & MPI2_DOORBELL_USED) { |
3062 | dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); |
3063 | rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); |
3064 | return (!rc) ? SUCCESS : FAILED; |
3065 | } |
3066 | |
3067 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { |
3068 | mpt3sas_print_fault_code(ioc, ioc_state & |
3069 | MPI2_DOORBELL_DATA_MASK); |
3070 | rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); |
3071 | return (!rc) ? SUCCESS : FAILED; |
3072 | } else if ((ioc_state & MPI2_IOC_STATE_MASK) == |
3073 | MPI2_IOC_STATE_COREDUMP) { |
3074 | mpt3sas_print_coredump_info(ioc, ioc_state & |
3075 | MPI2_DOORBELL_DATA_MASK); |
3076 | rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); |
3077 | return (!rc) ? SUCCESS : FAILED; |
3078 | } |
3079 | |
3080 | smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); |
3081 | if (!smid) { |
3082 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
3083 | return FAILED; |
3084 | } |
3085 | |
3086 | dtmprintk(ioc, |
3087 | ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n", |
3088 | handle, type, smid_task, timeout, tr_method)); |
3089 | ioc->tm_cmds.status = MPT3_CMD_PENDING; |
3090 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
3091 | ioc->tm_cmds.smid = smid; |
3092 | memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); |
3093 | memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); |
3094 | mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; |
3095 | mpi_request->DevHandle = cpu_to_le16(handle); |
3096 | mpi_request->TaskType = type; |
3097 | if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || |
3098 | type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) |
3099 | mpi_request->MsgFlags = tr_method; |
3100 | mpi_request->TaskMID = cpu_to_le16(smid_task); |
3101 | int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); |
3102 | mpt3sas_scsih_set_tm_flag(ioc, handle); |
3103 | init_completion(&ioc->tm_cmds.done); |
3104 | ioc->put_smid_hi_priority(ioc, smid, msix_task); |
3105 | wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); |
3106 | if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { |
3107 | mpt3sas_check_cmd_timeout(ioc, |
3108 | ioc->tm_cmds.status, mpi_request, |
3109 | sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset); |
3110 | if (issue_reset) { |
3111 | rc = mpt3sas_base_hard_reset_handler(ioc, |
3112 | FORCE_BIG_HAMMER); |
3113 | rc = (!rc) ? SUCCESS : FAILED; |
3114 | goto out; |
3115 | } |
3116 | } |
3117 | |
3118 | /* sync IRQs in case those were busy during flush. */ |
3119 | mpt3sas_base_sync_reply_irqs(ioc, 0); |
3120 | |
3121 | if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { |
3122 | mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); |
3123 | mpi_reply = ioc->tm_cmds.reply; |
3124 | dtmprintk(ioc, |
3125 | ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", |
3126 | le16_to_cpu(mpi_reply->IOCStatus), |
3127 | le32_to_cpu(mpi_reply->IOCLogInfo), |
3128 | le32_to_cpu(mpi_reply->TerminationCount))); |
3129 | if (ioc->logging_level & MPT_DEBUG_TM) { |
3130 | _scsih_response_code(ioc, mpi_reply->ResponseCode); |
3131 | if (mpi_reply->IOCStatus) |
3132 | _debug_dump_mf(mpi_request, |
3133 | sizeof(Mpi2SCSITaskManagementRequest_t)/4); |
3134 | } |
3135 | } |
3136 | |
3137 | switch (type) { |
3138 | case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: |
3139 | rc = SUCCESS; |
3140 | /* |
3141 | * If the DevHandle field in smid_task's entry of the request pool |
3142 | * doesn't match the device handle on which this task abort TM was |
3143 | * received, it means that the TM has successfully aborted the timed |
3144 | * out command, since smid_task's entry in the request pool is memset |
3145 | * to zero once the timed out command is returned to the SML. If the |
3146 | * command was not aborted then smid_task's entry won't be cleared, it |
3147 | * will still carry the DevHandle on which this task abort TM was |
3148 | * received, and the driver will return the TM status as FAILED. |
3149 | */ |
3150 | request = mpt3sas_base_get_msg_frame(ioc, smid_task); |
3151 | if (le16_to_cpu(request->DevHandle) != handle) |
3152 | if (le16_to_cpu(request->DevHandle) != handle) |
3153 | break; |
3154 | |
3155 | ioc_info(ioc, "Task abort tm failed: handle(0x%04x)," |
3156 | "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n", |
3157 | handle, timeout, tr_method, smid_task, msix_task); |
3158 | rc = FAILED; |
3159 | break; |
3160 | |
3161 | case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: |
3162 | case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: |
3163 | case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: |
3164 | rc = scsih_tm_post_processing(ioc, handle, channel, id, lun, |
3165 | type, smid_task); |
3166 | break; |
3167 | case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: |
3168 | rc = SUCCESS; |
3169 | break; |
3170 | default: |
3171 | rc = FAILED; |
3172 | break; |
3173 | } |
3174 | |
3175 | out: |
3176 | mpt3sas_scsih_clear_tm_flag(ioc, handle); |
3177 | ioc->tm_cmds.status = MPT3_CMD_NOT_USED; |
3178 | return rc; |
3179 | } |
3180 | |
3181 | int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
3182 | uint channel, uint id, u64 lun, u8 type, u16 smid_task, |
3183 | u16 msix_task, u8 timeout, u8 tr_method) |
3184 | { |
3185 | int ret; |
3186 | |
3187 | mutex_lock(&ioc->tm_cmds.mutex); |
3188 | ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type, |
3189 | smid_task, msix_task, timeout, tr_method); |
3190 | mutex_unlock(&ioc->tm_cmds.mutex); |
3191 | |
3192 | return ret; |
3193 | } |
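| |
| /* |
| * Illustrative sketch only (not part of the driver): a typical caller |
| * issues a LUN reset through the locked wrapper roughly as below, |
| * mirroring scsih_dev_reset() further down. The 30-second timeout and |
| * the link-reset tr_method are example values, not requirements. |
| * |
| * rc = mpt3sas_scsih_issue_locked_tm(ioc, handle, |
| *         sdev->channel, sdev->id, sdev->lun, |
| *         MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, |
| *         0, 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); |
| * if (rc != SUCCESS) |
| *         escalate to a harder reset; |
| */ |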
3194 | |
3195 | /** |
3196 | * _scsih_tm_display_info - displays info about the device |
3197 | * @ioc: per adapter struct |
3198 | * @scmd: pointer to scsi command object |
3199 | * |
3200 | * Called by task management callback handlers. |
3201 | */ |
3202 | static void |
3203 | _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) |
3204 | { |
3205 | struct scsi_target *starget = scmd->device->sdev_target; |
3206 | struct MPT3SAS_TARGET *priv_target = starget->hostdata; |
3207 | struct _sas_device *sas_device = NULL; |
3208 | struct _pcie_device *pcie_device = NULL; |
3209 | unsigned long flags; |
3210 | char *device_str = NULL; |
3211 | |
3212 | if (!priv_target) |
3213 | return; |
3214 | if (ioc->hide_ir_msg) |
3215 | device_str = "WarpDrive"; |
3216 | else |
3217 | device_str = "volume"; |
3218 | |
3219 | scsi_print_command(scmd); |
3220 | if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { |
3221 | starget_printk(KERN_INFO, starget, |
3222 | "%s handle(0x%04x), %s wwid(0x%016llx)\n", |
3223 | device_str, priv_target->handle, |
3224 | device_str, (unsigned long long)priv_target->sas_address); |
3225 | |
3226 | } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { |
3227 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
3228 | pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target); |
3229 | if (pcie_device) { |
3230 | starget_printk(KERN_INFO, starget, |
3231 | "handle(0x%04x), wwid(0x%016llx), port(%d)\n", |
3232 | pcie_device->handle, |
3233 | (unsigned long long)pcie_device->wwid, |
3234 | pcie_device->port_num); |
3235 | if (pcie_device->enclosure_handle != 0) |
3236 | starget_printk(KERN_INFO, starget, |
3237 | "enclosure logical id(0x%016llx), slot(%d)\n", |
3238 | (unsigned long long) |
3239 | pcie_device->enclosure_logical_id, |
3240 | pcie_device->slot); |
3241 | if (pcie_device->connector_name[0] != '\0') |
3242 | starget_printk(KERN_INFO, starget, |
3243 | "enclosure level(0x%04x), connector name( %s)\n", |
3244 | pcie_device->enclosure_level, |
3245 | pcie_device->connector_name); |
3246 | pcie_device_put(pcie_device); |
3247 | } |
3248 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
3249 | |
3250 | } else { |
3251 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
3252 | sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target); |
3253 | if (sas_device) { |
3254 | if (priv_target->flags & |
3255 | MPT_TARGET_FLAGS_RAID_COMPONENT) { |
3256 | starget_printk(KERN_INFO, starget, |
3257 | "volume handle(0x%04x), " |
3258 | "volume wwid(0x%016llx)\n", |
3259 | sas_device->volume_handle, |
3260 | (unsigned long long)sas_device->volume_wwid); |
3261 | } |
3262 | starget_printk(KERN_INFO, starget, |
3263 | "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", |
3264 | sas_device->handle, |
3265 | (unsigned long long)sas_device->sas_address, |
3266 | sas_device->phy); |
3267 | |
3268 | _scsih_display_enclosure_chassis_info(NULL, sas_device, |
3269 | NULL, starget); |
3270 | |
3271 | sas_device_put(sas_device); |
3272 | } |
3273 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
3274 | } |
3275 | } |
3276 | |
3277 | /** |
3278 | * scsih_abort - eh threads main abort routine |
3279 | * @scmd: pointer to scsi command object |
3280 | * |
3281 | * Return: SUCCESS if command aborted else FAILED |
3282 | */ |
3283 | static int |
3284 | scsih_abort(struct scsi_cmnd *scmd) |
3285 | { |
3286 | struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); |
3287 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3288 | struct scsiio_tracker *st = scsi_cmd_priv(scmd); |
3289 | u16 handle; |
3290 | int r; |
3291 | |
3292 | u8 timeout = 30; |
3293 | struct _pcie_device *pcie_device = NULL; |
3294 | sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" |
3295 | "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", |
3296 | scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), |
3297 | (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); |
3298 | _scsih_tm_display_info(ioc, scmd); |
3299 | |
3300 | sas_device_priv_data = scmd->device->hostdata; |
3301 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
3302 | ioc->remove_host) { |
3303 | sdev_printk(KERN_INFO, scmd->device, |
3304 | "device been deleted! scmd(0x%p)\n", scmd); |
3305 | scmd->result = DID_NO_CONNECT << 16; |
3306 | scsi_done(scmd); |
3307 | r = SUCCESS; |
3308 | goto out; |
3309 | } |
3310 | |
3311 | /* check for completed command */ |
3312 | if (st == NULL || st->cb_idx == 0xFF) { |
3313 | sdev_printk(KERN_INFO, scmd->device, "No reference found at " |
3314 | "driver, assuming scmd(0x%p) might have completed\n", scmd); |
3315 | scmd->result = DID_RESET << 16; |
3316 | r = SUCCESS; |
3317 | goto out; |
3318 | } |
3319 | |
3320 | /* for hidden raid components and volumes this is not supported */ |
3321 | if (sas_device_priv_data->sas_target->flags & |
3322 | MPT_TARGET_FLAGS_RAID_COMPONENT || |
3323 | sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { |
3324 | scmd->result = DID_RESET << 16; |
3325 | r = FAILED; |
3326 | goto out; |
3327 | } |
3328 | |
3329 | mpt3sas_halt_firmware(ioc); |
3330 | |
3331 | handle = sas_device_priv_data->sas_target->handle; |
3332 | pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); |
3333 | if (pcie_device && (!ioc->tm_custom_handling) && |
3334 | (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) |
3335 | timeout = ioc->nvme_abort_timeout; |
3336 | r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, |
3337 | scmd->device->id, scmd->device->lun, |
3338 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, |
3339 | st->smid, st->msix_io, timeout, 0); |
3340 | /* Command must be cleared after abort */ |
3341 | if (r == SUCCESS && st->cb_idx != 0xFF) |
3342 | r = FAILED; |
3343 | out: |
3344 | sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", |
3345 | ((r == SUCCESS) ? "SUCCESS": "FAILED"), scmd); |
3346 | if (pcie_device) |
3347 | pcie_device_put(pcie_device); |
3348 | return r; |
3349 | } |
3350 | |
3351 | /** |
3352 | * scsih_dev_reset - eh threads main device reset routine |
3353 | * @scmd: pointer to scsi command object |
3354 | * |
3355 | * Return: SUCCESS if command aborted else FAILED |
3356 | */ |
3357 | static int |
3358 | scsih_dev_reset(struct scsi_cmnd *scmd) |
3359 | { |
3360 | struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); |
3361 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3362 | struct _sas_device *sas_device = NULL; |
3363 | struct _pcie_device *pcie_device = NULL; |
3364 | u16 handle; |
3365 | u8 tr_method = 0; |
3366 | u8 tr_timeout = 30; |
3367 | int r; |
3368 | |
3369 | struct scsi_target *starget = scmd->device->sdev_target; |
3370 | struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; |
3371 | |
3372 | sdev_printk(KERN_INFO, scmd->device, |
3373 | "attempting device reset! scmd(0x%p)\n", scmd); |
3374 | _scsih_tm_display_info(ioc, scmd); |
3375 | |
3376 | sas_device_priv_data = scmd->device->hostdata; |
3377 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
3378 | ioc->remove_host) { |
3379 | sdev_printk(KERN_INFO, scmd->device, |
3380 | "device been deleted! scmd(0x%p)\n", scmd); |
3381 | scmd->result = DID_NO_CONNECT << 16; |
3382 | scsi_done(scmd); |
3383 | r = SUCCESS; |
3384 | goto out; |
3385 | } |
3386 | |
3387 | /* for hidden raid components obtain the volume_handle */ |
3388 | handle = 0; |
3389 | if (sas_device_priv_data->sas_target->flags & |
3390 | MPT_TARGET_FLAGS_RAID_COMPONENT) { |
3391 | sas_device = mpt3sas_get_sdev_from_target(ioc, |
3392 | target_priv_data); |
3393 | if (sas_device) |
3394 | handle = sas_device->volume_handle; |
3395 | } else |
3396 | handle = sas_device_priv_data->sas_target->handle; |
3397 | |
3398 | if (!handle) { |
3399 | scmd->result = DID_RESET << 16; |
3400 | r = FAILED; |
3401 | goto out; |
3402 | } |
3403 | |
3404 | pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); |
3405 | |
3406 | if (pcie_device && (!ioc->tm_custom_handling) && |
3407 | (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { |
3408 | tr_timeout = pcie_device->reset_timeout; |
3409 | tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; |
3410 | } else |
3411 | tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; |
3412 | |
3413 | r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, |
3414 | scmd->device->id, scmd->device->lun, |
3415 | MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, |
3416 | tr_timeout, tr_method); |
3417 | /* Check for busy commands after reset */ |
3418 | if (r == SUCCESS && scsi_device_busy(scmd->device)) |
3419 | r = FAILED; |
3420 | out: |
3421 | sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", |
3422 | ((r == SUCCESS) ? "SUCCESS": "FAILED"), scmd); |
3423 | |
3424 | if (sas_device) |
3425 | sas_device_put(sas_device); |
3426 | if (pcie_device) |
3427 | pcie_device_put(pcie_device); |
3428 | |
3429 | return r; |
3430 | } |
3431 | |
3432 | /** |
3433 | * scsih_target_reset - eh threads main target reset routine |
3434 | * @scmd: pointer to scsi command object |
3435 | * |
3436 | * Return: SUCCESS if command aborted else FAILED |
3437 | */ |
3438 | static int |
3439 | scsih_target_reset(struct scsi_cmnd *scmd) |
3440 | { |
3441 | struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); |
3442 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3443 | struct _sas_device *sas_device = NULL; |
3444 | struct _pcie_device *pcie_device = NULL; |
3445 | u16 handle; |
3446 | u8 tr_method = 0; |
3447 | u8 tr_timeout = 30; |
3448 | int r; |
3449 | struct scsi_target *starget = scmd->device->sdev_target; |
3450 | struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; |
3451 | |
3452 | starget_printk(KERN_INFO, starget, |
3453 | "attempting target reset! scmd(0x%p)\n", scmd); |
3454 | _scsih_tm_display_info(ioc, scmd); |
3455 | |
3456 | sas_device_priv_data = scmd->device->hostdata; |
3457 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
3458 | ioc->remove_host) { |
3459 | starget_printk(KERN_INFO, starget, |
3460 | "target been deleted! scmd(0x%p)\n", scmd); |
3461 | scmd->result = DID_NO_CONNECT << 16; |
3462 | scsi_done(scmd); |
3463 | r = SUCCESS; |
3464 | goto out; |
3465 | } |
3466 | |
3467 | /* for hidden raid components obtain the volume_handle */ |
3468 | handle = 0; |
3469 | if (sas_device_priv_data->sas_target->flags & |
3470 | MPT_TARGET_FLAGS_RAID_COMPONENT) { |
3471 | sas_device = mpt3sas_get_sdev_from_target(ioc, |
3472 | target_priv_data); |
3473 | if (sas_device) |
3474 | handle = sas_device->volume_handle; |
3475 | } else |
3476 | handle = sas_device_priv_data->sas_target->handle; |
3477 | |
3478 | if (!handle) { |
3479 | scmd->result = DID_RESET << 16; |
3480 | r = FAILED; |
3481 | goto out; |
3482 | } |
3483 | |
3484 | pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); |
3485 | |
3486 | if (pcie_device && (!ioc->tm_custom_handling) && |
3487 | (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { |
3488 | tr_timeout = pcie_device->reset_timeout; |
3489 | tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; |
3490 | } else |
3491 | tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; |
3492 | r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, |
3493 | scmd->device->id, 0, |
3494 | MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, |
3495 | tr_timeout, tr_method); |
3496 | /* Check for busy commands after reset */ |
3497 | if (r == SUCCESS && atomic_read(&starget->target_busy)) |
3498 | r = FAILED; |
3499 | out: |
3500 | starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", |
3501 | ((r == SUCCESS) ? "SUCCESS": "FAILED"), scmd); |
3502 | |
3503 | if (sas_device) |
3504 | sas_device_put(sas_device); |
3505 | if (pcie_device) |
3506 | pcie_device_put(pcie_device); |
3507 | return r; |
3508 | } |
3509 | |
3510 | |
3511 | /** |
3512 | * scsih_host_reset - eh threads main host reset routine |
3513 | * @scmd: pointer to scsi command object |
3514 | * |
3515 | * Return: SUCCESS if command aborted else FAILED |
3516 | */ |
3517 | static int |
3518 | scsih_host_reset(struct scsi_cmnd *scmd) |
3519 | { |
3520 | struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); |
3521 | int r, retval; |
3522 | |
3523 | ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd); |
3524 | scsi_print_command(scmd); |
3525 | |
3526 | if (ioc->is_driver_loading || ioc->remove_host) { |
3527 | ioc_info(ioc, "Blocking the host reset\n"); |
3528 | r = FAILED; |
3529 | goto out; |
3530 | } |
3531 | |
3532 | retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); |
3533 | r = (retval < 0) ? FAILED : SUCCESS; |
3534 | out: |
3535 | ioc_info(ioc, "host reset: %s scmd(0x%p)\n", |
3536 | r == SUCCESS ? "SUCCESS": "FAILED", scmd); |
3537 | |
3538 | return r; |
3539 | } |
3540 | |
3541 | /** |
3542 | * _scsih_fw_event_add - insert and queue up fw_event |
3543 | * @ioc: per adapter object |
3544 | * @fw_event: object describing the event |
3545 | * Context: This function will acquire ioc->fw_event_lock. |
3546 | * |
3547 | * This adds the firmware event object to the linked list, then queues |
3548 | * it up to be processed from user context. |
3549 | */ |
3550 | static void |
3551 | _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) |
3552 | { |
3553 | unsigned long flags; |
3554 | |
3555 | if (ioc->firmware_event_thread == NULL) |
3556 | return; |
3557 | |
3558 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
3559 | fw_event_work_get(fw_event); |
3560 | INIT_LIST_HEAD(&fw_event->list); |
3561 | list_add_tail(&fw_event->list, &ioc->fw_event_list); |
3562 | INIT_WORK(&fw_event->work, _firmware_event_work); |
3563 | fw_event_work_get(fw_event); |
3564 | queue_work(ioc->firmware_event_thread, &fw_event->work); |
3565 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
3566 | } |
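| |
| /* |
| * Illustrative sketch only (not part of the driver): a caller that |
| * queues a firmware event still drops its own allocation reference, |
| * since _scsih_fw_event_add() takes separate references for the list |
| * and for the queued work (see mpt3sas_port_enable_complete() below): |
| * |
| * fw_event = alloc_fw_event_work(0); |
| * if (!fw_event) |
| *         return; |
| * fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; |
| * fw_event->ioc = ioc; |
| * _scsih_fw_event_add(ioc, fw_event); |
| * fw_event_work_put(fw_event); |
| */ |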
3567 | |
3568 | /** |
3569 | * _scsih_fw_event_del_from_list - delete fw_event from the list |
3570 | * @ioc: per adapter object |
3571 | * @fw_event: object describing the event |
3572 | * Context: This function will acquire ioc->fw_event_lock. |
3573 | * |
3574 | * If the fw_event is on the fw_event_list, remove it and do a put. |
3575 | */ |
3576 | static void |
3577 | _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work |
3578 | *fw_event) |
3579 | { |
3580 | unsigned long flags; |
3581 | |
3582 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
3583 | if (!list_empty(&fw_event->list)) { |
3584 | list_del_init(&fw_event->list); |
3585 | fw_event_work_put(fw_event); |
3586 | } |
3587 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
3588 | } |
3589 | |
3590 | |
3591 | /** |
3592 | * mpt3sas_send_trigger_data_event - send event for processing trigger data |
3593 | * @ioc: per adapter object |
3594 | * @event_data: trigger event data |
3595 | */ |
3596 | void |
3597 | mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, |
3598 | struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) |
3599 | { |
3600 | struct fw_event_work *fw_event; |
3601 | u16 sz; |
3602 | |
3603 | if (ioc->is_driver_loading) |
3604 | return; |
3605 | sz = sizeof(*event_data); |
3606 | fw_event = alloc_fw_event_work(sz); |
3607 | if (!fw_event) |
3608 | return; |
3609 | fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; |
3610 | fw_event->ioc = ioc; |
3611 | memcpy(fw_event->event_data, event_data, sizeof(*event_data)); |
3612 | _scsih_fw_event_add(ioc, fw_event); |
3613 | fw_event_work_put(fw_event); |
3614 | } |
3615 | |
3616 | /** |
3617 | * _scsih_error_recovery_delete_devices - remove devices not responding |
3618 | * @ioc: per adapter object |
3619 | */ |
3620 | static void |
3621 | _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) |
3622 | { |
3623 | struct fw_event_work *fw_event; |
3624 | |
3625 | fw_event = alloc_fw_event_work(0); |
3626 | if (!fw_event) |
3627 | return; |
3628 | fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; |
3629 | fw_event->ioc = ioc; |
3630 | _scsih_fw_event_add(ioc, fw_event); |
3631 | fw_event_work_put(fw_event); |
3632 | } |
3633 | |
3634 | /** |
3635 | * mpt3sas_port_enable_complete - port enable completed (fake event) |
3636 | * @ioc: per adapter object |
3637 | */ |
3638 | void |
3639 | mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) |
3640 | { |
3641 | struct fw_event_work *fw_event; |
3642 | |
3643 | fw_event = alloc_fw_event_work(0); |
3644 | if (!fw_event) |
3645 | return; |
3646 | fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; |
3647 | fw_event->ioc = ioc; |
3648 | _scsih_fw_event_add(ioc, fw_event); |
3649 | fw_event_work_put(fw_event); |
3650 | } |
3651 | |
3652 | static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) |
3653 | { |
3654 | unsigned long flags; |
3655 | struct fw_event_work *fw_event = NULL; |
3656 | |
3657 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
3658 | if (!list_empty(&ioc->fw_event_list)) { |
3659 | fw_event = list_first_entry(&ioc->fw_event_list, |
3660 | struct fw_event_work, list); |
3661 | list_del_init(&fw_event->list); |
3662 | fw_event_work_put(fw_event); |
3663 | } |
3664 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
3665 | |
3666 | return fw_event; |
3667 | } |
3668 | |
3669 | /** |
3670 | * _scsih_fw_event_cleanup_queue - cleanup event queue |
3671 | * @ioc: per adapter object |
3672 | * |
3673 | * Walk the firmware event queue, either killing timers, or waiting |
3674 | * for outstanding events to complete |
3675 | * |
3676 | * Context: task, can sleep |
3677 | */ |
3678 | static void |
3679 | _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) |
3680 | { |
3681 | struct fw_event_work *fw_event; |
3682 | |
3683 | if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || |
3684 | !ioc->firmware_event_thread) |
3685 | return; |
3686 | /* |
3687 | * Set current running event as ignore, so that |
3688 | * current running event will exit quickly. |
3689 | * As diag reset has occurred it is of no use |
3690 | * to process remaining stale event data entries. |
3691 | */ |
3692 | if (ioc->shost_recovery && ioc->current_event) |
3693 | ioc->current_event->ignore = 1; |
3694 | |
3695 | ioc->fw_events_cleanup = 1; |
3696 | while ((fw_event = dequeue_next_fw_event(ioc)) || |
3697 | (fw_event = ioc->current_event)) { |
3698 | |
3699 | /* |
3700 | * Don't call cancel_work_sync() for a current_event other |
3701 | * than MPT3SAS_REMOVE_UNRESPONDING_DEVICES; otherwise we may |
3702 | * deadlock if a hard reset is issued as part of processing |
3703 | * the current_event. |
3704 | * |
3705 | * The original logic of cleaning up the current_event was |
3706 | * added to handle back-to-back host resets issued by the |
3707 | * user, i.e. during back-to-back host resets the driver used |
3708 | * to process two MPT3SAS_REMOVE_UNRESPONDING_DEVICES events |
3709 | * back to back, which caused the driver to unregister the |
3710 | * devices from the SML. |
3711 | */ |
3712 | |
3713 | if (fw_event == ioc->current_event && |
3714 | ioc->current_event->event != |
3715 | MPT3SAS_REMOVE_UNRESPONDING_DEVICES) { |
3716 | ioc->current_event = NULL; |
3717 | continue; |
3718 | } |
3719 | |
3720 | /* |
3721 | * Driver has to clear ioc->start_scan flag when |
3722 | * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE, |
3723 | * otherwise scsi_scan_host() API waits for the |
3724 | * 5 minute timer to expire. If we exit from |
3725 | * scsi_scan_host() early then we can issue the |
3726 | * new port enable request as part of current diag reset. |
3727 | */ |
3728 | if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) { |
3729 | ioc->port_enable_cmds.status |= MPT3_CMD_RESET; |
3730 | ioc->start_scan = 0; |
3731 | } |
3732 | |
3733 | /* |
3734 | * Wait on the fw_event to complete. If this returns 1, then |
3735 | * the event was never executed, and we need a put for the |
3736 | * reference the work had on the fw_event. |
3737 | * |
3738 | * If it did execute, we wait for it to finish, and the put will |
3739 | * happen from _firmware_event_work() |
3740 | */ |
3741 | if (cancel_work_sync(&fw_event->work)) |
3742 | fw_event_work_put(fw_event); |
3743 | |
3744 | } |
3745 | ioc->fw_events_cleanup = 0; |
3746 | } |
3747 | |
3748 | /** |
3749 | * _scsih_internal_device_block - block the sdev device |
3750 | * @sdev: per device object |
3751 | * @sas_device_priv_data : per device driver private data |
3752 | * |
3753 | * Make sure the device is blocked without error; if not, |
3754 | * print an error. |
3755 | */ |
3756 | static void |
3757 | _scsih_internal_device_block(struct scsi_device *sdev, |
3758 | struct MPT3SAS_DEVICE *sas_device_priv_data) |
3759 | { |
3760 | int r = 0; |
3761 | |
3762 | sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n", |
3763 | sas_device_priv_data->sas_target->handle); |
3764 | sas_device_priv_data->block = 1; |
3765 | |
3766 | r = scsi_internal_device_block_nowait(sdev); |
3767 | if (r == -EINVAL) |
3768 | sdev_printk(KERN_WARNING, sdev, |
3769 | "device_block failed with return(%d) for handle(0x%04x)\n", |
3770 | r, sas_device_priv_data->sas_target->handle); |
3771 | } |
3772 | |
3773 | /** |
3774 | * _scsih_internal_device_unblock - unblock the sdev device |
3775 | * @sdev: per device object |
3776 | * @sas_device_priv_data : per device driver private data |
3777 | * make sure device is unblocked without error, if not retry |
3778 | * by blocking and then unblocking |
3779 | */ |
3780 | |
3781 | static void |
3782 | _scsih_internal_device_unblock(struct scsi_device *sdev, |
3783 | struct MPT3SAS_DEVICE *sas_device_priv_data) |
3784 | { |
3785 | int r = 0; |
3786 | |
3787 | sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, " |
3788 | "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); |
3789 | sas_device_priv_data->block = 0; |
3790 | r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); |
3791 | if (r == -EINVAL) { |
3792 | /* The device has been set to SDEV_RUNNING by SD layer during |
3793 | * device addition but the request queue is still stopped by |
3794 | * our earlier block call. We need to perform a block again |
3795 | * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ |
3796 | |
3797 | sdev_printk(KERN_WARNING, sdev, |
3798 | "device_unblock failed with return(%d) for handle(0x%04x) " |
3799 | "performing a block followed by an unblock\n", |
3800 | r, sas_device_priv_data->sas_target->handle); |
3801 | sas_device_priv_data->block = 1; |
3802 | r = scsi_internal_device_block_nowait(sdev); |
3803 | if (r) |
3804 | sdev_printk(KERN_WARNING, sdev, "retried device_block " |
3805 | "failed with return(%d) for handle(0x%04x)\n", |
3806 | r, sas_device_priv_data->sas_target->handle); |
3807 | |
3808 | sas_device_priv_data->block = 0; |
3809 | r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); |
3810 | if (r) |
3811 | sdev_printk(KERN_WARNING, sdev, "retried device_unblock" |
3812 | " failed with return(%d) for handle(0x%04x)\n", |
3813 | r, sas_device_priv_data->sas_target->handle); |
3814 | } |
3815 | } |
3816 | |
3817 | /** |
3818 | * _scsih_ublock_io_all_device - unblock every device |
3819 | * @ioc: per adapter object |
3820 | * |
3821 | * change the device state from block to running |
3822 | */ |
3823 | static void |
3824 | _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) |
3825 | { |
3826 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3827 | struct scsi_device *sdev; |
3828 | |
3829 | shost_for_each_device(sdev, ioc->shost) { |
3830 | sas_device_priv_data = sdev->hostdata; |
3831 | if (!sas_device_priv_data) |
3832 | continue; |
3833 | if (!sas_device_priv_data->block) |
3834 | continue; |
3835 | |
3836 | dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, |
3837 | "device_running, handle(0x%04x)\n", |
3838 | sas_device_priv_data->sas_target->handle)); |
3839 | _scsih_internal_device_unblock(sdev, sas_device_priv_data); |
3840 | } |
3841 | } |
3842 | |
3843 | |
3844 | /** |
3845 | * _scsih_ublock_io_device - prepare device to be deleted |
3846 | * @ioc: per adapter object |
3847 | * @sas_address: sas address |
3848 | * @port: hba port entry |
3849 | * |
3850 | * unblock then put device in offline state |
3851 | */ |
3852 | static void |
3853 | _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, |
3854 | u64 sas_address, struct hba_port *port) |
3855 | { |
3856 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3857 | struct scsi_device *sdev; |
3858 | |
3859 | shost_for_each_device(sdev, ioc->shost) { |
3860 | sas_device_priv_data = sdev->hostdata; |
3861 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) |
3862 | continue; |
3863 | if (sas_device_priv_data->sas_target->sas_address |
3864 | != sas_address) |
3865 | continue; |
3866 | if (sas_device_priv_data->sas_target->port != port) |
3867 | continue; |
3868 | if (sas_device_priv_data->block) |
3869 | _scsih_internal_device_unblock(sdev, |
3870 | sas_device_priv_data); |
3871 | } |
3872 | } |
3873 | |
3874 | /** |
3875 | * _scsih_block_io_all_device - set the device state to SDEV_BLOCK |
3876 | * @ioc: per adapter object |
3877 | * |
3878 | * During device pull we need to appropriately set the sdev state. |
3879 | */ |
3880 | static void |
3881 | _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) |
3882 | { |
3883 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3884 | struct scsi_device *sdev; |
3885 | |
3886 | shost_for_each_device(sdev, ioc->shost) { |
3887 | sas_device_priv_data = sdev->hostdata; |
3888 | if (!sas_device_priv_data) |
3889 | continue; |
3890 | if (sas_device_priv_data->block) |
3891 | continue; |
3892 | if (sas_device_priv_data->ignore_delay_remove) { |
3893 | sdev_printk(KERN_INFO, sdev, |
3894 | "%s skip device_block for SES handle(0x%04x)\n", |
3895 | __func__, sas_device_priv_data->sas_target->handle); |
3896 | continue; |
3897 | } |
3898 | _scsih_internal_device_block(sdev, sas_device_priv_data); |
3899 | } |
3900 | } |
3901 | |
3902 | /** |
3903 | * _scsih_block_io_device - set the device state to SDEV_BLOCK |
3904 | * @ioc: per adapter object |
3905 | * @handle: device handle |
3906 | * |
3907 | * During device pull we need to appropriately set the sdev state. |
3908 | */ |
3909 | static void |
3910 | _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
3911 | { |
3912 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
3913 | struct scsi_device *sdev; |
3914 | struct _sas_device *sas_device; |
3915 | |
3916 | sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); |
3917 | |
3918 | shost_for_each_device(sdev, ioc->shost) { |
3919 | sas_device_priv_data = sdev->hostdata; |
3920 | if (!sas_device_priv_data) |
3921 | continue; |
3922 | if (sas_device_priv_data->sas_target->handle != handle) |
3923 | continue; |
3924 | if (sas_device_priv_data->block) |
3925 | continue; |
3926 | if (sas_device && sas_device->pend_sas_rphy_add) |
3927 | continue; |
3928 | if (sas_device_priv_data->ignore_delay_remove) { |
3929 | sdev_printk(KERN_INFO, sdev, |
3930 | "%s skip device_block for SES handle(0x%04x)\n", |
3931 | __func__, sas_device_priv_data->sas_target->handle); |
3932 | continue; |
3933 | } |
3934 | _scsih_internal_device_block(sdev, sas_device_priv_data); |
3935 | } |
3936 | |
3937 | if (sas_device) |
3938 | sas_device_put(sas_device); |
3939 | } |
3940 | |
3941 | /** |
3942 | * _scsih_block_io_to_children_attached_to_ex |
3943 | * @ioc: per adapter object |
3944 | * @sas_expander: the sas_device object |
3945 | * |
3946 | * This routine sets the sdev state to SDEV_BLOCK for all devices |
3947 | * attached to this expander. This function is called when the |
3948 | * expander is pulled. |
3949 | */ |
3950 | static void |
3951 | _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc, |
3952 | struct _sas_node *sas_expander) |
3953 | { |
3954 | struct _sas_port *mpt3sas_port; |
3955 | struct _sas_device *sas_device; |
3956 | struct _sas_node *expander_sibling; |
3957 | unsigned long flags; |
3958 | |
3959 | if (!sas_expander) |
3960 | return; |
3961 | |
3962 | list_for_each_entry(mpt3sas_port, |
3963 | &sas_expander->sas_port_list, port_list) { |
3964 | if (mpt3sas_port->remote_identify.device_type == |
3965 | SAS_END_DEVICE) { |
3966 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
3967 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, |
3968 | mpt3sas_port->remote_identify.sas_address, |
3969 | mpt3sas_port->hba_port); |
3970 | if (sas_device) { |
3971 | set_bit(sas_device->handle, |
3972 | ioc->blocking_handles); |
3973 | sas_device_put(sas_device); |
3974 | } |
3975 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
3976 | } |
3977 | } |
3978 | |
3979 | list_for_each_entry(mpt3sas_port, |
3980 | &sas_expander->sas_port_list, port_list) { |
3981 | |
3982 | if (mpt3sas_port->remote_identify.device_type == |
3983 | SAS_EDGE_EXPANDER_DEVICE || |
3984 | mpt3sas_port->remote_identify.device_type == |
3985 | SAS_FANOUT_EXPANDER_DEVICE) { |
3986 | expander_sibling = |
3987 | mpt3sas_scsih_expander_find_by_sas_address( |
3988 | ioc, mpt3sas_port->remote_identify.sas_address, |
3989 | mpt3sas_port->hba_port); |
3990 | _scsih_block_io_to_children_attached_to_ex(ioc, |
3991 | expander_sibling); |
3992 | } |
3993 | } |
3994 | } |
3995 | |
3996 | /** |
3997 | * _scsih_block_io_to_children_attached_directly |
3998 | * @ioc: per adapter object |
3999 | * @event_data: topology change event data |
4000 | * |
4001 | * This routine sets the sdev state to SDEV_BLOCK for all devices |
4002 | * directly attached during a device pull. |
4003 | */ |
4004 | static void |
4005 | _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, |
4006 | Mpi2EventDataSasTopologyChangeList_t *event_data) |
4007 | { |
4008 | int i; |
4009 | u16 handle; |
4010 | u16 reason_code; |
4011 | |
4012 | for (i = 0; i < event_data->NumEntries; i++) { |
4013 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); |
4014 | if (!handle) |
4015 | continue; |
4016 | reason_code = event_data->PHY[i].PhyStatus & |
4017 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
4018 | if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) |
4019 | _scsih_block_io_device(ioc, handle); |
4020 | } |
4021 | } |
4022 | |
4023 | /** |
4024 | * _scsih_block_io_to_pcie_children_attached_directly |
4025 | * @ioc: per adapter object |
4026 | * @event_data: topology change event data |
4027 | * |
4028 | * This routine sets the sdev state to SDEV_BLOCK for all devices |
4029 | * directly attached during a device pull/reconnect. |
4030 | */ |
4031 | static void |
4032 | _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, |
4033 | Mpi26EventDataPCIeTopologyChangeList_t *event_data) |
4034 | { |
4035 | int i; |
4036 | u16 handle; |
4037 | u16 reason_code; |
4038 | |
4039 | for (i = 0; i < event_data->NumEntries; i++) { |
4040 | handle = |
4041 | le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); |
4042 | if (!handle) |
4043 | continue; |
4044 | reason_code = event_data->PortEntry[i].PortStatus; |
4045 | if (reason_code == |
4046 | MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) |
4047 | _scsih_block_io_device(ioc, handle); |
4048 | } |
4049 | } |
4050 | /** |
4051 | * _scsih_tm_tr_send - send task management request |
4052 | * @ioc: per adapter object |
4053 | * @handle: device handle |
4054 | * Context: interrupt time. |
4055 | * |
4056 | * This code is to initiate the device removal handshake protocol |
4057 | * with the controller firmware. This function issues a target reset |
4058 | * using the high priority request queue. A sas iounit control request |
4059 | * (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from its completion. |
4060 | * |
4061 | * This is designed to send multiple task management requests at the |
4062 | * same time to the fifo. If the fifo is full, we will append the |
4063 | * request, and process it in a future completion. |
4064 | */ |
4065 | static void |
4066 | _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
4067 | { |
4068 | Mpi2SCSITaskManagementRequest_t *mpi_request; |
4069 | u16 smid; |
4070 | struct _sas_device *sas_device = NULL; |
4071 | struct _pcie_device *pcie_device = NULL; |
4072 | struct MPT3SAS_TARGET *sas_target_priv_data = NULL; |
4073 | u64 sas_address = 0; |
4074 | unsigned long flags; |
4075 | struct _tr_list *delayed_tr; |
4076 | u32 ioc_state; |
4077 | u8 tr_method = 0; |
4078 | struct hba_port *port = NULL; |
4079 | |
4080 | if (ioc->pci_error_recovery) { |
4081 | dewtprintk(ioc, |
4082 | ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n", |
4083 | __func__, handle)); |
4084 | return; |
4085 | } |
4086 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); |
4087 | if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { |
4088 | dewtprintk(ioc, |
4089 | ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n", |
4090 | __func__, handle)); |
4091 | return; |
4092 | } |
4093 | |
4094 | /* if PD, then return */ |
4095 | if (test_bit(handle, ioc->pd_handles)) |
4096 | return; |
4097 | |
4098 | clear_bit(handle, ioc->pend_os_device_add); |
4099 | |
4100 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
4101 | sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); |
4102 | if (sas_device && sas_device->starget && |
4103 | sas_device->starget->hostdata) { |
4104 | sas_target_priv_data = sas_device->starget->hostdata; |
4105 | sas_target_priv_data->deleted = 1; |
4106 | sas_address = sas_device->sas_address; |
4107 | port = sas_device->port; |
4108 | } |
4109 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
4110 | if (!sas_device) { |
4111 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
4112 | pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); |
4113 | if (pcie_device && pcie_device->starget && |
4114 | pcie_device->starget->hostdata) { |
4115 | sas_target_priv_data = pcie_device->starget->hostdata; |
4116 | sas_target_priv_data->deleted = 1; |
4117 | sas_address = pcie_device->wwid; |
4118 | } |
4119 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
4120 | if (pcie_device && (!ioc->tm_custom_handling) && |
4121 | (!(mpt3sas_scsih_is_pcie_scsi_device( |
4122 | pcie_device->device_info)))) |
4123 | tr_method = |
4124 | MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; |
4125 | else |
4126 | tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; |
4127 | } |
4128 | if (sas_target_priv_data) { |
4129 | dewtprintk(ioc, |
4130 | ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", |
4131 | handle, (u64)sas_address)); |
4132 | if (sas_device) { |
4133 | if (sas_device->enclosure_handle != 0) |
4134 | dewtprintk(ioc, |
4135 | ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n", |
4136 | (u64)sas_device->enclosure_logical_id, |
4137 | sas_device->slot)); |
4138 | if (sas_device->connector_name[0] != '\0') |
4139 | dewtprintk(ioc, |
4140 | ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n", |
4141 | sas_device->enclosure_level, |
4142 | sas_device->connector_name)); |
4143 | } else if (pcie_device) { |
4144 | if (pcie_device->enclosure_handle != 0) |
4145 | dewtprintk(ioc, |
4146 | ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n", |
4147 | (u64)pcie_device->enclosure_logical_id, |
4148 | pcie_device->slot)); |
4149 | if (pcie_device->connector_name[0] != '\0') |
4150 | dewtprintk(ioc, |
4151 | ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n", |
4152 | pcie_device->enclosure_level, |
4153 | pcie_device->connector_name)); |
4154 | } |
4155 | _scsih_ublock_io_device(ioc, sas_address, port); |
4156 | sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; |
4157 | } |
4158 | |
4159 | smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); |
4160 | if (!smid) { |
4161 | delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); |
4162 | if (!delayed_tr) |
4163 | goto out; |
4164 | INIT_LIST_HEAD(&delayed_tr->list); |
4165 | delayed_tr->handle = handle; |
4166 | list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); |
4167 | dewtprintk(ioc, |
4168 | ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", |
4169 | handle)); |
4170 | goto out; |
4171 | } |
4172 | |
4173 | dewtprintk(ioc, |
4174 | ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", |
4175 | handle, smid, ioc->tm_tr_cb_idx)); |
4176 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
4177 | memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); |
4178 | mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; |
4179 | mpi_request->DevHandle = cpu_to_le16(handle); |
4180 | mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; |
4181 | mpi_request->MsgFlags = tr_method; |
4182 | set_bit(handle, ioc->device_remove_in_progress); |
4183 | ioc->put_smid_hi_priority(ioc, smid, 0); |
4184 | mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); |
4185 | |
4186 | out: |
4187 | if (sas_device) |
4188 | sas_device_put(sas_device); |
4189 | if (pcie_device) |
4190 | pcie_device_put(pcie_device); |
4191 | } |
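| |
| /* |
| * Illustrative summary only (not part of the driver): the device |
| * removal handshake driven from _scsih_tm_tr_send() proceeds in two |
| * request/completion pairs, with the delayed lists absorbing smid |
| * shortages along the way: |
| * |
| * _scsih_tm_tr_send()              -> target reset (hi-priority smid) |
| *   _scsih_tm_tr_complete()        -> SAS IO unit control |
| *                                     (MPI2_SAS_OP_REMOVE_DEVICE) |
| *     _scsih_sas_control_complete() -> clears device_remove_in_progress |
| */ |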
4192 | |
4193 | /** |
4194 | * _scsih_tm_tr_complete - |
4195 | * @ioc: per adapter object |
4196 | * @smid: system request message index |
4197 | * @msix_index: MSIX table index supplied by the OS |
4198 | * @reply: reply message frame(lower 32bit addr) |
4199 | * Context: interrupt time. |
4200 | * |
4201 | * This is the target reset completion routine. |
4202 | * This code is part of the code to initiate the device removal |
4203 | * handshake protocol with controller firmware. |
4204 | * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) |
4205 | * |
4206 | * Return: 1 meaning mf should be freed from _base_interrupt |
4207 | * 0 means the mf is freed from this function. |
4208 | */ |
4209 | static u8 |
4210 | _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
4211 | u32 reply) |
4212 | { |
4213 | u16 handle; |
4214 | Mpi2SCSITaskManagementRequest_t *mpi_request_tm; |
4215 | Mpi2SCSITaskManagementReply_t *mpi_reply = |
4216 | mpt3sas_base_get_reply_virt_addr(ioc, reply); |
4217 | Mpi2SasIoUnitControlRequest_t *mpi_request; |
4218 | u16 smid_sas_ctrl; |
4219 | u32 ioc_state; |
4220 | struct _sc_list *delayed_sc; |
4221 | |
4222 | if (ioc->pci_error_recovery) { |
4223 | dewtprintk(ioc, |
4224 | ioc_info(ioc, "%s: host in pci error recovery\n", |
4225 | __func__)); |
4226 | return 1; |
4227 | } |
4228 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); |
4229 | if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { |
4230 | dewtprintk(ioc, |
4231 | ioc_info(ioc, "%s: host is not operational\n", |
4232 | __func__)); |
4233 | return 1; |
4234 | } |
4235 | if (unlikely(!mpi_reply)) { |
4236 | ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", |
4237 | __FILE__, __LINE__, __func__); |
4238 | return 1; |
4239 | } |
4240 | mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); |
4241 | handle = le16_to_cpu(mpi_request_tm->DevHandle); |
4242 | if (handle != le16_to_cpu(mpi_reply->DevHandle)) { |
4243 | dewtprintk(ioc, |
4244 | ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", |
4245 | handle, |
4246 | le16_to_cpu(mpi_reply->DevHandle), smid)); |
4247 | return 0; |
4248 | } |
4249 | |
4250 | mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); |
4251 | dewtprintk(ioc, |
4252 | ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", |
4253 | handle, smid, le16_to_cpu(mpi_reply->IOCStatus), |
4254 | le32_to_cpu(mpi_reply->IOCLogInfo), |
4255 | le32_to_cpu(mpi_reply->TerminationCount))); |
4256 | |
4257 | smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); |
4258 | if (!smid_sas_ctrl) { |
4259 | delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC); |
4260 | if (!delayed_sc) |
4261 | return _scsih_check_for_pending_tm(ioc, smid); |
4262 | INIT_LIST_HEAD(&delayed_sc->list); |
4263 | delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle); |
4264 | list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); |
4265 | dewtprintk(ioc, |
4266 | ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n", |
4267 | handle)); |
4268 | return _scsih_check_for_pending_tm(ioc, smid); |
4269 | } |
4270 | |
4271 | dewtprintk(ioc, |
4272 | ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", |
4273 | handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); |
4274 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); |
4275 | memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); |
4276 | mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; |
4277 | mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; |
4278 | mpi_request->DevHandle = mpi_request_tm->DevHandle; |
4279 | ioc->put_smid_default(ioc, smid_sas_ctrl); |
4280 | |
4281 | return _scsih_check_for_pending_tm(ioc, smid); |
4282 | } |
4283 | |
4284 | /** _scsih_allow_scmd_to_device - check whether scmd needs to |
4285 | * issue to IOC or not. |
4286 | * @ioc: per adapter object |
4287 | * @scmd: pointer to scsi command object |
4288 | * |
4289 | * Returns true if scmd can be issued to IOC otherwise returns false. |
4290 | */ |
4291 | inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, |
4292 | struct scsi_cmnd *scmd) |
4293 | { |
4294 | |
4295 | if (ioc->pci_error_recovery) |
4296 | return false; |
4297 | |
4298 | if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { |
4299 | if (ioc->remove_host) |
4300 | return false; |
4301 | |
4302 | return true; |
4303 | } |
4304 | |
4305 | if (ioc->remove_host) { |
4306 | |
4307 | switch (scmd->cmnd[0]) { |
4308 | case SYNCHRONIZE_CACHE: |
4309 | case START_STOP: |
4310 | return true; |
4311 | default: |
4312 | return false; |
4313 | } |
4314 | } |
4315 | |
4316 | return true; |
4317 | } |
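| |
| /* |
| * Illustrative sketch only (not part of the driver): a command |
| * submission path would typically gate I/O with this helper roughly |
| * as follows: |
| * |
| * if (!_scsih_allow_scmd_to_device(ioc, scmd)) { |
| *         scmd->result = DID_NO_CONNECT << 16; |
| *         scsi_done(scmd); |
| *         return 0; |
| * } |
| */ |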
4318 | |
4319 | /** |
4320 | * _scsih_sas_control_complete - completion routine |
4321 | * @ioc: per adapter object |
4322 | * @smid: system request message index |
4323 | * @msix_index: MSIX table index supplied by the OS |
4324 | * @reply: reply message frame(lower 32bit addr) |
4325 | * Context: interrupt time. |
4326 | * |
4327 | * This is the sas iounit control completion routine. |
4328 | * This code is part of the code to initiate the device removal |
4329 | * handshake protocol with controller firmware. |
4330 | * |
4331 | * Return: 1 meaning mf should be freed from _base_interrupt |
4332 | * 0 means the mf is freed from this function. |
4333 | */ |
4334 | static u8 |
4335 | _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
4336 | u8 msix_index, u32 reply) |
4337 | { |
4338 | Mpi2SasIoUnitControlReply_t *mpi_reply = |
4339 | mpt3sas_base_get_reply_virt_addr(ioc, reply); |
4340 | |
4341 | if (likely(mpi_reply)) { |
4342 | dewtprintk(ioc, |
4343 | ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", |
4344 | le16_to_cpu(mpi_reply->DevHandle), smid, |
4345 | le16_to_cpu(mpi_reply->IOCStatus), |
4346 | le32_to_cpu(mpi_reply->IOCLogInfo))); |
4347 | if (le16_to_cpu(mpi_reply->IOCStatus) == |
4348 | MPI2_IOCSTATUS_SUCCESS) { |
4349 | clear_bit(le16_to_cpu(mpi_reply->DevHandle), |
4350 | ioc->device_remove_in_progress); |
4351 | } |
4352 | } else { |
4353 | ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", |
4354 | __FILE__, __LINE__, __func__); |
4355 | } |
4356 | return mpt3sas_check_for_pending_internal_cmds(ioc, smid); |
4357 | } |
4358 | |
4359 | /** |
4360 | * _scsih_tm_tr_volume_send - send target reset request for volumes |
4361 | * @ioc: per adapter object |
4362 | * @handle: device handle |
4363 | * Context: interrupt time. |
4364 | * |
4365 | * This is designed to send multiple task management requests at the |
4366 | * same time to the fifo. If the fifo is full, we will append the |
4367 | * request, and process it in a future completion. |
4368 | */ |
4369 | static void |
4370 | _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
4371 | { |
4372 | Mpi2SCSITaskManagementRequest_t *mpi_request; |
4373 | u16 smid; |
4374 | struct _tr_list *delayed_tr; |
4375 | |
4376 | if (ioc->pci_error_recovery) { |
4377 | dewtprintk(ioc, |
4378 | ioc_info(ioc, "%s: host reset in progress!\n", |
4379 | __func__)); |
4380 | return; |
4381 | } |
4382 | |
4383 | smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); |
4384 | if (!smid) { |
4385 | delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); |
4386 | if (!delayed_tr) |
4387 | return; |
4388 | INIT_LIST_HEAD(&delayed_tr->list); |
4389 | delayed_tr->handle = handle; |
4390 | list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); |
4391 | dewtprintk(ioc, |
4392 | ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", |
4393 | handle)); |
4394 | return; |
4395 | } |
4396 | |
4397 | dewtprintk(ioc, |
4398 | ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", |
4399 | handle, smid, ioc->tm_tr_volume_cb_idx)); |
4400 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
4401 | memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); |
4402 | mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; |
4403 | mpi_request->DevHandle = cpu_to_le16(handle); |
4404 | mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; |
4405 | ioc->put_smid_hi_priority(ioc, smid, 0); |
4406 | } |
4407 | |
4408 | /** |
4409 | * _scsih_tm_volume_tr_complete - target reset completion |
4410 | * @ioc: per adapter object |
4411 | * @smid: system request message index |
4412 | * @msix_index: MSIX table index supplied by the OS |
4413 | * @reply: reply message frame(lower 32bit addr) |
4414 | * Context: interrupt time. |
4415 | * |
4416 | * Return: 1 meaning mf should be freed from _base_interrupt |
4417 | * 0 means the mf is freed from this function. |
4418 | */ |
4419 | static u8 |
4420 | _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
4421 | u8 msix_index, u32 reply) |
4422 | { |
4423 | u16 handle; |
4424 | Mpi2SCSITaskManagementRequest_t *mpi_request_tm; |
4425 | Mpi2SCSITaskManagementReply_t *mpi_reply = |
4426 | mpt3sas_base_get_reply_virt_addr(ioc, reply); |
4427 | |
4428 | if (ioc->shost_recovery || ioc->pci_error_recovery) { |
4429 | dewtprintk(ioc, |
4430 | ioc_info(ioc, "%s: host reset in progress!\n", |
4431 | __func__)); |
4432 | return 1; |
4433 | } |
4434 | if (unlikely(!mpi_reply)) { |
4435 | ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", |
4436 | __FILE__, __LINE__, __func__); |
4437 | return 1; |
4438 | } |
4439 | |
4440 | mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); |
4441 | handle = le16_to_cpu(mpi_request_tm->DevHandle); |
4442 | if (handle != le16_to_cpu(mpi_reply->DevHandle)) { |
4443 | dewtprintk(ioc, |
4444 | ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", |
4445 | handle, le16_to_cpu(mpi_reply->DevHandle), |
4446 | smid)); |
4447 | return 0; |
4448 | } |
4449 | |
4450 | dewtprintk(ioc, |
4451 | ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", |
4452 | handle, smid, le16_to_cpu(mpi_reply->IOCStatus), |
4453 | le32_to_cpu(mpi_reply->IOCLogInfo), |
4454 | le32_to_cpu(mpi_reply->TerminationCount))); |
4455 | |
4456 | return _scsih_check_for_pending_tm(ioc, smid); |
4457 | } |
4458 | |
4459 | /** |
4460 | * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages |
4461 | * @ioc: per adapter object |
4462 | * @smid: system request message index |
4463 | * @event: Event ID |
4464 | * @event_context: used to track events uniquely |
4465 | * |
4466 | * Context - processed in interrupt context. |
4467 | */ |
4468 | static void |
4469 | _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, |
4470 | U32 event_context) |
4471 | { |
4472 | Mpi2EventAckRequest_t *ack_request; |
4473 | int i = smid - ioc->internal_smid; |
4474 | unsigned long flags; |
4475 | |
4476 | /* Without releasing the smid just update the |
4477 | * call back index and reuse the same smid for |
4478 | * processing this delayed request |
4479 | */ |
4480 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
4481 | ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; |
4482 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
4483 | |
4484 | dewtprintk(ioc, |
4485 | ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", |
4486 | le16_to_cpu(event), smid, ioc->base_cb_idx)); |
4487 | ack_request = mpt3sas_base_get_msg_frame(ioc, smid); |
4488 | memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); |
4489 | ack_request->Function = MPI2_FUNCTION_EVENT_ACK; |
4490 | ack_request->Event = event; |
4491 | ack_request->EventContext = event_context; |
4492 | ack_request->VF_ID = 0; /* TODO */ |
4493 | ack_request->VP_ID = 0; |
4494 | ioc->put_smid_default(ioc, smid); |
4495 | } |
4496 | |
4497 | /** |
4498 | * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed |
4499 | * sas_io_unit_ctrl messages |
4500 | * @ioc: per adapter object |
4501 | * @smid: system request message index |
4502 | * @handle: device handle |
4503 | * |
4504 | * Context - processed in interrupt context. |
4505 | */ |
4506 | static void |
4507 | _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, |
4508 | u16 smid, u16 handle) |
4509 | { |
4510 | Mpi2SasIoUnitControlRequest_t *mpi_request; |
4511 | u32 ioc_state; |
4512 | int i = smid - ioc->internal_smid; |
4513 | unsigned long flags; |
4514 | |
4515 | if (ioc->remove_host) { |
4516 | dewtprintk(ioc, |
4517 | ioc_info(ioc, "%s: host has been removed\n", |
4518 | __func__)); |
4519 | return; |
4520 | } else if (ioc->pci_error_recovery) { |
4521 | dewtprintk(ioc, |
4522 | ioc_info(ioc, "%s: host in pci error recovery\n", |
4523 | __func__)); |
4524 | return; |
4525 | } |
4526 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); |
4527 | if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { |
4528 | dewtprintk(ioc, |
4529 | ioc_info(ioc, "%s: host is not operational\n", |
4530 | __func__)); |
4531 | return; |
4532 | } |
4533 | |
4534 | /* Without releasing the smid just update the |
4535 | * call back index and reuse the same smid for |
4536 | * processing this delayed request |
4537 | */ |
4538 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
4539 | ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; |
4540 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
4541 | |
4542 | dewtprintk(ioc, |
4543 | ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", |
4544 | handle, smid, ioc->tm_sas_control_cb_idx)); |
4545 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
4546 | memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); |
4547 | mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; |
4548 | mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; |
4549 | mpi_request->DevHandle = cpu_to_le16(handle); |
4550 | ioc->put_smid_default(ioc, smid); |
4551 | } |
4552 | |
4553 | /** |
4554 | * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages |
4555 | * @ioc: per adapter object |
4556 | * @smid: system request message index |
4557 | * |
4558 | * Context: Executed in interrupt context |
4559 | * |
4560 | * This will check delayed internal messages list, and process the |
4561 | * next request. |
4562 | * |
4563 | * Return: 1 meaning mf should be freed from _base_interrupt |
4564 | * 0 means the mf is freed from this function. |
4565 | */ |
4566 | u8 |
4567 | mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
4568 | { |
4569 | struct _sc_list *delayed_sc; |
4570 | struct _event_ack_list *delayed_event_ack; |
4571 | |
4572 | if (!list_empty(&ioc->delayed_event_ack_list)) { |
4573 | delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, |
4574 | struct _event_ack_list, list); |
4575 | _scsih_issue_delayed_event_ack(ioc, smid, |
4576 | delayed_event_ack->Event, delayed_event_ack->EventContext); |
4577 | list_del(&delayed_event_ack->list); |
4578 | kfree(delayed_event_ack); |
4579 | return 0; |
4580 | } |
4581 | |
4582 | if (!list_empty(&ioc->delayed_sc_list)) { |
4583 | delayed_sc = list_entry(ioc->delayed_sc_list.next, |
4584 | struct _sc_list, list); |
4585 | _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, |
4586 | delayed_sc->handle); |
4587 | list_del(&delayed_sc->list); |
4588 | kfree(delayed_sc); |
4589 | return 0; |
4590 | } |
4591 | return 1; |
4592 | } |
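| |
| /* |
| * Illustrative note only (not part of the driver): internal completion |
| * handlers chain into this helper so a just-completed smid is reused |
| * immediately for a delayed request instead of being freed, e.g. |
| * _scsih_sas_control_complete() ends with: |
| * |
| * return mpt3sas_check_for_pending_internal_cmds(ioc, smid); |
| * |
| * A return value of 1 tells _base_interrupt() to free the message |
| * frame; 0 means the smid was consumed here for a delayed request. |
| */ |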
4593 | |
4594 | /** |
4595 | * _scsih_check_for_pending_tm - check for pending task management |
4596 | * @ioc: per adapter object |
4597 | * @smid: system request message index |
4598 | * |
4599 | * This will check delayed target reset list, and feed the |
4600 | * next reqeust. |
4601 | * |
4602 | * Return: 1 meaning mf should be freed from _base_interrupt |
4603 | * 0 means the mf is freed from this function. |
4604 | */ |
4605 | static u8 |
4606 | _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
4607 | { |
4608 | struct _tr_list *delayed_tr; |
4609 | |
4610 | if (!list_empty(&ioc->delayed_tr_volume_list)) { |
4611 | delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, |
4612 | struct _tr_list, list); |
4613 | mpt3sas_base_free_smid(ioc, smid); |
4614 | _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); |
4615 | list_del(&delayed_tr->list); |
4616 | kfree(delayed_tr); |
4617 | return 0; |
4618 | } |
4619 | |
4620 | if (!list_empty(&ioc->delayed_tr_list)) { |
4621 | delayed_tr = list_entry(ioc->delayed_tr_list.next, |
4622 | struct _tr_list, list); |
4623 | mpt3sas_base_free_smid(ioc, smid); |
4624 | _scsih_tm_tr_send(ioc, delayed_tr->handle); |
4625 | list_del(&delayed_tr->list); |
4626 | kfree(delayed_tr); |
4627 | return 0; |
4628 | } |
4629 | |
4630 | return 1; |
4631 | } |
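| |
| /* |
| * Illustrative note only (not part of the driver): delayed volume |
| * target resets are drained before delayed device target resets, so a |
| * completion that lands here recycles its smid in that order; both |
| * _scsih_tm_tr_complete() and _scsih_tm_volume_tr_complete() finish |
| * with: |
| * |
| * return _scsih_check_for_pending_tm(ioc, smid); |
| */ |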
4632 | |
4633 | /** |
4634 | * _scsih_check_topo_delete_events - sanity check on topo events |
4635 | * @ioc: per adapter object |
4636 | * @event_data: the event data payload |
4637 | * |
4638 | * This routine added to better handle cable breaker. |
4639 | * |
4640 | * This handles the case where driver receives multiple expander |
4641 | * add and delete events in a single shot. When there is a delete event |
4642 | * the routine will void any pending add events waiting in the event queue. |
4643 | */ |
4644 | static void |
4645 | _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, |
4646 | Mpi2EventDataSasTopologyChangeList_t *event_data) |
4647 | { |
4648 | struct fw_event_work *fw_event; |
4649 | Mpi2EventDataSasTopologyChangeList_t *local_event_data; |
4650 | u16 expander_handle; |
4651 | struct _sas_node *sas_expander; |
4652 | unsigned long flags; |
4653 | int i, reason_code; |
4654 | u16 handle; |
4655 | |
4656 | for (i = 0 ; i < event_data->NumEntries; i++) { |
4657 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); |
4658 | if (!handle) |
4659 | continue; |
4660 | reason_code = event_data->PHY[i].PhyStatus & |
4661 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
4662 | if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) |
4663 | _scsih_tm_tr_send(ioc, handle); |
4664 | } |
4665 | |
4666 | expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); |
4667 | if (expander_handle < ioc->sas_hba.num_phys) { |
4668 | _scsih_block_io_to_children_attached_directly(ioc, event_data); |
4669 | return; |
4670 | } |
4671 | if (event_data->ExpStatus == |
4672 | MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { |
4673 | /* put expander attached devices into blocking state */ |
4674 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
4675 | sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, |
4676 | expander_handle); |
4677 | _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); |
4678 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
4679 | do { |
4680 | handle = find_first_bit(ioc->blocking_handles, |
4681 | ioc->facts.MaxDevHandle); |
4682 | if (handle < ioc->facts.MaxDevHandle) |
4683 | _scsih_block_io_device(ioc, handle); |
4684 | } while (test_and_clear_bit(handle, ioc->blocking_handles)); |
4685 | } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) |
4686 | _scsih_block_io_to_children_attached_directly(ioc, event_data); |
4687 | |
4688 | if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) |
4689 | return; |
4690 | |
4691 | /* mark ignore flag for pending events */ |
4692 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
4693 | list_for_each_entry(fw_event, &ioc->fw_event_list, list) { |
4694 | if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || |
4695 | fw_event->ignore) |
4696 | continue; |
4697 | local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) |
4698 | fw_event->event_data; |
4699 | if (local_event_data->ExpStatus == |
4700 | MPI2_EVENT_SAS_TOPO_ES_ADDED || |
4701 | local_event_data->ExpStatus == |
4702 | MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { |
4703 | if (le16_to_cpu(local_event_data->ExpanderDevHandle) == |
4704 | expander_handle) { |
4705 | dewtprintk(ioc, |
4706 | ioc_info(ioc, "setting ignoring flag\n")); |
4707 | fw_event->ignore = 1; |
4708 | } |
4709 | } |
4710 | } |
4711 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
4712 | } |
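/*
 * Illustrative sketch (comment only, not compiled): the ignore flag set
 * above is expected to short-circuit a stale, still-queued expander add
 * event when the firmware-event worker eventually runs it, along the
 * lines of:
 *
 *	if (fw_event->ignore) {
 *		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
 *		return;
 *	}
 *
 * The actual check lives in the topology-change event handler elsewhere
 * in this file; the snippet only illustrates the intent.
 */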
4713 | |
4714 | /** |
4715 | * _scsih_check_pcie_topo_remove_events - sanity check on topo |
4716 | * events |
4717 | * @ioc: per adapter object |
4718 | * @event_data: the event data payload |
4719 | * |
4720 | * This handles the case where driver receives multiple switch |
4721 | * or device add and delete events in a single shot. When there |
4722 | * is a delete event the routine will void any pending add |
4723 | * events waiting in the event queue. |
4724 | */ |
4725 | static void |
4726 | _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, |
4727 | Mpi26EventDataPCIeTopologyChangeList_t *event_data) |
4728 | { |
4729 | struct fw_event_work *fw_event; |
4730 | Mpi26EventDataPCIeTopologyChangeList_t *local_event_data; |
4731 | unsigned long flags; |
4732 | int i, reason_code; |
4733 | u16 handle, switch_handle; |
4734 | |
4735 | for (i = 0; i < event_data->NumEntries; i++) { |
4736 | handle = |
4737 | le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); |
4738 | if (!handle) |
4739 | continue; |
4740 | reason_code = event_data->PortEntry[i].PortStatus; |
4741 | if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING) |
4742 | _scsih_tm_tr_send(ioc, handle); |
4743 | } |
4744 | |
4745 | switch_handle = le16_to_cpu(event_data->SwitchDevHandle); |
4746 | if (!switch_handle) { |
4747 | _scsih_block_io_to_pcie_children_attached_directly( |
4748 | ioc, event_data); |
4749 | return; |
4750 | } |
4751 | /* TODO: cascaded PCIe switch removal is not supported yet. */ |
4752 | if ((event_data->SwitchStatus |
4753 | == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) || |
4754 | (event_data->SwitchStatus == |
4755 | MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) |
4756 | _scsih_block_io_to_pcie_children_attached_directly( |
4757 | ioc, event_data); |
4758 | |
4759 | if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) |
4760 | return; |
4761 | |
4762 | /* mark ignore flag for pending events */ |
4763 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
4764 | list_for_each_entry(fw_event, &ioc->fw_event_list, list) { |
4765 | if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || |
4766 | fw_event->ignore) |
4767 | continue; |
4768 | local_event_data = |
4769 | (Mpi26EventDataPCIeTopologyChangeList_t *) |
4770 | fw_event->event_data; |
4771 | if (local_event_data->SwitchStatus == |
4772 | MPI2_EVENT_SAS_TOPO_ES_ADDED || |
4773 | local_event_data->SwitchStatus == |
4774 | MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { |
4775 | if (le16_to_cpu(local_event_data->SwitchDevHandle) == |
4776 | switch_handle) { |
4777 | dewtprintk(ioc, |
4778 | ioc_info(ioc, "setting ignoring flag for switch event\n")); |
4779 | fw_event->ignore = 1; |
4780 | } |
4781 | } |
4782 | } |
4783 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
4784 | } |
4785 | |
4786 | /** |
4787 | * _scsih_set_volume_delete_flag - setting volume delete flag |
4788 | * @ioc: per adapter object |
4789 | * @handle: device handle |
4790 | * |
4791 | * This returns nothing. |
4792 | */ |
4793 | static void |
4794 | _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
4795 | { |
4796 | struct _raid_device *raid_device; |
4797 | struct MPT3SAS_TARGET *sas_target_priv_data; |
4798 | unsigned long flags; |
4799 | |
4800 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
4801 | raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); |
4802 | if (raid_device && raid_device->starget && |
4803 | raid_device->starget->hostdata) { |
4804 | sas_target_priv_data = |
4805 | raid_device->starget->hostdata; |
4806 | sas_target_priv_data->deleted = 1; |
4807 | dewtprintk(ioc, |
4808 | ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", |
4809 | handle, (u64)raid_device->wwid)); |
4810 | } |
4811 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
4812 | } |
4813 | |
4814 | /** |
4815 | * _scsih_set_volume_handle_for_tr - set handle for target reset to volume |
4816 | * @handle: input handle |
4817 | * @a: handle for volume a |
4818 | * @b: handle for volume b |
4819 | * |
4820 | * IR firmware only supports two RAID volumes. The purpose of this |
4821 | * routine is to record the volume handle in either a or b: a non-zero |
4822 | * handle that is not already stored is placed in the first free slot. |
4823 | */ |
4824 | static void |
4825 | _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) |
4826 | { |
4827 | if (!handle || handle == *a || handle == *b) |
4828 | return; |
4829 | if (!*a) |
4830 | *a = handle; |
4831 | else if (!*b) |
4832 | *b = handle; |
4833 | } |
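/*
 * Worked example (comment only): with a = b = 0, calling this routine
 * with handles 0x0121, 0x0121 and 0x0145 in turn leaves a = 0x0121 and
 * b = 0x0145. Zero and duplicate handles are ignored, so at most two
 * volume handles are ever recorded, matching the IR firmware limit.
 */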
4834 | |
4835 | /** |
4836 | * _scsih_check_ir_config_unhide_events - check for UNHIDE events |
4837 | * @ioc: per adapter object |
4838 | * @event_data: the event data payload |
4839 | * Context: interrupt time. |
4840 | * |
4841 | * This routine will send target reset to volume, followed by target |
4842 | * resets to the PDs. This is called when a PD has been removed, or |
4843 | * volume has been deleted or removed. When the target reset is sent |
4844 | * to volume, the PD target resets need to be queued to start upon |
4845 | * completion of the volume target reset. |
4846 | */ |
4847 | static void |
4848 | _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, |
4849 | Mpi2EventDataIrConfigChangeList_t *event_data) |
4850 | { |
4851 | Mpi2EventIrConfigElement_t *element; |
4852 | int i; |
4853 | u16 handle, volume_handle, a, b; |
4854 | struct _tr_list *delayed_tr; |
4855 | |
4856 | a = 0; |
4857 | b = 0; |
4858 | |
4859 | if (ioc->is_warpdrive) |
4860 | return; |
4861 | |
4862 | /* Volume Resets for Deleted or Removed */ |
4863 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; |
4864 | for (i = 0; i < event_data->NumElements; i++, element++) { |
4865 | if (le32_to_cpu(event_data->Flags) & |
4866 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) |
4867 | continue; |
4868 | if (element->ReasonCode == |
4869 | MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || |
4870 | element->ReasonCode == |
4871 | MPI2_EVENT_IR_CHANGE_RC_REMOVED) { |
4872 | volume_handle = le16_to_cpu(element->VolDevHandle); |
4873 | _scsih_set_volume_delete_flag(ioc, volume_handle); |
4874 | _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); |
4875 | } |
4876 | } |
4877 | |
4878 | /* Volume Resets for UNHIDE events */ |
4879 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; |
4880 | for (i = 0; i < event_data->NumElements; i++, element++) { |
4881 | if (le32_to_cpu(event_data->Flags) & |
4882 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) |
4883 | continue; |
4884 | if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { |
4885 | volume_handle = le16_to_cpu(element->VolDevHandle); |
4886 | _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); |
4887 | } |
4888 | } |
4889 | |
4890 | if (a) |
4891 | _scsih_tm_tr_volume_send(ioc, a); |
4892 | if (b) |
4893 | _scsih_tm_tr_volume_send(ioc, b); |
4894 | |
4895 | /* PD target resets */ |
4896 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; |
4897 | for (i = 0; i < event_data->NumElements; i++, element++) { |
4898 | if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) |
4899 | continue; |
4900 | handle = le16_to_cpu(element->PhysDiskDevHandle); |
4901 | volume_handle = le16_to_cpu(element->VolDevHandle); |
4902 | clear_bit(handle, ioc->pd_handles); |
4903 | if (!volume_handle) |
4904 | _scsih_tm_tr_send(ioc, handle); |
4905 | else if (volume_handle == a || volume_handle == b) { |
4906 | delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); |
4907 | BUG_ON(!delayed_tr); |
4908 | INIT_LIST_HEAD(&delayed_tr->list); |
4909 | delayed_tr->handle = handle; |
4910 | list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); |
4911 | dewtprintk(ioc, |
4912 | ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", |
4913 | handle)); |
4914 | } else |
4915 | _scsih_tm_tr_send(ioc, handle); |
4916 | } |
4917 | } |
4918 | |
4919 | |
4920 | /** |
4921 | * _scsih_check_volume_delete_events - set delete flag for volumes |
4922 | * @ioc: per adapter object |
4923 | * @event_data: the event data payload |
4924 | * Context: interrupt time. |
4925 | * |
4926 | * This handles the case where the cable connected to the entire volume is |
4927 | * pulled. We take care of setting the deleted flag so that normal I/O will |
4928 | * not be sent. |
4929 | */ |
4930 | static void |
4931 | _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, |
4932 | Mpi2EventDataIrVolume_t *event_data) |
4933 | { |
4934 | u32 state; |
4935 | |
4936 | if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) |
4937 | return; |
4938 | state = le32_to_cpu(event_data->NewValue); |
4939 | if (state == MPI2_RAID_VOL_STATE_MISSING || state == |
4940 | MPI2_RAID_VOL_STATE_FAILED) |
4941 | _scsih_set_volume_delete_flag(ioc, |
4942 | le16_to_cpu(event_data->VolDevHandle)); |
4943 | } |
4944 | |
4945 | /** |
4946 | * _scsih_temp_threshold_events - display temperature threshold exceeded events |
4947 | * @ioc: per adapter object |
4948 | * @event_data: the temp threshold event data |
4949 | * Context: interrupt time. |
4950 | */ |
4951 | static void |
4952 | _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, |
4953 | Mpi2EventDataTemperature_t *event_data) |
4954 | { |
4955 | u32 doorbell; |
4956 | if (ioc->temp_sensors_count >= event_data->SensorNum) { |
4957 | ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", |
4958 | le16_to_cpu(event_data->Status) & 0x1 ? "0 ": " ", |
4959 | le16_to_cpu(event_data->Status) & 0x2 ? "1 ": " ", |
4960 | le16_to_cpu(event_data->Status) & 0x4 ? "2 ": " ", |
4961 | le16_to_cpu(event_data->Status) & 0x8 ? "3 ": " ", |
4962 | event_data->SensorNum); |
4963 | ioc_err(ioc, "Current Temp In Celsius: %d\n", |
4964 | event_data->CurrentTemperature); |
4965 | if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { |
4966 | doorbell = mpt3sas_base_get_iocstate(ioc, 0); |
4967 | if ((doorbell & MPI2_IOC_STATE_MASK) == |
4968 | MPI2_IOC_STATE_FAULT) { |
4969 | mpt3sas_print_fault_code(ioc, |
4970 | doorbell & MPI2_DOORBELL_DATA_MASK); |
4971 | } else if ((doorbell & MPI2_IOC_STATE_MASK) == |
4972 | MPI2_IOC_STATE_COREDUMP) { |
4973 | mpt3sas_print_coredump_info(ioc, |
4974 | doorbell & MPI2_DOORBELL_DATA_MASK); |
4975 | } |
4976 | } |
4977 | } |
4978 | } |
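/*
 * Illustrative note (comment only): the Status field is a bitmask of the
 * four programmable temperature thresholds, so a value of 0x3 means that
 * thresholds 0 and 1 were both exceeded, e.g.:
 *
 *	u16 status = le16_to_cpu(event_data->Status);
 *	bool t0 = status & 0x1, t1 = status & 0x2;
 *
 * which is exactly what the flag string built above encodes.
 */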
4979 | |
4980 | static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) |
4981 | { |
4982 | struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; |
4983 | |
4984 | if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) |
4985 | return 0; |
4986 | |
4987 | if (pending) |
4988 | return test_and_set_bit(0, &priv->ata_command_pending); |
4989 | |
4990 | clear_bit(0, &priv->ata_command_pending); |
4991 | return 0; |
4992 | } |
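/*
 * Illustrative sketch (comment only, not compiled): the lockless
 * serialization pattern built on this helper. The submission path spins
 * until it wins the test_and_set_bit() race, guaranteeing only one
 * ATA_12/ATA_16 pass-through command is outstanding per device:
 *
 *	do {
 *		if (test_bit(0, &priv->ata_command_pending))
 *			return SCSI_MLQUEUE_DEVICE_BUSY;
 *	} while (_scsih_set_satl_pending(scmd, true));
 *
 * The completion and flush paths release the slot by calling
 * _scsih_set_satl_pending(scmd, false).
 */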
4993 | |
4994 | /** |
4995 | * _scsih_flush_running_cmds - completing outstanding commands. |
4996 | * @ioc: per adapter object |
4997 | * |
4998 | * Flush out all outstanding scmds following a host reset; any in-flight |
4999 | * I/O is completed back to the midlayer with an error result. |
5000 | */ |
5001 | static void |
5002 | _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) |
5003 | { |
5004 | struct scsi_cmnd *scmd; |
5005 | struct scsiio_tracker *st; |
5006 | u16 smid; |
5007 | int count = 0; |
5008 | |
5009 | for (smid = 1; smid <= ioc->scsiio_depth; smid++) { |
5010 | scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); |
5011 | if (!scmd) |
5012 | continue; |
5013 | count++; |
5014 | _scsih_set_satl_pending(scmd, false); |
5015 | st = scsi_cmd_priv(scmd); |
5016 | mpt3sas_base_clear_st(ioc, st); |
5017 | scsi_dma_unmap(scmd); |
5018 | if (ioc->pci_error_recovery || ioc->remove_host) |
5019 | scmd->result = DID_NO_CONNECT << 16; |
5020 | else |
5021 | scmd->result = DID_RESET << 16; |
5022 | scsi_done(scmd); |
5023 | } |
5024 | dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); |
5025 | } |
5026 | |
5027 | /** |
5028 | * _scsih_setup_eedp - setup MPI request for EEDP transfer |
5029 | * @ioc: per adapter object |
5030 | * @scmd: pointer to scsi command object |
5031 | * @mpi_request: pointer to the SCSI_IO request message frame |
5032 | * |
5033 | * Supports protection types 1 and 3. |
5034 | */ |
5035 | static void |
5036 | _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, |
5037 | Mpi25SCSIIORequest_t *mpi_request) |
5038 | { |
5039 | u16 eedp_flags; |
5040 | Mpi25SCSIIORequest_t *mpi_request_3v = |
5041 | (Mpi25SCSIIORequest_t *)mpi_request; |
5042 | |
5043 | switch (scsi_get_prot_op(scmd)) { |
5044 | case SCSI_PROT_READ_STRIP: |
5045 | eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; |
5046 | break; |
5047 | case SCSI_PROT_WRITE_INSERT: |
5048 | eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; |
5049 | break; |
5050 | default: |
5051 | return; |
5052 | } |
5053 | |
5054 | if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) |
5055 | eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; |
5056 | |
5057 | if (scmd->prot_flags & SCSI_PROT_REF_CHECK) |
5058 | eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG; |
5059 | |
5060 | if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) { |
5061 | eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG; |
5062 | |
5063 | mpi_request->CDB.EEDP32.PrimaryReferenceTag = |
5064 | cpu_to_be32(scsi_prot_ref_tag(scmd)); |
5065 | } |
5066 | |
5067 | mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd)); |
5068 | |
5069 | if (ioc->is_gen35_ioc) |
5070 | eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; |
5071 | mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); |
5072 | } |
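/*
 * Illustrative sketch (comment only, not compiled): for a Type 1
 * protected READ in the READ_STRIP case, with guard and reference
 * checking requested via scmd->prot_flags, the flags assembled above
 * would typically end up as:
 *
 *	eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
 *		     MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD |
 *		     MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 *		     MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
 *
 * with PrimaryReferenceTag seeded from scsi_prot_ref_tag() and
 * EEDPBlockSize from scsi_prot_interval(), exactly as done above.
 */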
5073 | |
5074 | /** |
5075 | * _scsih_eedp_error_handling - return sense code for EEDP errors |
5076 | * @scmd: pointer to scsi command object |
5077 | * @ioc_status: ioc status |
5078 | */ |
5079 | static void |
5080 | _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) |
5081 | { |
5082 | u8 ascq; |
5083 | |
5084 | switch (ioc_status) { |
5085 | case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: |
5086 | ascq = 0x01; |
5087 | break; |
5088 | case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: |
5089 | ascq = 0x02; |
5090 | break; |
5091 | case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: |
5092 | ascq = 0x03; |
5093 | break; |
5094 | default: |
5095 | ascq = 0x00; |
5096 | break; |
5097 | } |
5098 | scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq); |
5099 | set_host_byte(scmd, DID_ABORT); |
5100 | } |
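/*
 * Illustrative note (comment only): a guard error therefore completes
 * the command with host byte DID_ABORT and sense ILLEGAL REQUEST /
 * asc 0x10 / ascq 0x01 ("LOGICAL BLOCK GUARD CHECK FAILED"); app tag and
 * ref tag errors report ascq 0x02 and 0x03 respectively, matching the
 * T10 PI additional sense codes.
 */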
5101 | |
5102 | /** |
5103 | * scsih_qcmd - main scsi request entry point |
5104 | * @shost: SCSI host pointer |
5105 | * @scmd: pointer to scsi command object |
5106 | * |
5107 | * The callback index is set inside `ioc->scsi_io_cb_idx`. |
5108 | * |
5109 | * Return: 0 on success. If there's a failure, return either: |
5110 | * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or |
5111 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full |
5112 | */ |
5113 | static int |
5114 | scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
5115 | { |
5116 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
5117 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
5118 | struct MPT3SAS_TARGET *sas_target_priv_data; |
5119 | struct _raid_device *raid_device; |
5120 | struct request *rq = scsi_cmd_to_rq(scmd); |
5121 | int class; |
5122 | Mpi25SCSIIORequest_t *mpi_request; |
5123 | struct _pcie_device *pcie_device = NULL; |
5124 | u32 mpi_control; |
5125 | u16 smid; |
5126 | u16 handle; |
5127 | |
5128 | if (ioc->logging_level & MPT_DEBUG_SCSI) |
5129 | scsi_print_command(scmd); |
5130 | |
5131 | sas_device_priv_data = scmd->device->hostdata; |
5132 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { |
5133 | scmd->result = DID_NO_CONNECT << 16; |
5134 | scsi_done(scmd); |
5135 | return 0; |
5136 | } |
5137 | |
5138 | if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { |
5139 | scmd->result = DID_NO_CONNECT << 16; |
5140 | scsi_done(scmd); |
5141 | return 0; |
5142 | } |
5143 | |
5144 | sas_target_priv_data = sas_device_priv_data->sas_target; |
5145 | |
5146 | /* invalid device handle */ |
5147 | handle = sas_target_priv_data->handle; |
5148 | |
5149 | /* |
5150 | * Avoid error handling escalation when the device is disconnected |
5151 | */ |
5152 | if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) { |
5153 | if (scmd->device->host->shost_state == SHOST_RECOVERY && |
5154 | scmd->cmnd[0] == TEST_UNIT_READY) { |
5155 | scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); |
5156 | scsi_done(scmd); |
5157 | return 0; |
5158 | } |
5159 | } |
5160 | |
5161 | if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { |
5162 | scmd->result = DID_NO_CONNECT << 16; |
5163 | scsi_done(scmd); |
5164 | return 0; |
5165 | } |
5166 | |
5167 | |
5168 | if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { |
5169 | /* host recovery or link resets sent via IOCTLs */ |
5170 | return SCSI_MLQUEUE_HOST_BUSY; |
5171 | } else if (sas_target_priv_data->deleted) { |
5172 | /* device has been deleted */ |
5173 | scmd->result = DID_NO_CONNECT << 16; |
5174 | scsi_done(scmd); |
5175 | return 0; |
5176 | } else if (sas_target_priv_data->tm_busy || |
5177 | sas_device_priv_data->block) { |
5178 | /* device busy with task management */ |
5179 | return SCSI_MLQUEUE_DEVICE_BUSY; |
5180 | } |
5181 | |
5182 | /* |
5183 | * Workaround for firmware SATL handling: the loop below relies on |
5184 | * atomic bit operations to serialize ATA pass-through commands per |
5185 | * device, which keeps us consistent while remaining lockless here. |
5186 | */ |
5187 | do { |
5188 | if (test_bit(0, &sas_device_priv_data->ata_command_pending)) |
5189 | return SCSI_MLQUEUE_DEVICE_BUSY; |
5190 | } while (_scsih_set_satl_pending(scmd, true)); |
5191 | |
5192 | if (scmd->sc_data_direction == DMA_FROM_DEVICE) |
5193 | mpi_control = MPI2_SCSIIO_CONTROL_READ; |
5194 | else if (scmd->sc_data_direction == DMA_TO_DEVICE) |
5195 | mpi_control = MPI2_SCSIIO_CONTROL_WRITE; |
5196 | else |
5197 | mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; |
5198 | |
5199 | /* set tags */ |
5200 | mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; |
5201 | /* NCQ priority supported: make sure the control field indicates high priority */ |
5202 | if (sas_device_priv_data->ncq_prio_enable) { |
5203 | class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); |
5204 | if (class == IOPRIO_CLASS_RT) |
5205 | mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; |
5206 | } |
5207 | /* Make sure the device is not a RAID volume. |
5208 | * We do not expose RAID functionality to the upper layers for WarpDrive. |
5209 | */ |
5210 | if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) |
5211 | && !scsih_is_nvme(&scmd->device->sdev_gendev)) |
5212 | && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) |
5213 | mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; |
5214 | |
5215 | smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); |
5216 | if (!smid) { |
5217 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
5218 | _scsih_set_satl_pending(scmd, false); |
5219 | goto out; |
5220 | } |
5221 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
5222 | memset(mpi_request, 0, ioc->request_sz); |
5223 | _scsih_setup_eedp(ioc, scmd, mpi_request); |
5224 | |
5225 | if (scmd->cmd_len == 32) |
5226 | mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; |
5227 | mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; |
5228 | if (sas_device_priv_data->sas_target->flags & |
5229 | MPT_TARGET_FLAGS_RAID_COMPONENT) |
5230 | mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; |
5231 | else |
5232 | mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; |
5233 | mpi_request->DevHandle = cpu_to_le16(handle); |
5234 | mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); |
5235 | mpi_request->Control = cpu_to_le32(mpi_control); |
5236 | mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); |
5237 | mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; |
5238 | mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; |
5239 | mpi_request->SenseBufferLowAddress = |
5240 | mpt3sas_base_get_sense_buffer_dma(ioc, smid); |
5241 | mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; |
5242 | int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) |
5243 | mpi_request->LUN); |
5244 | memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); |
5245 | |
5246 | if (mpi_request->DataLength) { |
5247 | pcie_device = sas_target_priv_data->pcie_dev; |
5248 | if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { |
5249 | mpt3sas_base_free_smid(ioc, smid); |
5250 | _scsih_set_satl_pending(scmd, false); |
5251 | goto out; |
5252 | } |
5253 | } else |
5254 | ioc->build_zero_len_sge(ioc, &mpi_request->SGL); |
5255 | |
5256 | raid_device = sas_target_priv_data->raid_device; |
5257 | if (raid_device && raid_device->direct_io_enabled) |
5258 | mpt3sas_setup_direct_io(ioc, scmd, |
5259 | raid_device, mpi_request); |
5260 | |
5261 | if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { |
5262 | if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { |
5263 | mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | |
5264 | MPI25_SCSIIO_IOFLAGS_FAST_PATH); |
5265 | ioc->put_smid_fast_path(ioc, smid, handle); |
5266 | } else |
5267 | ioc->put_smid_scsi_io(ioc, smid, |
5268 | le16_to_cpu(mpi_request->DevHandle)); |
5269 | } else |
5270 | ioc->put_smid_default(ioc, smid); |
5271 | return 0; |
5272 | |
5273 | out: |
5274 | return SCSI_MLQUEUE_HOST_BUSY; |
5275 | } |
5276 | |
5277 | /** |
5278 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data |
5279 | * @sense_buffer: sense data returned by target |
5280 | * @data: normalized skey/asc/ascq |
5281 | */ |
5282 | static void |
5283 | _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) |
5284 | { |
5285 | if ((sense_buffer[0] & 0x7F) >= 0x72) { |
5286 | /* descriptor format */ |
5287 | data->skey = sense_buffer[1] & 0x0F; |
5288 | data->asc = sense_buffer[2]; |
5289 | data->ascq = sense_buffer[3]; |
5290 | } else { |
5291 | /* fixed format */ |
5292 | data->skey = sense_buffer[2] & 0x0F; |
5293 | data->asc = sense_buffer[12]; |
5294 | data->ascq = sense_buffer[13]; |
5295 | } |
5296 | } |
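/*
 * Illustrative sketch (comment only): the two layouts handled above.
 * Fixed format (response codes 0x70/0x71) keeps the sense key in byte 2
 * and asc/ascq in bytes 12/13; descriptor format (0x72/0x73) places them
 * in bytes 1..3. For example, a descriptor-format buffer beginning
 *
 *	0x72, 0x03, 0x11, 0x00, ...
 *
 * normalizes to skey = 0x03 (MEDIUM ERROR), asc = 0x11, ascq = 0x00.
 */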
5297 | |
5298 | /** |
5299 | * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request |
5300 | * @ioc: per adapter object |
5301 | * @scmd: pointer to scsi command object |
5302 | * @mpi_reply: reply mf payload returned from firmware |
5303 | * @smid: system request message index |
5304 | * |
5305 | * scsi_status - SCSI Status code returned from target device |
5306 | * scsi_state - state info associated with SCSI_IO determined by ioc |
5307 | * ioc_status - ioc supplied status info |
5308 | */ |
5309 | static void |
5310 | _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, |
5311 | Mpi2SCSIIOReply_t *mpi_reply, u16 smid) |
5312 | { |
5313 | u32 response_info; |
5314 | u8 *response_bytes; |
5315 | u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & |
5316 | MPI2_IOCSTATUS_MASK; |
5317 | u8 scsi_state = mpi_reply->SCSIState; |
5318 | u8 scsi_status = mpi_reply->SCSIStatus; |
5319 | char *desc_ioc_state = NULL; |
5320 | char *desc_scsi_status = NULL; |
5321 | char *desc_scsi_state = ioc->tmp_string; |
5322 | u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); |
5323 | struct _sas_device *sas_device = NULL; |
5324 | struct _pcie_device *pcie_device = NULL; |
5325 | struct scsi_target *starget = scmd->device->sdev_target; |
5326 | struct MPT3SAS_TARGET *priv_target = starget->hostdata; |
5327 | char *device_str = NULL; |
5328 | |
5329 | if (!priv_target) |
5330 | return; |
5331 | if (ioc->hide_ir_msg) |
5332 | device_str = "WarpDrive"; |
5333 | else |
5334 | device_str = "volume"; |
5335 | |
5336 | if (log_info == 0x31170000) |
5337 | return; |
5338 | |
5339 | switch (ioc_status) { |
5340 | case MPI2_IOCSTATUS_SUCCESS: |
5341 | desc_ioc_state = "success"; |
5342 | break; |
5343 | case MPI2_IOCSTATUS_INVALID_FUNCTION: |
5344 | desc_ioc_state = "invalid function"; |
5345 | break; |
5346 | case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: |
5347 | desc_ioc_state = "scsi recovered error"; |
5348 | break; |
5349 | case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: |
5350 | desc_ioc_state = "scsi invalid dev handle"; |
5351 | break; |
5352 | case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: |
5353 | desc_ioc_state = "scsi device not there"; |
5354 | break; |
5355 | case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: |
5356 | desc_ioc_state = "scsi data overrun"; |
5357 | break; |
5358 | case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: |
5359 | desc_ioc_state = "scsi data underrun"; |
5360 | break; |
5361 | case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: |
5362 | desc_ioc_state = "scsi io data error"; |
5363 | break; |
5364 | case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: |
5365 | desc_ioc_state = "scsi protocol error"; |
5366 | break; |
5367 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: |
5368 | desc_ioc_state = "scsi task terminated"; |
5369 | break; |
5370 | case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: |
5371 | desc_ioc_state = "scsi residual mismatch"; |
5372 | break; |
5373 | case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: |
5374 | desc_ioc_state = "scsi task mgmt failed"; |
5375 | break; |
5376 | case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: |
5377 | desc_ioc_state = "scsi ioc terminated"; |
5378 | break; |
5379 | case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: |
5380 | desc_ioc_state = "scsi ext terminated"; |
5381 | break; |
5382 | case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: |
5383 | desc_ioc_state = "eedp guard error"; |
5384 | break; |
5385 | case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: |
5386 | desc_ioc_state = "eedp ref tag error"; |
5387 | break; |
5388 | case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: |
5389 | desc_ioc_state = "eedp app tag error"; |
5390 | break; |
5391 | case MPI2_IOCSTATUS_INSUFFICIENT_POWER: |
5392 | desc_ioc_state = "insufficient power"; |
5393 | break; |
5394 | default: |
5395 | desc_ioc_state = "unknown"; |
5396 | break; |
5397 | } |
5398 | |
5399 | switch (scsi_status) { |
5400 | case MPI2_SCSI_STATUS_GOOD: |
5401 | desc_scsi_status = "good"; |
5402 | break; |
5403 | case MPI2_SCSI_STATUS_CHECK_CONDITION: |
5404 | desc_scsi_status = "check condition"; |
5405 | break; |
5406 | case MPI2_SCSI_STATUS_CONDITION_MET: |
5407 | desc_scsi_status = "condition met"; |
5408 | break; |
5409 | case MPI2_SCSI_STATUS_BUSY: |
5410 | desc_scsi_status = "busy"; |
5411 | break; |
5412 | case MPI2_SCSI_STATUS_INTERMEDIATE: |
5413 | desc_scsi_status = "intermediate"; |
5414 | break; |
5415 | case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: |
5416 | desc_scsi_status = "intermediate condmet"; |
5417 | break; |
5418 | case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: |
5419 | desc_scsi_status = "reservation conflict"; |
5420 | break; |
5421 | case MPI2_SCSI_STATUS_COMMAND_TERMINATED: |
5422 | desc_scsi_status = "command terminated"; |
5423 | break; |
5424 | case MPI2_SCSI_STATUS_TASK_SET_FULL: |
5425 | desc_scsi_status = "task set full"; |
5426 | break; |
5427 | case MPI2_SCSI_STATUS_ACA_ACTIVE: |
5428 | desc_scsi_status = "aca active"; |
5429 | break; |
5430 | case MPI2_SCSI_STATUS_TASK_ABORTED: |
5431 | desc_scsi_status = "task aborted"; |
5432 | break; |
5433 | default: |
5434 | desc_scsi_status = "unknown"; |
5435 | break; |
5436 | } |
5437 | |
5438 | desc_scsi_state[0] = '\0'; |
5439 | if (!scsi_state) |
5440 | desc_scsi_state = " "; |
5441 | if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) |
5442 | strcat(desc_scsi_state, "response info "); |
5443 | if (scsi_state & MPI2_SCSI_STATE_TERMINATED) |
5444 | strcat(desc_scsi_state, "state terminated "); |
5445 | if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) |
5446 | strcat(desc_scsi_state, "no status "); |
5447 | if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) |
5448 | strcat(desc_scsi_state, "autosense failed "); |
5449 | if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) |
5450 | strcat(desc_scsi_state, "autosense valid "); |
5451 | |
5452 | scsi_print_command(scmd); |
5453 | |
5454 | if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { |
5455 | ioc_warn(ioc, "\t%s wwid(0x%016llx)\n", |
5456 | device_str, (u64)priv_target->sas_address); |
5457 | } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { |
5458 | pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target); |
5459 | if (pcie_device) { |
5460 | ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n", |
5461 | (u64)pcie_device->wwid, pcie_device->port_num); |
5462 | if (pcie_device->enclosure_handle != 0) |
5463 | ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n", |
5464 | (u64)pcie_device->enclosure_logical_id, |
5465 | pcie_device->slot); |
5466 | if (pcie_device->connector_name[0]) |
5467 | ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n", |
5468 | pcie_device->enclosure_level, |
5469 | pcie_device->connector_name); |
5470 | pcie_device_put(pcie_device); |
5471 | } |
5472 | } else { |
5473 | sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); |
5474 | if (sas_device) { |
5475 | ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n", |
5476 | (u64)sas_device->sas_address, sas_device->phy); |
5477 | |
5478 | _scsih_display_enclosure_chassis_info(ioc, sas_device, |
5479 | NULL, NULL); |
5480 | |
5481 | sas_device_put(sas_device); |
5482 | } |
5483 | } |
5484 | |
5485 | ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", |
5486 | le16_to_cpu(mpi_reply->DevHandle), |
5487 | desc_ioc_state, ioc_status, smid); |
5488 | ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n", |
5489 | scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd)); |
5490 | ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", |
5491 | le16_to_cpu(mpi_reply->TaskTag), |
5492 | le32_to_cpu(mpi_reply->TransferCount), scmd->result); |
5493 | ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", |
5494 | desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); |
5495 | |
5496 | if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { |
5497 | struct sense_info data; |
5498 | _scsih_normalize_sense(scmd->sense_buffer, &data); |
5499 | ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", |
5500 | data.skey, data.asc, data.ascq, |
5501 | le32_to_cpu(mpi_reply->SenseCount)); |
5502 | } |
5503 | if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { |
5504 | response_info = le32_to_cpu(mpi_reply->ResponseInfo); |
5505 | response_bytes = (u8 *)&response_info; |
5506 | _scsih_response_code(ioc, response_bytes[0]); |
5507 | } |
5508 | } |
5509 | |
5510 | /** |
5511 | * _scsih_turn_on_pfa_led - illuminate PFA LED |
5512 | * @ioc: per adapter object |
5513 | * @handle: device handle |
5514 | * Context: process |
5515 | */ |
5516 | static void |
5517 | _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
5518 | { |
5519 | Mpi2SepReply_t mpi_reply; |
5520 | Mpi2SepRequest_t mpi_request; |
5521 | struct _sas_device *sas_device; |
5522 | |
5523 | sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); |
5524 | if (!sas_device) |
5525 | return; |
5526 | |
5527 | memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); |
5528 | mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; |
5529 | mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; |
5530 | mpi_request.SlotStatus = |
5531 | cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); |
5532 | mpi_request.DevHandle = cpu_to_le16(handle); |
5533 | mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; |
5534 | if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, |
5535 | &mpi_request)) != 0) { |
5536 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
5537 | __FILE__, __LINE__, __func__); |
5538 | goto out; |
5539 | } |
5540 | sas_device->pfa_led_on = 1; |
5541 | |
5542 | if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { |
5543 | dewtprintk(ioc, |
5544 | ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", |
5545 | le16_to_cpu(mpi_reply.IOCStatus), |
5546 | le32_to_cpu(mpi_reply.IOCLogInfo))); |
5547 | goto out; |
5548 | } |
5549 | out: |
5550 | sas_device_put(sas_device); |
5551 | } |
5552 | |
5553 | /** |
5554 | * _scsih_turn_off_pfa_led - turn off Fault LED |
5555 | * @ioc: per adapter object |
5556 | * @sas_device: sas device whose PFA LED has to turned off |
5557 | * Context: process |
5558 | */ |
5559 | static void |
5560 | _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, |
5561 | struct _sas_device *sas_device) |
5562 | { |
5563 | Mpi2SepReply_t mpi_reply; |
5564 | Mpi2SepRequest_t mpi_request; |
5565 | |
5566 | memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); |
5567 | mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; |
5568 | mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; |
5569 | mpi_request.SlotStatus = 0; |
5570 | mpi_request.Slot = cpu_to_le16(sas_device->slot); |
5571 | mpi_request.DevHandle = 0; |
5572 | mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); |
5573 | mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; |
5574 | if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, |
5575 | &mpi_request)) != 0) { |
5576 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
5577 | __FILE__, __LINE__, __func__); |
5578 | return; |
5579 | } |
5580 | |
5581 | if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { |
5582 | dewtprintk(ioc, |
5583 | ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", |
5584 | le16_to_cpu(mpi_reply.IOCStatus), |
5585 | le32_to_cpu(mpi_reply.IOCLogInfo))); |
5586 | return; |
5587 | } |
5588 | } |
5589 | |
5590 | /** |
5591 | * _scsih_send_event_to_turn_on_pfa_led - fire delayed event |
5592 | * @ioc: per adapter object |
5593 | * @handle: device handle |
5594 | * Context: interrupt. |
5595 | */ |
5596 | static void |
5597 | _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
5598 | { |
5599 | struct fw_event_work *fw_event; |
5600 | |
5601 | fw_event = alloc_fw_event_work(0); |
5602 | if (!fw_event) |
5603 | return; |
5604 | fw_event->event = MPT3SAS_TURN_ON_PFA_LED; |
5605 | fw_event->device_handle = handle; |
5606 | fw_event->ioc = ioc; |
5607 | _scsih_fw_event_add(ioc, fw_event); |
5608 | fw_event_work_put(fw_event); |
5609 | } |
5610 | |
5611 | /** |
5612 | * _scsih_smart_predicted_fault - process smart errors |
5613 | * @ioc: per adapter object |
5614 | * @handle: device handle |
5615 | * Context: interrupt. |
5616 | */ |
5617 | static void |
5618 | _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
5619 | { |
5620 | struct scsi_target *starget; |
5621 | struct MPT3SAS_TARGET *sas_target_priv_data; |
5622 | Mpi2EventNotificationReply_t *event_reply; |
5623 | Mpi2EventDataSasDeviceStatusChange_t *event_data; |
5624 | struct _sas_device *sas_device; |
5625 | ssize_t sz; |
5626 | unsigned long flags; |
5627 | |
5628 | /* only handle non-raid devices */ |
5629 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
5630 | sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); |
5631 | if (!sas_device) |
5632 | goto out_unlock; |
5633 | |
5634 | starget = sas_device->starget; |
5635 | sas_target_priv_data = starget->hostdata; |
5636 | |
5637 | if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || |
5638 | ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) |
5639 | goto out_unlock; |
5640 | |
5641 | _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget); |
5642 | |
5643 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
5644 | |
5645 | if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) |
5646 | _scsih_send_event_to_turn_on_pfa_led(ioc, handle); |
5647 | |
5648 | /* insert into event log */ |
5649 | sz = offsetof(Mpi2EventNotificationReply_t, EventData) + |
5650 | sizeof(Mpi2EventDataSasDeviceStatusChange_t); |
5651 | event_reply = kzalloc(sz, GFP_ATOMIC); |
5652 | if (!event_reply) { |
5653 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
5654 | __FILE__, __LINE__, __func__); |
5655 | goto out; |
5656 | } |
5657 | |
5658 | event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; |
5659 | event_reply->Event = |
5660 | cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); |
5661 | event_reply->MsgLength = sz/4; |
5662 | event_reply->EventDataLength = |
5663 | cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); |
5664 | event_data = (Mpi2EventDataSasDeviceStatusChange_t *) |
5665 | event_reply->EventData; |
5666 | event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; |
5667 | event_data->ASC = 0x5D; |
5668 | event_data->DevHandle = cpu_to_le16(handle); |
5669 | event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); |
5670 | mpt3sas_ctl_add_to_event_log(ioc, event_reply); |
5671 | kfree(event_reply); |
5672 | out: |
5673 | if (sas_device) |
5674 | sas_device_put(sas_device); |
5675 | return; |
5676 | |
5677 | out_unlock: |
5678 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
5679 | goto out; |
5680 | } |
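/*
 * Illustrative note (comment only): the synthesized event above mirrors
 * what the IOC itself would post for a SMART trip -- a
 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE with ReasonCode
 * MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA and ASC 0x5D ("FAILURE
 * PREDICTION THRESHOLD EXCEEDED") -- so user space monitoring the ctl
 * event log sees drive-reported and driver-detected SMART faults in the
 * same format.
 */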
5681 | |
5682 | /** |
5683 | * _scsih_io_done - scsi request callback |
5684 | * @ioc: per adapter object |
5685 | * @smid: system request message index |
5686 | * @msix_index: MSIX table index supplied by the OS |
5687 | * @reply: reply message frame(lower 32bit addr) |
5688 | * |
5689 | * Callback handler when using _scsih_qcmd. |
5690 | * |
5691 | * Return: 1 meaning mf should be freed from _base_interrupt |
5692 | * 0 means the mf is freed from this function. |
5693 | */ |
5694 | static u8 |
5695 | _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
5696 | { |
5697 | Mpi25SCSIIORequest_t *mpi_request; |
5698 | Mpi2SCSIIOReply_t *mpi_reply; |
5699 | struct scsi_cmnd *scmd; |
5700 | struct scsiio_tracker *st; |
5701 | u16 ioc_status; |
5702 | u32 xfer_cnt; |
5703 | u8 scsi_state; |
5704 | u8 scsi_status; |
5705 | u32 log_info; |
5706 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
5707 | u32 response_code = 0; |
5708 | |
5709 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); |
5710 | |
5711 | scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); |
5712 | if (scmd == NULL) |
5713 | return 1; |
5714 | |
5715 | _scsih_set_satl_pending(scmd, false); |
5716 | |
5717 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
5718 | |
5719 | if (mpi_reply == NULL) { |
5720 | scmd->result = DID_OK << 16; |
5721 | goto out; |
5722 | } |
5723 | |
5724 | sas_device_priv_data = scmd->device->hostdata; |
5725 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
5726 | sas_device_priv_data->sas_target->deleted) { |
5727 | scmd->result = DID_NO_CONNECT << 16; |
5728 | goto out; |
5729 | } |
5730 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus); |
5731 | |
5732 | /* |
5733 | * WARPDRIVE: If direct_io is set then this was a direct I/O; |
5734 | * a failed direct I/O should be redirected to the volume. |
5735 | */ |
5736 | st = scsi_cmd_priv(scmd); |
5737 | if (st->direct_io && |
5738 | ((ioc_status & MPI2_IOCSTATUS_MASK) |
5739 | != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { |
5740 | st->direct_io = 0; |
5741 | st->scmd = scmd; |
5742 | memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); |
5743 | mpi_request->DevHandle = |
5744 | cpu_to_le16(sas_device_priv_data->sas_target->handle); |
5745 | ioc->put_smid_scsi_io(ioc, smid, |
5746 | sas_device_priv_data->sas_target->handle); |
5747 | return 0; |
5748 | } |
5749 | /* turning off TLR */ |
5750 | scsi_state = mpi_reply->SCSIState; |
5751 | if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) |
5752 | response_code = |
5753 | le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; |
5754 | if (!sas_device_priv_data->tlr_snoop_check) { |
5755 | sas_device_priv_data->tlr_snoop_check++; |
5756 | if ((!ioc->is_warpdrive && |
5757 | !scsih_is_raid(&scmd->device->sdev_gendev) && |
5758 | !scsih_is_nvme(&scmd->device->sdev_gendev)) |
5759 | && sas_is_tlr_enabled(scmd->device) && |
5760 | response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { |
5761 | sas_disable_tlr(scmd->device); |
5762 | sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); |
5763 | } |
5764 | } |
5765 | |
5766 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); |
5767 | scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); |
5768 | if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) |
5769 | log_info = le32_to_cpu(mpi_reply->IOCLogInfo); |
5770 | else |
5771 | log_info = 0; |
5772 | ioc_status &= MPI2_IOCSTATUS_MASK; |
5773 | scsi_status = mpi_reply->SCSIStatus; |
5774 | |
5775 | if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && |
5776 | (scsi_status == MPI2_SCSI_STATUS_BUSY || |
5777 | scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || |
5778 | scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { |
5779 | ioc_status = MPI2_IOCSTATUS_SUCCESS; |
5780 | } |
5781 | |
5782 | if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { |
5783 | struct sense_info data; |
5784 | const void *sense_data = mpt3sas_base_get_sense_buffer(ioc, |
5785 | smid); |
5786 | u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, |
5787 | le32_to_cpu(mpi_reply->SenseCount)); |
5788 | memcpy(scmd->sense_buffer, sense_data, sz); |
5789 | _scsih_normalize_sense(scmd->sense_buffer, &data); |
5790 | /* failure prediction threshold exceeded */ |
5791 | if (data.asc == 0x5D) |
5792 | _scsih_smart_predicted_fault(ioc, |
5793 | le16_to_cpu(mpi_reply->DevHandle)); |
5794 | mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); |
5795 | |
5796 | if ((ioc->logging_level & MPT_DEBUG_REPLY) && |
5797 | ((scmd->sense_buffer[2] == UNIT_ATTENTION) || |
5798 | (scmd->sense_buffer[2] == MEDIUM_ERROR) || |
5799 | (scmd->sense_buffer[2] == HARDWARE_ERROR))) |
5800 | _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); |
5801 | } |
5802 | switch (ioc_status) { |
5803 | case MPI2_IOCSTATUS_BUSY: |
5804 | case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: |
5805 | scmd->result = SAM_STAT_BUSY; |
5806 | break; |
5807 | |
5808 | case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: |
5809 | scmd->result = DID_NO_CONNECT << 16; |
5810 | break; |
5811 | |
5812 | case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: |
5813 | if (sas_device_priv_data->block) { |
5814 | scmd->result = DID_TRANSPORT_DISRUPTED << 16; |
5815 | goto out; |
5816 | } |
5817 | if (log_info == 0x31110630) { |
5818 | if (scmd->retries > 2) { |
5819 | scmd->result = DID_NO_CONNECT << 16; |
5820 | scsi_device_set_state(scmd->device, |
5821 | SDEV_OFFLINE); |
5822 | } else { |
5823 | scmd->result = DID_SOFT_ERROR << 16; |
5824 | scmd->device->expecting_cc_ua = 1; |
5825 | } |
5826 | break; |
5827 | } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { |
5828 | scmd->result = DID_RESET << 16; |
5829 | break; |
5830 | } else if ((scmd->device->channel == RAID_CHANNEL) && |
5831 | (scsi_state == (MPI2_SCSI_STATE_TERMINATED | |
5832 | MPI2_SCSI_STATE_NO_SCSI_STATUS))) { |
5833 | scmd->result = DID_RESET << 16; |
5834 | break; |
5835 | } |
5836 | scmd->result = DID_SOFT_ERROR << 16; |
5837 | break; |
5838 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: |
5839 | case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: |
5840 | scmd->result = DID_RESET << 16; |
5841 | break; |
5842 | |
5843 | case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: |
5844 | if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) |
5845 | scmd->result = DID_SOFT_ERROR << 16; |
5846 | else |
5847 | scmd->result = (DID_OK << 16) | scsi_status; |
5848 | break; |
5849 | |
5850 | case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: |
5851 | scmd->result = (DID_OK << 16) | scsi_status; |
5852 | |
5853 | if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) |
5854 | break; |
5855 | |
5856 | if (xfer_cnt < scmd->underflow) { |
5857 | if (scsi_status == SAM_STAT_BUSY) |
5858 | scmd->result = SAM_STAT_BUSY; |
5859 | else |
5860 | scmd->result = DID_SOFT_ERROR << 16; |
5861 | } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | |
5862 | MPI2_SCSI_STATE_NO_SCSI_STATUS)) |
5863 | scmd->result = DID_SOFT_ERROR << 16; |
5864 | else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) |
5865 | scmd->result = DID_RESET << 16; |
5866 | else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { |
5867 | mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID; |
5868 | mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; |
5869 | scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, |
5870 | 0x20, 0); |
5871 | } |
5872 | break; |
5873 | |
5874 | case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: |
5875 | scsi_set_resid(scmd, 0); |
5876 | fallthrough; |
5877 | case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: |
5878 | case MPI2_IOCSTATUS_SUCCESS: |
5879 | scmd->result = (DID_OK << 16) | scsi_status; |
5880 | if (response_code == |
5881 | MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || |
5882 | (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | |
5883 | MPI2_SCSI_STATE_NO_SCSI_STATUS))) |
5884 | scmd->result = DID_SOFT_ERROR << 16; |
5885 | else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) |
5886 | scmd->result = DID_RESET << 16; |
5887 | break; |
5888 | |
5889 | case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: |
5890 | case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: |
5891 | case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: |
5892 | _scsih_eedp_error_handling(scmd, ioc_status); |
5893 | break; |
5894 | |
5895 | case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: |
5896 | case MPI2_IOCSTATUS_INVALID_FUNCTION: |
5897 | case MPI2_IOCSTATUS_INVALID_SGL: |
5898 | case MPI2_IOCSTATUS_INTERNAL_ERROR: |
5899 | case MPI2_IOCSTATUS_INVALID_FIELD: |
5900 | case MPI2_IOCSTATUS_INVALID_STATE: |
5901 | case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: |
5902 | case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: |
5903 | case MPI2_IOCSTATUS_INSUFFICIENT_POWER: |
5904 | default: |
5905 | scmd->result = DID_SOFT_ERROR << 16; |
5906 | break; |
5907 | |
5908 | } |
5909 | |
5910 | if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY)) |
5911 | _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid); |
5912 | |
5913 | out: |
5914 | |
5915 | scsi_dma_unmap(scmd); |
5916 | mpt3sas_base_free_smid(ioc, smid); |
5917 | scsi_done(scmd); |
5918 | return 0; |
5919 | } |
5920 | |
5921 | /** |
5922 | * _scsih_update_vphys_after_reset - update the Port's |
5923 | * vphys_list after reset |
5924 | * @ioc: per adapter object |
5925 | * |
5926 | * Returns nothing. |
5927 | */ |
5928 | static void |
5929 | _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc) |
5930 | { |
5931 | u16 sz, ioc_status; |
5932 | int i; |
5933 | Mpi2ConfigReply_t mpi_reply; |
5934 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; |
5935 | u16 attached_handle; |
5936 | u64 attached_sas_addr; |
5937 | u8 found = 0, port_id; |
5938 | Mpi2SasPhyPage0_t phy_pg0; |
5939 | struct hba_port *port, *port_next, *mport; |
5940 | struct virtual_phy *vphy, *vphy_next; |
5941 | struct _sas_device *sas_device; |
5942 | |
5943 | /* |
5944 | * Mark all the vphys objects as dirty. |
5945 | */ |
5946 | list_for_each_entry_safe(port, port_next, |
5947 | &ioc->port_table_list, list) { |
5948 | if (!port->vphys_mask) |
5949 | continue; |
5950 | list_for_each_entry_safe(vphy, vphy_next, |
5951 | &port->vphys_list, list) { |
5952 | vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY; |
5953 | } |
5954 | } |
5955 | |
5956 | /* |
5957 | * Read SASIOUnitPage0 to get each HBA Phy's data. |
5958 | */ |
5959 | sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys); |
5960 | sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); |
5961 | if (!sas_iounit_pg0) { |
5962 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
5963 | __FILE__, __LINE__, __func__); |
5964 | return; |
5965 | } |
5966 | if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, |
5967 | sas_iounit_pg0, sz)) != 0) |
5968 | goto out; |
5969 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
5970 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
5971 | goto out; |
5972 | /* |
5973 | * Loop over each HBA Phy. |
5974 | */ |
5975 | for (i = 0; i < ioc->sas_hba.num_phys; i++) { |
5976 | /* |
5977 | * Check whether Phy's Negotiation Link Rate is > 1.5G or not. |
5978 | */ |
5979 | if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < |
5980 | MPI2_SAS_NEG_LINK_RATE_1_5) |
5981 | continue; |
5982 | /* |
5983 | * Check whether the Phy is connected to a SEP device. If it is |
5984 | * a SEP device, read the Phy's SASPHYPage0 data to determine |
5985 | * whether the Phy is a virtual Phy. If it is a virtual Phy, then |
5986 | * it is confirmed that the attached remote |
5987 | * device is the HBA's vSES device. |
5988 | */ |
5989 | if (!(le32_to_cpu( |
5990 | sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & |
5991 | MPI2_SAS_DEVICE_INFO_SEP)) |
5992 | continue; |
5993 | |
5994 | if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, |
5995 | i))) { |
5996 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
5997 | __FILE__, __LINE__, __func__); |
5998 | continue; |
5999 | } |
6000 | |
6001 | if (!(le32_to_cpu(phy_pg0.PhyInfo) & |
6002 | MPI2_SAS_PHYINFO_VIRTUAL_PHY)) |
6003 | continue; |
6004 | /* |
6005 | * Get the vSES device's SAS Address. |
6006 | */ |
6007 | attached_handle = le16_to_cpu( |
6008 | sas_iounit_pg0->PhyData[i].AttachedDevHandle); |
6009 | if (_scsih_get_sas_address(ioc, attached_handle, |
6010 | &attached_sas_addr) != 0) { |
6011 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6012 | __FILE__, __LINE__, __func__); |
6013 | continue; |
6014 | } |
6015 | |
6016 | found = 0; |
6017 | port = port_next = NULL; |
6018 | /* |
6019 | * Loop over each virtual_phy object from |
6020 | * each port's vphys_list. |
6021 | */ |
6022 | list_for_each_entry_safe(port, |
6023 | port_next, &ioc->port_table_list, list) { |
6024 | if (!port->vphys_mask) |
6025 | continue; |
6026 | list_for_each_entry_safe(vphy, vphy_next, |
6027 | &port->vphys_list, list) { |
6028 | /* |
6029 | * Continue with next virtual_phy object |
6030 | * if the object is not marked as dirty. |
6031 | */ |
6032 | if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY)) |
6033 | continue; |
6034 | |
6035 | /* |
6036 | * Continue with next virtual_phy object |
6037 | * if the object's SAS Address is not equal |
6038 | * to the current Phy's vSES device SAS Address. |
6039 | */ |
6040 | if (vphy->sas_address != attached_sas_addr) |
6041 | continue; |
6042 | /* |
6043 | * Enable current Phy number bit in object's |
6044 | * phy_mask field. |
6045 | */ |
6046 | if (!(vphy->phy_mask & (1 << i))) |
6047 | vphy->phy_mask = (1 << i); |
6048 | /* |
6049 | * Get hba_port object from hba_port table |
6050 | * corresponding to current phy's Port ID. |
6051 | * if there is no hba_port object corresponding |
6052 | * to Phy's Port ID then create a new hba_port |
6053 | * object & add to hba_port table. |
6054 | */ |
6055 | port_id = sas_iounit_pg0->PhyData[i].Port; |
6056 | mport = mpt3sas_get_port_by_id(ioc, port_id, 1); |
6057 | if (!mport) { |
6058 | mport = kzalloc( |
6059 | sizeof(struct hba_port), GFP_KERNEL); |
6060 | if (!mport) |
6061 | break; |
6062 | mport->port_id = port_id; |
6063 | ioc_info(ioc, |
6064 | "%s: hba_port entry: %p, port: %d is added to hba_port list\n", |
6065 | __func__, mport, mport->port_id); |
6066 | list_add_tail(&mport->list, |
6067 | &ioc->port_table_list); |
6068 | } |
6069 | /* |
6070 | * If mport & port pointers are not pointing to |
6071 | * same hba_port object then it means that vSES |
6072 | * device's Port ID got changed after reset and |
6073 | * hence move current virtual_phy object from |
6074 | * port's vphys_list to mport's vphys_list. |
6075 | */ |
6076 | if (port != mport) { |
6077 | if (!mport->vphys_mask) |
6078 | INIT_LIST_HEAD( |
6079 | &mport->vphys_list); |
6080 | mport->vphys_mask |= (1 << i); |
6081 | port->vphys_mask &= ~(1 << i); |
6082 | list_move(&vphy->list, |
6083 | &mport->vphys_list); |
6084 | sas_device = mpt3sas_get_sdev_by_addr( |
6085 | ioc, attached_sas_addr, port); |
6086 | if (sas_device) |
6087 | sas_device->port = mport; |
6088 | } |
6089 | /* |
6090 | * Earlier, while updating the hba_port table, |
6091 | * it was determined that there is no other |
6092 | * directly attached device with mport's Port ID; |
6093 | * hence mport was marked as dirty. Only the vSES |
6094 | * device has this Port ID, so unmark the mport |
6095 | * as dirty. |
6096 | */ |
6097 | if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) { |
6098 | mport->sas_address = 0; |
6099 | mport->phy_mask = 0; |
6100 | mport->flags &= |
6101 | ~HBA_PORT_FLAG_DIRTY_PORT; |
6102 | } |
6103 | /* |
6104 | * Unmark current virtual_phy object as dirty. |
6105 | */ |
6106 | vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY; |
6107 | found = 1; |
6108 | break; |
6109 | } |
6110 | if (found) |
6111 | break; |
6112 | } |
6113 | } |
6114 | out: |
6115 | kfree(sas_iounit_pg0); |
6116 | } |
6117 | |
6118 | /** |
6119 | * _scsih_get_port_table_after_reset - Construct temporary port table |
6120 | * @ioc: per adapter object |
6121 | * @port_table: address where port table needs to be constructed |
6122 | * |
6123 | * Return: number of HBA port entries available after reset. |
6124 | */ |
6125 | static int |
6126 | _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc, |
6127 | struct hba_port *port_table) |
6128 | { |
6129 | u16 sz, ioc_status; |
6130 | int i, j; |
6131 | Mpi2ConfigReply_t mpi_reply; |
6132 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; |
6133 | u16 attached_handle; |
6134 | u64 attached_sas_addr; |
6135 | u8 found = 0, port_count = 0, port_id; |
6136 | |
6137 | sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys); |
6138 | sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); |
6139 | if (!sas_iounit_pg0) { |
6140 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6141 | __FILE__, __LINE__, __func__); |
6142 | return port_count; |
6143 | } |
6144 | |
6145 | if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, |
6146 | sas_iounit_pg0, sz)) != 0) |
6147 | goto out; |
6148 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
6149 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
6150 | goto out; |
6151 | for (i = 0; i < ioc->sas_hba.num_phys; i++) { |
6152 | found = 0; |
6153 | if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < |
6154 | MPI2_SAS_NEG_LINK_RATE_1_5) |
6155 | continue; |
6156 | attached_handle = |
6157 | le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); |
6158 | if (_scsih_get_sas_address( |
6159 | ioc, attached_handle, &attached_sas_addr) != 0) { |
6160 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6161 | __FILE__, __LINE__, __func__); |
6162 | continue; |
6163 | } |
6164 | |
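/*
 * Fold this phy into an existing temporary port entry when both the
 * Port ID and the attached SAS address match, i.e. it is another phy
 * of the same wide port.
 */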
6165 | for (j = 0; j < port_count; j++) { |
6166 | port_id = sas_iounit_pg0->PhyData[i].Port; |
6167 | if (port_table[j].port_id == port_id && |
6168 | port_table[j].sas_address == attached_sas_addr) { |
6169 | port_table[j].phy_mask |= (1 << i); |
6170 | found = 1; |
6171 | break; |
6172 | } |
6173 | } |
6174 | |
6175 | if (found) |
6176 | continue; |
6177 | |
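/* No existing entry matched, so start a new port entry for this phy. */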
6178 | port_id = sas_iounit_pg0->PhyData[i].Port; |
6179 | port_table[port_count].port_id = port_id; |
6180 | port_table[port_count].phy_mask = (1 << i); |
6181 | port_table[port_count].sas_address = attached_sas_addr; |
6182 | port_count++; |
6183 | } |
6184 | out: |
6185 | kfree(sas_iounit_pg0); |
6186 | return port_count; |
6187 | } |
6188 | |
6189 | enum hba_port_matched_codes { |
6190 | NOT_MATCHED = 0, |
6191 | MATCHED_WITH_ADDR_AND_PHYMASK, |
6192 | MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, |
6193 | MATCHED_WITH_ADDR_AND_SUBPHYMASK, |
6194 | MATCHED_WITH_ADDR, |
6195 | }; |
6196 | |
6197 | /** |
6198 | * _scsih_look_and_get_matched_port_entry - Get matched hba port entry |
6199 | * from HBA port table |
6200 | * @ioc: per adapter object |
6201 | * @port_entry: hba port entry from temporary port table which needs to be |
6202 | * searched for matched entry in the HBA port table |
6203 | * @matched_port_entry: save matched hba port entry here |
6204 | * @count: count of matched entries |
6205 | * |
6206 | * Return: type of matched entry found. |
6207 | */ |
6208 | static enum hba_port_matched_codes |
6209 | _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, |
6210 | struct hba_port *port_entry, |
6211 | struct hba_port **matched_port_entry, int *count) |
6212 | { |
6213 | struct hba_port *port_table_entry, *matched_port = NULL; |
6214 | enum hba_port_matched_codes matched_code = NOT_MATCHED; |
6215 | int lcount = 0; |
6216 | *matched_port_entry = NULL; |
6217 | |
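/*
 * Only entries still marked dirty are considered. Keep the best match:
 * an exact SAS address + phy mask match wins, then address + partial
 * phy mask + port id, then address + partial phy mask, and finally
 * address alone.
 */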
6218 | list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { |
6219 | if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT)) |
6220 | continue; |
6221 | |
6222 | if ((port_table_entry->sas_address == port_entry->sas_address) |
6223 | && (port_table_entry->phy_mask == port_entry->phy_mask)) { |
6224 | matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; |
6225 | matched_port = port_table_entry; |
6226 | break; |
6227 | } |
6228 | |
6229 | if ((port_table_entry->sas_address == port_entry->sas_address) |
6230 | && (port_table_entry->phy_mask & port_entry->phy_mask) |
6231 | && (port_table_entry->port_id == port_entry->port_id)) { |
6232 | matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; |
6233 | matched_port = port_table_entry; |
6234 | continue; |
6235 | } |
6236 | |
6237 | if ((port_table_entry->sas_address == port_entry->sas_address) |
6238 | && (port_table_entry->phy_mask & port_entry->phy_mask)) { |
6239 | if (matched_code == |
6240 | MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) |
6241 | continue; |
6242 | matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; |
6243 | matched_port = port_table_entry; |
6244 | continue; |
6245 | } |
6246 | |
6247 | if (port_table_entry->sas_address == port_entry->sas_address) { |
6248 | if (matched_code == |
6249 | MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) |
6250 | continue; |
6251 | if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) |
6252 | continue; |
6253 | matched_code = MATCHED_WITH_ADDR; |
6254 | matched_port = port_table_entry; |
6255 | lcount++; |
6256 | } |
6257 | } |
6258 | |
6259 | *matched_port_entry = matched_port; |
6260 | if (matched_code == MATCHED_WITH_ADDR) |
6261 | *count = lcount; |
6262 | return matched_code; |
6263 | } |
6264 | |
6265 | /** |
6266 | * _scsih_del_phy_part_of_anther_port - remove phy if it |
6267 | * is a part of another port |
6268 | * @ioc: per adapter object |
6269 | * @port_table: port table after reset |
6270 | * @index: hba port entry index |
6271 | * @port_count: number of ports available after host reset |
6272 | * @offset: HBA phy bit offset |
6273 | * |
6274 | */ |
6275 | static void |
6276 | _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc, |
6277 | struct hba_port *port_table, |
6278 | int index, u8 port_count, int offset) |
6279 | { |
6280 | struct _sas_node *sas_node = &ioc->sas_hba; |
6281 | u32 i, found = 0; |
6282 | |
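/*
 * If this phy now shows up under a different port entry in the new
 * table, detach it from its existing SAS port; otherwise claim it for
 * the current entry.
 */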
6283 | for (i = 0; i < port_count; i++) { |
6284 | if (i == index) |
6285 | continue; |
6286 | |
6287 | if (port_table[i].phy_mask & (1 << offset)) { |
6288 | mpt3sas_transport_del_phy_from_an_existing_port( |
6289 | ioc, sas_node, &sas_node->phy[offset]); |
6290 | found = 1; |
6291 | break; |
6292 | } |
6293 | } |
6294 | if (!found) |
6295 | port_table[index].phy_mask |= (1 << offset); |
6296 | } |
6297 | |
6298 | /** |
6299 | * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from |
6300 | * right port |
6301 | * @ioc: per adapter object |
6302 | * @hba_port_entry: hba port table entry |
6303 | * @port_table: temporary port table |
6304 | * @index: hba port entry index |
6305 | * @port_count: number of ports available after host reset |
6306 | * |
6307 | */ |
6308 | static void |
6309 | _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, |
6310 | struct hba_port *hba_port_entry, struct hba_port *port_table, |
6311 | int index, int port_count) |
6312 | { |
6313 | u32 phy_mask, offset = 0; |
6314 | struct _sas_node *sas_node = &ioc->sas_hba; |
6315 | |
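/* The XOR of the old and new phy masks yields the phys whose port membership changed across the reset. */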
6316 | phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; |
6317 | |
6318 | for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { |
6319 | if (phy_mask & (1 << offset)) { |
6320 | if (!(port_table[index].phy_mask & (1 << offset))) { |
6321 | _scsih_del_phy_part_of_anther_port( |
6322 | ioc, port_table, index, port_count, |
6323 | offset); |
6324 | continue; |
6325 | } |
6326 | if (sas_node->phy[offset].phy_belongs_to_port) |
6327 | mpt3sas_transport_del_phy_from_an_existing_port( |
6328 | ioc, sas_node, &sas_node->phy[offset]); |
6329 | mpt3sas_transport_add_phy_to_an_existing_port( |
6330 | ioc, sas_node, &sas_node->phy[offset], |
6331 | hba_port_entry->sas_address, |
6332 | hba_port_entry); |
6333 | } |
6334 | } |
6335 | } |
6336 | |
6337 | /** |
6338 | * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty. |
6339 | * @ioc: per adapter object |
6340 | * |
6341 | * Returns nothing. |
6342 | */ |
6343 | static void |
6344 | _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc) |
6345 | { |
6346 | struct hba_port *port, *port_next; |
6347 | struct virtual_phy *vphy, *vphy_next; |
6348 | |
6349 | list_for_each_entry_safe(port, port_next, |
6350 | &ioc->port_table_list, list) { |
6351 | if (!port->vphys_mask) |
6352 | continue; |
6353 | list_for_each_entry_safe(vphy, vphy_next, |
6354 | &port->vphys_list, list) { |
6355 | if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) { |
6356 | drsprintk(ioc, ioc_info(ioc, |
6357 | "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", |
6358 | vphy, port->port_id, |
6359 | vphy->phy_mask)); |
6360 | port->vphys_mask &= ~vphy->phy_mask; |
6361 | list_del(&vphy->list); |
6362 | kfree(vphy); |
6363 | } |
6364 | } |
6365 | if (!port->vphys_mask && !port->sas_address) |
6366 | port->flags |= HBA_PORT_FLAG_DIRTY_PORT; |
6367 | } |
6368 | } |
6369 | |
6370 | /** |
6371 | * _scsih_del_dirty_port_entries - delete dirty port entries from port list |
6372 | * after host reset |
6373 | * @ioc: per adapter object |
6374 | * |
6375 | */ |
6376 | static void |
6377 | _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) |
6378 | { |
6379 | struct hba_port *port, *port_next; |
6380 | |
6381 | list_for_each_entry_safe(port, port_next, |
6382 | &ioc->port_table_list, list) { |
6383 | if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) || |
6384 | port->flags & HBA_PORT_FLAG_NEW_PORT) |
6385 | continue; |
6386 | |
6387 | drsprintk(ioc, ioc_info(ioc, |
6388 | "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n", |
6389 | port, port->port_id, port->phy_mask)); |
6390 | list_del(&port->list); |
6391 | kfree(port); |
6392 | } |
6393 | } |
6394 | |
6395 | /** |
6396 | * _scsih_sas_port_refresh - Update HBA port table after host reset |
6397 | * @ioc: per adapter object |
6398 | */ |
6399 | static void |
6400 | _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) |
6401 | { |
6402 | u32 port_count = 0; |
6403 | struct hba_port *port_table; |
6404 | struct hba_port *port_table_entry; |
6405 | struct hba_port *port_entry = NULL; |
6406 | int i, j, count = 0, lcount = 0; |
6407 | int ret; |
6408 | u64 sas_addr; |
6409 | u8 num_phys; |
6410 | |
6411 | drsprintk(ioc, ioc_info(ioc, |
6412 | "updating ports for sas_host(0x%016llx)\n", |
6413 | (unsigned long long)ioc->sas_hba.sas_address)); |
6414 | |
6415 | mpt3sas_config_get_number_hba_phys(ioc, &num_phys); |
6416 | if (!num_phys) { |
6417 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6418 | __FILE__, __LINE__, __func__); |
6419 | return; |
6420 | } |
6421 | |
6422 | if (num_phys > ioc->sas_hba.nr_phys_allocated) { |
6423 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6424 | __FILE__, __LINE__, __func__); |
6425 | return; |
6426 | } |
6427 | ioc->sas_hba.num_phys = num_phys; |
6428 | |
6429 | port_table = kcalloc(ioc->sas_hba.num_phys, |
6430 | sizeof(struct hba_port), GFP_KERNEL); |
6431 | if (!port_table) |
6432 | return; |
6433 | |
6434 | port_count = _scsih_get_port_table_after_reset(ioc, port_table); |
6435 | if (!port_count) |
6436 | return; |
6437 | |
6438 | drsprintk(ioc, ioc_info(ioc, "New Port table\n")); |
6439 | for (j = 0; j < port_count; j++) |
6440 | drsprintk(ioc, ioc_info(ioc, |
6441 | "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", |
6442 | port_table[j].port_id, |
6443 | port_table[j].phy_mask, port_table[j].sas_address)); |
6444 | |
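/*
 * Mark every existing port entry dirty; entries that remain dirty after
 * matching against the new table are stale.
 */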
6445 | list_for_each_entry(port_table_entry, &ioc->port_table_list, list) |
6446 | port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; |
6447 | |
6448 | drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); |
6449 | port_table_entry = NULL; |
6450 | list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { |
6451 | drsprintk(ioc, ioc_info(ioc, |
6452 | "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", |
6453 | port_table_entry->port_id, |
6454 | port_table_entry->phy_mask, |
6455 | port_table_entry->sas_address)); |
6456 | } |
6457 | |
6458 | for (j = 0; j < port_count; j++) { |
6459 | ret = _scsih_look_and_get_matched_port_entry(ioc, |
6460 | &port_table[j], &port_entry, &count); |
6461 | if (!port_entry) { |
6462 | drsprintk(ioc, ioc_info(ioc, |
6463 | "No Matched entry for sas_addr(0x%16llx), Port:%d\n", |
6464 | port_table[j].sas_address, |
6465 | port_table[j].port_id)); |
6466 | continue; |
6467 | } |
6468 | |
6469 | switch (ret) { |
6470 | case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: |
6471 | case MATCHED_WITH_ADDR_AND_SUBPHYMASK: |
6472 | _scsih_add_or_del_phys_from_existing_port(ioc, |
6473 | port_entry, port_table, j, port_count); |
6474 | break; |
6475 | case MATCHED_WITH_ADDR: |
6476 | sas_addr = port_table[j].sas_address; |
6477 | for (i = 0; i < port_count; i++) { |
6478 | if (port_table[i].sas_address == sas_addr) |
6479 | lcount++; |
6480 | } |
6481 | |
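/*
 * If more than one old or new entry shares this SAS address, the
 * mapping is ambiguous, so leave the old entry untouched.
 */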
6482 | if (count > 1 || lcount > 1) |
6483 | port_entry = NULL; |
6484 | else |
6485 | _scsih_add_or_del_phys_from_existing_port(ioc, |
6486 | port_entry, port_table, j, port_count); |
6487 | } |
6488 | |
6489 | if (!port_entry) |
6490 | continue; |
6491 | |
6492 | if (port_entry->port_id != port_table[j].port_id) |
6493 | port_entry->port_id = port_table[j].port_id; |
6494 | port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; |
6495 | port_entry->phy_mask = port_table[j].phy_mask; |
6496 | } |
6497 | |
6498 | port_table_entry = NULL; |
6499 | } |
6500 | |
6501 | /** |
6502 | * _scsih_alloc_vphy - allocate virtual_phy object |
6503 | * @ioc: per adapter object |
6504 | * @port_id: Port ID number |
6505 | * @phy_num: HBA Phy number |
6506 | * |
6507 | * Returns allocated virtual_phy object. |
6508 | */ |
6509 | static struct virtual_phy * |
6510 | _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) |
6511 | { |
6512 | struct virtual_phy *vphy; |
6513 | struct hba_port *port; |
6514 | |
6515 | port = mpt3sas_get_port_by_id(ioc, port_id, 0); |
6516 | if (!port) |
6517 | return NULL; |
6518 | |
6519 | vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); |
6520 | if (!vphy) { |
6521 | vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); |
6522 | if (!vphy) |
6523 | return NULL; |
6524 | |
6525 | if (!port->vphys_mask) |
6526 | INIT_LIST_HEAD(&port->vphys_list); |
6527 | |
6528 | /* |
6529 | * Enable bit corresponding to HBA phy number on its |
6530 | * parent hba_port object's vphys_mask field. |
6531 | */ |
6532 | port->vphys_mask |= (1 << phy_num); |
6533 | vphy->phy_mask |= (1 << phy_num); |
6534 | |
6535 | list_add_tail(&vphy->list, &port->vphys_list); |
6536 | |
6537 | ioc_info(ioc, |
6538 | "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", |
6539 | vphy, port->port_id, phy_num); |
6540 | } |
6541 | return vphy; |
6542 | } |
6543 | |
6544 | /** |
6545 | * _scsih_sas_host_refresh - refreshing sas host object contents |
6546 | * @ioc: per adapter object |
6547 | * Context: user |
6548 | * |
6549 | * During port enable, the firmware sends topology events for every device. It's |
6550 | * possible that the handles may change from the previous setting, so this |
6551 | * code keeps the handles updated if they change. |
6552 | */ |
6553 | static void |
6554 | _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) |
6555 | { |
6556 | u16 sz; |
6557 | u16 ioc_status; |
6558 | int i; |
6559 | Mpi2ConfigReply_t mpi_reply; |
6560 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; |
6561 | u16 attached_handle; |
6562 | u8 link_rate, port_id; |
6563 | struct hba_port *port; |
6564 | Mpi2SasPhyPage0_t phy_pg0; |
6565 | |
6566 | dtmprintk(ioc, |
6567 | ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", |
6568 | (u64)ioc->sas_hba.sas_address)); |
6569 | |
6570 | sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys); |
6571 | sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); |
6572 | if (!sas_iounit_pg0) { |
6573 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6574 | __FILE__, __LINE__, __func__); |
6575 | return; |
6576 | } |
6577 | |
6578 | if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, |
6579 | sas_iounit_pg0, sz)) != 0) |
6580 | goto out; |
6581 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
6582 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
6583 | goto out; |
6584 | for (i = 0; i < ioc->sas_hba.num_phys ; i++) { |
6585 | link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; |
6586 | if (i == 0) |
6587 | ioc->sas_hba.handle = le16_to_cpu( |
6588 | sas_iounit_pg0->PhyData[0].ControllerDevHandle); |
6589 | port_id = sas_iounit_pg0->PhyData[i].Port; |
6590 | if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { |
6591 | port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); |
6592 | if (!port) |
6593 | goto out; |
6594 | |
6595 | port->port_id = port_id; |
6596 | ioc_info(ioc, |
6597 | "hba_port entry: %p, port: %d is added to hba_port list\n", |
6598 | port, port->port_id); |
6599 | if (ioc->shost_recovery) |
6600 | port->flags = HBA_PORT_FLAG_NEW_PORT; |
6601 | list_add_tail(&port->list, &ioc->port_table_list); |
6602 | } |
6603 | /* |
6604 | * Check whether current Phy belongs to HBA vSES device or not. |
6605 | */ |
6606 | if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & |
6607 | MPI2_SAS_DEVICE_INFO_SEP && |
6608 | (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { |
6609 | if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, |
6610 | &phy_pg0, i))) { |
6611 | ioc_err(ioc, |
6612 | "failure at %s:%d/%s()!\n", |
6613 | __FILE__, __LINE__, __func__); |
6614 | goto out; |
6615 | } |
6616 | if (!(le32_to_cpu(phy_pg0.PhyInfo) & |
6617 | MPI2_SAS_PHYINFO_VIRTUAL_PHY)) |
6618 | continue; |
6619 | /* |
6620 | * Allocate a virtual_phy object for vSES device, if |
6621 | * this vSES device is hot added. |
6622 | */ |
6623 | if (!_scsih_alloc_vphy(ioc, port_id, i)) |
6624 | goto out; |
6625 | ioc->sas_hba.phy[i].hba_vphy = 1; |
6626 | } |
6627 | |
6628 | /* |
6629 | * Add new HBA phys to STL if these new phys got added as part |
6630 | * of HBA Firmware upgrade/downgrade operation. |
6631 | */ |
6632 | if (!ioc->sas_hba.phy[i].phy) { |
6633 | if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, |
6634 | &phy_pg0, i))) { |
6635 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6636 | __FILE__, __LINE__, __func__); |
6637 | continue; |
6638 | } |
6639 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
6640 | MPI2_IOCSTATUS_MASK; |
6641 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
6642 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6643 | __FILE__, __LINE__, __func__); |
6644 | continue; |
6645 | } |
6646 | ioc->sas_hba.phy[i].phy_id = i; |
6647 | mpt3sas_transport_add_host_phy(ioc, |
6648 | &ioc->sas_hba.phy[i], phy_pg0, |
6649 | ioc->sas_hba.parent_dev); |
6650 | continue; |
6651 | } |
6652 | ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; |
6653 | attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. |
6654 | AttachedDevHandle); |
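/*
 * If a device is attached but the phy reports a rate below 1.5 Gbps,
 * report at least 1.5 Gbps so the SAS transport layer still sees a
 * usable link.
 */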
6655 | if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) |
6656 | link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; |
6657 | ioc->sas_hba.phy[i].port = |
6658 | mpt3sas_get_port_by_id(ioc, port_id, 0); |
6659 | mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, |
6660 | attached_handle, i, link_rate, |
6661 | ioc->sas_hba.phy[i].port); |
6662 | } |
6663 | /* |
6664 | * Clear the phy details if this phy got disabled as part of |
6665 | * HBA Firmware upgrade/downgrade operation. |
6666 | */ |
6667 | for (i = ioc->sas_hba.num_phys; |
6668 | i < ioc->sas_hba.nr_phys_allocated; i++) { |
6669 | if (ioc->sas_hba.phy[i].phy && |
6670 | ioc->sas_hba.phy[i].phy->negotiated_linkrate >= |
6671 | SAS_LINK_RATE_1_5_GBPS) |
6672 | mpt3sas_transport_update_links(ioc, |
6673 | ioc->sas_hba.sas_address, 0, i, |
6674 | MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); |
6675 | } |
6676 | out: |
6677 | kfree(sas_iounit_pg0); |
6678 | } |
6679 | |
6680 | /** |
6681 | * _scsih_sas_host_add - create sas host object |
6682 | * @ioc: per adapter object |
6683 | * |
6684 | * Creating host side data object, stored in ioc->sas_hba |
6685 | */ |
6686 | static void |
6687 | _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) |
6688 | { |
6689 | int i; |
6690 | Mpi2ConfigReply_t mpi_reply; |
6691 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; |
6692 | Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; |
6693 | Mpi2SasPhyPage0_t phy_pg0; |
6694 | Mpi2SasDevicePage0_t sas_device_pg0; |
6695 | Mpi2SasEnclosurePage0_t enclosure_pg0; |
6696 | u16 ioc_status; |
6697 | u16 sz; |
6698 | u8 device_missing_delay; |
6699 | u8 num_phys, port_id; |
6700 | struct hba_port *port; |
6701 | |
6702 | mpt3sas_config_get_number_hba_phys(ioc, &num_phys); |
6703 | if (!num_phys) { |
6704 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6705 | __FILE__, __LINE__, __func__); |
6706 | return; |
6707 | } |
6708 | |
6709 | ioc->sas_hba.nr_phys_allocated = max_t(u8, |
6710 | MPT_MAX_HBA_NUM_PHYS, num_phys); |
6711 | ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, |
6712 | sizeof(struct _sas_phy), GFP_KERNEL); |
6713 | if (!ioc->sas_hba.phy) { |
6714 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6715 | __FILE__, __LINE__, __func__); |
6716 | goto out; |
6717 | } |
6718 | ioc->sas_hba.num_phys = num_phys; |
6719 | |
6720 | /* sas_iounit page 0 */ |
6721 | sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys); |
6722 | sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); |
6723 | if (!sas_iounit_pg0) { |
6724 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6725 | __FILE__, __LINE__, __func__); |
6726 | return; |
6727 | } |
6728 | if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, |
6729 | sas_iounit_pg0, sz))) { |
6730 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6731 | __FILE__, __LINE__, __func__); |
6732 | goto out; |
6733 | } |
6734 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
6735 | MPI2_IOCSTATUS_MASK; |
6736 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
6737 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6738 | __FILE__, __LINE__, __func__); |
6739 | goto out; |
6740 | } |
6741 | |
6742 | /* sas_iounit page 1 */ |
6743 | sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys); |
6744 | sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); |
6745 | if (!sas_iounit_pg1) { |
6746 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6747 | __FILE__, __LINE__, __func__); |
6748 | goto out; |
6749 | } |
6750 | if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, |
6751 | sas_iounit_pg1, sz))) { |
6752 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6753 | __FILE__, __LINE__, __func__); |
6754 | goto out; |
6755 | } |
6756 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
6757 | MPI2_IOCSTATUS_MASK; |
6758 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
6759 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6760 | __FILE__, __LINE__, __func__); |
6761 | goto out; |
6762 | } |
6763 | |
6764 | ioc->io_missing_delay = |
6765 | sas_iounit_pg1->IODeviceMissingDelay; |
6766 | device_missing_delay = |
6767 | sas_iounit_pg1->ReportDeviceMissingDelay; |
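/*
 * ReportDeviceMissingDelay is encoded in either 1-second or 16-second
 * units, selected by the UNIT_16 flag.
 */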
6768 | if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) |
6769 | ioc->device_missing_delay = (device_missing_delay & |
6770 | MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; |
6771 | else |
6772 | ioc->device_missing_delay = device_missing_delay & |
6773 | MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; |
6774 | |
6775 | ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; |
6776 | for (i = 0; i < ioc->sas_hba.num_phys ; i++) { |
6777 | if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, |
6778 | i))) { |
6779 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6780 | __FILE__, __LINE__, __func__); |
6781 | goto out; |
6782 | } |
6783 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
6784 | MPI2_IOCSTATUS_MASK; |
6785 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
6786 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6787 | __FILE__, __LINE__, __func__); |
6788 | goto out; |
6789 | } |
6790 | |
6791 | if (i == 0) |
6792 | ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> |
6793 | PhyData[0].ControllerDevHandle); |
6794 | |
6795 | port_id = sas_iounit_pg0->PhyData[i].Port; |
6796 | if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { |
6797 | port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); |
6798 | if (!port) |
6799 | goto out; |
6800 | |
6801 | port->port_id = port_id; |
6802 | ioc_info(ioc, |
6803 | "hba_port entry: %p, port: %d is added to hba_port list\n", |
6804 | port, port->port_id); |
6805 | list_add_tail(&port->list, |
6806 | &ioc->port_table_list); |
6807 | } |
6808 | |
6809 | /* |
6810 | * Check whether current Phy belongs to HBA vSES device or not. |
6811 | */ |
6812 | if ((le32_to_cpu(phy_pg0.PhyInfo) & |
6813 | MPI2_SAS_PHYINFO_VIRTUAL_PHY) && |
6814 | (phy_pg0.NegotiatedLinkRate >> 4) >= |
6815 | MPI2_SAS_NEG_LINK_RATE_1_5) { |
6816 | /* |
6817 | * Allocate a virtual_phy object for vSES device. |
6818 | */ |
6819 | if (!_scsih_alloc_vphy(ioc, port_id, i)) |
6820 | goto out; |
6821 | ioc->sas_hba.phy[i].hba_vphy = 1; |
6822 | } |
6823 | |
6824 | ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; |
6825 | ioc->sas_hba.phy[i].phy_id = i; |
6826 | ioc->sas_hba.phy[i].port = |
6827 | mpt3sas_get_port_by_id(ioc, port_id, 0); |
6828 | mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], |
6829 | phy_pg0, ioc->sas_hba.parent_dev); |
6830 | } |
6831 | if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
6832 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { |
6833 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6834 | __FILE__, __LINE__, __func__); |
6835 | goto out; |
6836 | } |
6837 | ioc->sas_hba.enclosure_handle = |
6838 | le16_to_cpu(sas_device_pg0.EnclosureHandle); |
6839 | ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); |
6840 | ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", |
6841 | ioc->sas_hba.handle, |
6842 | (u64)ioc->sas_hba.sas_address, |
6843 | ioc->sas_hba.num_phys); |
6844 | |
6845 | if (ioc->sas_hba.enclosure_handle) { |
6846 | if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, |
6847 | &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, |
6848 | ioc->sas_hba.enclosure_handle))) |
6849 | ioc->sas_hba.enclosure_logical_id = |
6850 | le64_to_cpu(enclosure_pg0.EnclosureLogicalID); |
6851 | } |
6852 | |
6853 | out: |
6854 | kfree(sas_iounit_pg1); |
6855 | kfree(sas_iounit_pg0); |
6856 | } |
6857 | |
6858 | /** |
6859 | * _scsih_expander_add - creating expander object |
6860 | * @ioc: per adapter object |
6861 | * @handle: expander handle |
6862 | * |
6863 | * Creating expander object, stored in ioc->sas_expander_list. |
6864 | * |
6865 | * Return: 0 for success, else error. |
6866 | */ |
6867 | static int |
6868 | _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
6869 | { |
6870 | struct _sas_node *sas_expander; |
6871 | struct _enclosure_node *enclosure_dev; |
6872 | Mpi2ConfigReply_t mpi_reply; |
6873 | Mpi2ExpanderPage0_t expander_pg0; |
6874 | Mpi2ExpanderPage1_t expander_pg1; |
6875 | u32 ioc_status; |
6876 | u16 parent_handle; |
6877 | u64 sas_address, sas_address_parent = 0; |
6878 | int i; |
6879 | unsigned long flags; |
6880 | struct _sas_port *mpt3sas_port = NULL; |
6881 | u8 port_id; |
6882 | |
6883 | int rc = 0; |
6884 | |
6885 | if (!handle) |
6886 | return -1; |
6887 | |
6888 | if (ioc->shost_recovery || ioc->pci_error_recovery) |
6889 | return -1; |
6890 | |
6891 | if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, |
6892 | MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { |
6893 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6894 | __FILE__, __LINE__, __func__); |
6895 | return -1; |
6896 | } |
6897 | |
6898 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
6899 | MPI2_IOCSTATUS_MASK; |
6900 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
6901 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6902 | __FILE__, __LINE__, __func__); |
6903 | return -1; |
6904 | } |
6905 | |
6906 | /* handle out of order topology events */ |
6907 | parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); |
6908 | if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) |
6909 | != 0) { |
6910 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6911 | __FILE__, __LINE__, __func__); |
6912 | return -1; |
6913 | } |
6914 | |
6915 | port_id = expander_pg0.PhysicalPort; |
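/*
 * If the parent is another expander that is not yet known, add it
 * first so that the topology is built from the top down.
 */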
6916 | if (sas_address_parent != ioc->sas_hba.sas_address) { |
6917 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
6918 | sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, |
6919 | sas_address_parent, |
6920 | mpt3sas_get_port_by_id(ioc, port_id, 0)); |
6921 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
6922 | if (!sas_expander) { |
6923 | rc = _scsih_expander_add(ioc, parent_handle); |
6924 | if (rc != 0) |
6925 | return rc; |
6926 | } |
6927 | } |
6928 | |
6929 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
6930 | sas_address = le64_to_cpu(expander_pg0.SASAddress); |
6931 | sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, |
6932 | sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); |
6933 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
6934 | |
6935 | if (sas_expander) |
6936 | return 0; |
6937 | |
6938 | sas_expander = kzalloc(sizeof(struct _sas_node), |
6939 | GFP_KERNEL); |
6940 | if (!sas_expander) { |
6941 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6942 | __FILE__, __LINE__, __func__); |
6943 | return -1; |
6944 | } |
6945 | |
6946 | sas_expander->handle = handle; |
6947 | sas_expander->num_phys = expander_pg0.NumPhys; |
6948 | sas_expander->sas_address_parent = sas_address_parent; |
6949 | sas_expander->sas_address = sas_address; |
6950 | sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); |
6951 | if (!sas_expander->port) { |
6952 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6953 | __FILE__, __LINE__, __func__); |
6954 | rc = -1; |
6955 | goto out_fail; |
6956 | } |
6957 | |
6958 | ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", |
6959 | handle, parent_handle, |
6960 | (u64)sas_expander->sas_address, sas_expander->num_phys); |
6961 | |
6962 | if (!sas_expander->num_phys) { |
6963 | rc = -1; |
6964 | goto out_fail; |
6965 | } |
6966 | sas_expander->phy = kcalloc(sas_expander->num_phys, |
6967 | sizeof(struct _sas_phy), GFP_KERNEL); |
6968 | if (!sas_expander->phy) { |
6969 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6970 | __FILE__, __LINE__, __func__); |
6971 | rc = -1; |
6972 | goto out_fail; |
6973 | } |
6974 | |
6975 | INIT_LIST_HEAD(&sas_expander->sas_port_list); |
6976 | mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, |
6977 | sas_address_parent, sas_expander->port); |
6978 | if (!mpt3sas_port) { |
6979 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6980 | __FILE__, __LINE__, __func__); |
6981 | rc = -1; |
6982 | goto out_fail; |
6983 | } |
6984 | sas_expander->parent_dev = &mpt3sas_port->rphy->dev; |
6985 | sas_expander->rphy = mpt3sas_port->rphy; |
6986 | |
6987 | for (i = 0 ; i < sas_expander->num_phys ; i++) { |
6988 | if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, |
6989 | &expander_pg1, i, handle))) { |
6990 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
6991 | __FILE__, __LINE__, __func__); |
6992 | rc = -1; |
6993 | goto out_fail; |
6994 | } |
6995 | sas_expander->phy[i].handle = handle; |
6996 | sas_expander->phy[i].phy_id = i; |
6997 | sas_expander->phy[i].port = |
6998 | mpt3sas_get_port_by_id(ioc, port_id, 0); |
6999 | |
7000 | if ((mpt3sas_transport_add_expander_phy(ioc, |
7001 | &sas_expander->phy[i], expander_pg1, |
7002 | sas_expander->parent_dev))) { |
7003 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
7004 | __FILE__, __LINE__, __func__); |
7005 | rc = -1; |
7006 | goto out_fail; |
7007 | } |
7008 | } |
7009 | |
7010 | if (sas_expander->enclosure_handle) { |
7011 | enclosure_dev = |
7012 | mpt3sas_scsih_enclosure_find_by_handle(ioc, |
7013 | sas_expander->enclosure_handle); |
7014 | if (enclosure_dev) |
7015 | sas_expander->enclosure_logical_id = |
7016 | le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); |
7017 | } |
7018 | |
7019 | _scsih_expander_node_add(ioc, sas_expander); |
7020 | return 0; |
7021 | |
7022 | out_fail: |
7023 | |
7024 | if (mpt3sas_port) |
7025 | mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, |
7026 | sas_address_parent, sas_expander->port); |
7027 | kfree(sas_expander); |
7028 | return rc; |
7029 | } |
7030 | |
7031 | /** |
7032 | * mpt3sas_expander_remove - removing expander object |
7033 | * @ioc: per adapter object |
7034 | * @sas_address: expander sas_address |
7035 | * @port: hba port entry |
7036 | */ |
7037 | void |
7038 | mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, |
7039 | struct hba_port *port) |
7040 | { |
7041 | struct _sas_node *sas_expander; |
7042 | unsigned long flags; |
7043 | |
7044 | if (ioc->shost_recovery) |
7045 | return; |
7046 | |
7047 | if (!port) |
7048 | return; |
7049 | |
7050 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
7051 | sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, |
7052 | sas_address, port); |
7053 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
7054 | if (sas_expander) |
7055 | _scsih_expander_node_remove(ioc, sas_expander); |
7056 | } |
7057 | |
7058 | /** |
7059 | * _scsih_done - internal SCSI_IO callback handler. |
7060 | * @ioc: per adapter object |
7061 | * @smid: system request message index |
7062 | * @msix_index: MSIX table index supplied by the OS |
7063 | * @reply: reply message frame(lower 32bit addr) |
7064 | * |
7065 | * Callback handler when sending internally generated SCSI_IO. |
7066 | * The callback index passed is `ioc->scsih_cb_idx`. |
7067 | * |
7068 | * Return: 1 meaning mf should be freed from _base_interrupt |
7069 | * 0 means the mf is freed from this function. |
7070 | */ |
7071 | static u8 |
7072 | _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
7073 | { |
7074 | MPI2DefaultReply_t *mpi_reply; |
7075 | |
7076 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); |
7077 | if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED) |
7078 | return 1; |
7079 | if (ioc->scsih_cmds.smid != smid) |
7080 | return 1; |
7081 | ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE; |
7082 | if (mpi_reply) { |
7083 | memcpy(ioc->scsih_cmds.reply, mpi_reply, |
7084 | mpi_reply->MsgLength*4); |
7085 | ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID; |
7086 | } |
7087 | ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING; |
7088 | complete(&ioc->scsih_cmds.done); |
7089 | return 1; |
7090 | } |
7091 | |
7092 | |
7093 | |
7094 | |
7095 | #define MPT3_MAX_LUNS (255) |
7096 | |
7097 | |
7098 | /** |
7099 | * _scsih_check_access_status - check access flags |
7100 | * @ioc: per adapter object |
7101 | * @sas_address: sas address |
7102 | * @handle: sas device handle |
7103 | * @access_status: errors returned during discovery of the device |
7104 | * |
7105 | * Return: 0 for success, else failure |
7106 | */ |
7107 | static u8 |
7108 | _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, |
7109 | u16 handle, u8 access_status) |
7110 | { |
7111 | u8 rc = 1; |
7112 | char *desc = NULL; |
7113 | |
7114 | switch (access_status) { |
7115 | case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS: |
7116 | case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: |
7117 | rc = 0; |
7118 | break; |
7119 | case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: |
7120 | desc = "sata capability failed"; |
7121 | break; |
7122 | case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: |
7123 | desc = "sata affiliation conflict"; |
7124 | break; |
7125 | case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: |
7126 | desc = "route not addressable"; |
7127 | break; |
7128 | case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: |
7129 | desc = "smp error not addressable"; |
7130 | break; |
7131 | case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: |
7132 | desc = "device blocked"; |
7133 | break; |
7134 | case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: |
7135 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: |
7136 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: |
7137 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG: |
7138 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: |
7139 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: |
7140 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: |
7141 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: |
7142 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: |
7143 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: |
7144 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: |
7145 | case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX: |
7146 | desc = "sata initialization failed"; |
7147 | break; |
7148 | default: |
7149 | desc = "unknown"; |
7150 | break; |
7151 | } |
7152 | |
7153 | if (!rc) |
7154 | return 0; |
7155 | |
7156 | ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n", |
7157 | desc, (u64)sas_address, handle); |
7158 | return rc; |
7159 | } |
7160 | |
7161 | /** |
7162 | * _scsih_check_device - checking device responsiveness |
7163 | * @ioc: per adapter object |
7164 | * @parent_sas_address: sas address of parent expander or sas host |
7165 | * @handle: attached device handle |
7166 | * @phy_number: phy number |
7167 | * @link_rate: new link rate |
7168 | */ |
7169 | static void |
7170 | _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, |
7171 | u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) |
7172 | { |
7173 | Mpi2ConfigReply_t mpi_reply; |
7174 | Mpi2SasDevicePage0_t sas_device_pg0; |
7175 | struct _sas_device *sas_device = NULL; |
7176 | struct _enclosure_node *enclosure_dev = NULL; |
7177 | u32 ioc_status; |
7178 | unsigned long flags; |
7179 | u64 sas_address; |
7180 | struct scsi_target *starget; |
7181 | struct MPT3SAS_TARGET *sas_target_priv_data; |
7182 | u32 device_info; |
7183 | struct hba_port *port; |
7184 | |
7185 | if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
7186 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) |
7187 | return; |
7188 | |
7189 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
7190 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
7191 | return; |
7192 | |
7193 | /* wide port handling ~ we need only handle device once for the phy that |
7194 | * is matched in sas device page zero |
7195 | */ |
7196 | if (phy_number != sas_device_pg0.PhyNum) |
7197 | return; |
7198 | |
7199 | /* check if this is end device */ |
7200 | device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); |
7201 | if (!(_scsih_is_end_device(device_info))) |
7202 | return; |
7203 | |
7204 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
7205 | sas_address = le64_to_cpu(sas_device_pg0.SASAddress); |
7206 | port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); |
7207 | if (!port) |
7208 | goto out_unlock; |
7209 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, |
7210 | sas_address, port); |
7211 | |
7212 | if (!sas_device) |
7213 | goto out_unlock; |
7214 | |
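/*
 * The firmware may have assigned a new device handle (e.g. across a
 * host reset); refresh the cached handle and enclosure details.
 */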
7215 | if (unlikely(sas_device->handle != handle)) { |
7216 | starget = sas_device->starget; |
7217 | sas_target_priv_data = starget->hostdata; |
7218 | starget_printk(KERN_INFO, starget, |
7219 | "handle changed from(0x%04x) to (0x%04x)!!!\n", |
7220 | sas_device->handle, handle); |
7221 | sas_target_priv_data->handle = handle; |
7222 | sas_device->handle = handle; |
7223 | if (le16_to_cpu(sas_device_pg0.Flags) & |
7224 | MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { |
7225 | sas_device->enclosure_level = |
7226 | sas_device_pg0.EnclosureLevel; |
7227 | memcpy(sas_device->connector_name, |
7228 | sas_device_pg0.ConnectorName, 4); |
7229 | sas_device->connector_name[4] = '\0'; |
7230 | } else { |
7231 | sas_device->enclosure_level = 0; |
7232 | sas_device->connector_name[0] = '\0'; |
7233 | } |
7234 | |
7235 | sas_device->enclosure_handle = |
7236 | le16_to_cpu(sas_device_pg0.EnclosureHandle); |
7237 | sas_device->is_chassis_slot_valid = 0; |
7238 | enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, |
7239 | sas_device->enclosure_handle); |
7240 | if (enclosure_dev) { |
7241 | sas_device->enclosure_logical_id = |
7242 | le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); |
7243 | if (le16_to_cpu(enclosure_dev->pg0.Flags) & |
7244 | MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { |
7245 | sas_device->is_chassis_slot_valid = 1; |
7246 | sas_device->chassis_slot = |
7247 | enclosure_dev->pg0.ChassisSlot; |
7248 | } |
7249 | } |
7250 | } |
7251 | |
7252 | /* check if device is present */ |
7253 | if (!(le16_to_cpu(sas_device_pg0.Flags) & |
7254 | MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { |
7255 | ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n", |
7256 | handle); |
7257 | goto out_unlock; |
7258 | } |
7259 | |
7260 | /* check if there were any issues with discovery */ |
7261 | if (_scsih_check_access_status(ioc, sas_address, handle, |
7262 | sas_device_pg0.AccessStatus)) |
7263 | goto out_unlock; |
7264 | |
7265 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
7266 | _scsih_ublock_io_device(ioc, sas_address, port); |
7267 | |
7268 | if (sas_device) |
7269 | sas_device_put(sas_device); |
7270 | return; |
7271 | |
7272 | out_unlock: |
7273 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
7274 | if (sas_device) |
7275 | sas_device_put(sas_device); |
7276 | } |
7277 | |
7278 | /** |
7279 | * _scsih_add_device - creating sas device object |
7280 | * @ioc: per adapter object |
7281 | * @handle: sas device handle |
7282 | * @phy_num: phy number end device attached to |
7283 | * @is_pd: is this hidden raid component |
7284 | * |
7285 | * Creating end device object, stored in ioc->sas_device_list. |
7286 | * |
7287 | * Return: 0 for success, non-zero for failure. |
7288 | */ |
7289 | static int |
7290 | _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, |
7291 | u8 is_pd) |
7292 | { |
7293 | Mpi2ConfigReply_t mpi_reply; |
7294 | Mpi2SasDevicePage0_t sas_device_pg0; |
7295 | struct _sas_device *sas_device; |
7296 | struct _enclosure_node *enclosure_dev = NULL; |
7297 | u32 ioc_status; |
7298 | u64 sas_address; |
7299 | u32 device_info; |
7300 | u8 port_id; |
7301 | |
7302 | if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
7303 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { |
7304 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
7305 | __FILE__, __LINE__, __func__); |
7306 | return -1; |
7307 | } |
7308 | |
7309 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
7310 | MPI2_IOCSTATUS_MASK; |
7311 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
7312 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
7313 | __FILE__, __LINE__, __func__); |
7314 | return -1; |
7315 | } |
7316 | |
7317 | /* check if this is end device */ |
7318 | device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); |
7319 | if (!(_scsih_is_end_device(device_info))) |
7320 | return -1; |
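/* Track this handle as pending until the OS has added the device. */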
7321 | set_bit(handle, ioc->pend_os_device_add); |
7322 | sas_address = le64_to_cpu(sas_device_pg0.SASAddress); |
7323 | |
7324 | /* check if device is present */ |
7325 | if (!(le16_to_cpu(sas_device_pg0.Flags) & |
7326 | MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { |
7327 | ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", |
7328 | handle); |
7329 | return -1; |
7330 | } |
7331 | |
7332 | /* check if there were any issues with discovery */ |
7333 | if (_scsih_check_access_status(ioc, sas_address, handle, |
7334 | sas_device_pg0.AccessStatus)) |
7335 | return -1; |
7336 | |
7337 | port_id = sas_device_pg0.PhysicalPort; |
7338 | sas_device = mpt3sas_get_sdev_by_addr(ioc, |
7339 | sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); |
7340 | if (sas_device) { |
7341 | clear_bit(handle, ioc->pend_os_device_add); |
7342 | sas_device_put(sas_device); |
7343 | return -1; |
7344 | } |
7345 | |
7346 | if (sas_device_pg0.EnclosureHandle) { |
7347 | enclosure_dev = |
7348 | mpt3sas_scsih_enclosure_find_by_handle(ioc, |
7349 | le16_to_cpu(sas_device_pg0.EnclosureHandle)); |
7350 | if (enclosure_dev == NULL) |
7351 | ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", |
7352 | sas_device_pg0.EnclosureHandle); |
7353 | } |
7354 | |
7355 | sas_device = kzalloc(sizeof(struct _sas_device), |
7356 | GFP_KERNEL); |
7357 | if (!sas_device) { |
7358 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
7359 | __FILE__, __LINE__, __func__); |
7360 | return 0; |
7361 | } |
7362 | |
7363 | kref_init(&sas_device->refcount); |
7364 | sas_device->handle = handle; |
7365 | if (_scsih_get_sas_address(ioc, |
7366 | le16_to_cpu(sas_device_pg0.ParentDevHandle), |
7367 | &sas_device->sas_address_parent) != 0) |
7368 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
7369 | __FILE__, __LINE__, __func__); |
7370 | sas_device->enclosure_handle = |
7371 | le16_to_cpu(sas_device_pg0.EnclosureHandle); |
7372 | if (sas_device->enclosure_handle != 0) |
7373 | sas_device->slot = |
7374 | le16_to_cpu(sas_device_pg0.Slot); |
7375 | sas_device->device_info = device_info; |
7376 | sas_device->sas_address = sas_address; |
7377 | sas_device->phy = sas_device_pg0.PhyNum; |
7378 | sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & |
7379 | MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; |
7380 | sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); |
7381 | if (!sas_device->port) { |
7382 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
7383 | __FILE__, __LINE__, __func__); |
7384 | goto out; |
7385 | } |
7386 | |
7387 | if (le16_to_cpu(sas_device_pg0.Flags) |
7388 | & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { |
7389 | sas_device->enclosure_level = |
7390 | sas_device_pg0.EnclosureLevel; |
7391 | memcpy(sas_device->connector_name, |
7392 | sas_device_pg0.ConnectorName, 4); |
7393 | sas_device->connector_name[4] = '\0'; |
7394 | } else { |
7395 | sas_device->enclosure_level = 0; |
7396 | sas_device->connector_name[0] = '\0'; |
7397 | } |
7398 | /* get enclosure_logical_id & chassis_slot */ |
7399 | sas_device->is_chassis_slot_valid = 0; |
7400 | if (enclosure_dev) { |
7401 | sas_device->enclosure_logical_id = |
7402 | le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); |
7403 | if (le16_to_cpu(enclosure_dev->pg0.Flags) & |
7404 | MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { |
7405 | sas_device->is_chassis_slot_valid = 1; |
7406 | sas_device->chassis_slot = |
7407 | enclosure_dev->pg0.ChassisSlot; |
7408 | } |
7409 | } |
7410 | |
7411 | /* get device name */ |
7412 | sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); |
7413 | sas_device->port_type = sas_device_pg0.MaxPortConnections; |
7414 | ioc_info(ioc, |
7415 | "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", |
7416 | handle, sas_device->sas_address, sas_device->port_type); |
7417 | |
7418 | if (ioc->wait_for_discovery_to_complete) |
7419 | _scsih_sas_device_init_add(ioc, sas_device); |
7420 | else |
7421 | _scsih_sas_device_add(ioc, sas_device); |
7422 | |
7423 | out: |
7424 | sas_device_put(sas_device); |
7425 | return 0; |
7426 | } |
7427 | |
7428 | /** |
7429 | * _scsih_remove_device - removing sas device object |
7430 | * @ioc: per adapter object |
7431 | * @sas_device: the sas_device object |
7432 | */ |
7433 | static void |
7434 | _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, |
7435 | struct _sas_device *sas_device) |
7436 | { |
7437 | struct MPT3SAS_TARGET *sas_target_priv_data; |
7438 | |
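/*
 * On IBM-branded controllers, turn off the predictive failure (PFA)
 * LED if it was switched on for this drive.
 */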
7439 | if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && |
7440 | (sas_device->pfa_led_on)) { |
7441 | _scsih_turn_off_pfa_led(ioc, sas_device); |
7442 | sas_device->pfa_led_on = 0; |
7443 | } |
7444 | |
7445 | dewtprintk(ioc, |
7446 | ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", |
7447 | __func__, |
7448 | sas_device->handle, (u64)sas_device->sas_address)); |
7449 | |
7450 | dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, |
7451 | NULL, NULL)); |
7452 | |
7453 | if (sas_device->starget && sas_device->starget->hostdata) { |
7454 | sas_target_priv_data = sas_device->starget->hostdata; |
7455 | sas_target_priv_data->deleted = 1; |
7456 | _scsih_ublock_io_device(ioc, sas_device->sas_address, |
7457 | sas_device->port); |
7458 | sas_target_priv_data->handle = |
7459 | MPT3SAS_INVALID_DEVICE_HANDLE; |
7460 | } |
7461 | |
7462 | if (!ioc->hide_drives) |
7463 | mpt3sas_transport_port_remove(ioc, |
7464 | sas_device->sas_address, |
7465 | sas_device->sas_address_parent, |
7466 | sas_device->port); |
7467 | |
7468 | ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", |
7469 | sas_device->handle, (u64)sas_device->sas_address); |
7470 | |
7471 | _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); |
7472 | |
7473 | dewtprintk(ioc, |
7474 | ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", |
7475 | __func__, |
7476 | sas_device->handle, (u64)sas_device->sas_address)); |
7477 | dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, |
7478 | NULL, NULL)); |
7479 | } |
7480 | |
7481 | /** |
7482 | * _scsih_sas_topology_change_event_debug - debug for topology event |
7483 | * @ioc: per adapter object |
7484 | * @event_data: event data payload |
7485 | * Context: user. |
7486 | */ |
7487 | static void |
7488 | _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
7489 | Mpi2EventDataSasTopologyChangeList_t *event_data) |
7490 | { |
7491 | int i; |
7492 | u16 handle; |
7493 | u16 reason_code; |
7494 | u8 phy_number; |
7495 | char *status_str = NULL; |
7496 | u8 link_rate, prev_link_rate; |
7497 | |
7498 | switch (event_data->ExpStatus) { |
7499 | case MPI2_EVENT_SAS_TOPO_ES_ADDED: |
7500 | status_str = "add"; |
7501 | break; |
7502 | case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: |
7503 | status_str = "remove"; |
7504 | break; |
7505 | case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: |
7506 | case 0: |
7507 | status_str = "responding"; |
7508 | break; |
7509 | case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: |
7510 | status_str = "remove delay"; |
7511 | break; |
7512 | default: |
7513 | status_str = "unknown status"; |
7514 | break; |
7515 | } |
7516 | ioc_info(ioc, "sas topology change: (%s)\n", status_str); |
7517 | pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) "\ |
7518 | "start_phy(%02d), count(%d)\n", |
7519 | le16_to_cpu(event_data->ExpanderDevHandle), |
7520 | le16_to_cpu(event_data->EnclosureHandle), |
7521 | event_data->StartPhyNum, event_data->NumEntries); |
7522 | for (i = 0; i < event_data->NumEntries; i++) { |
7523 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); |
7524 | if (!handle) |
7525 | continue; |
7526 | phy_number = event_data->StartPhyNum + i; |
7527 | reason_code = event_data->PHY[i].PhyStatus & |
7528 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
7529 | switch (reason_code) { |
7530 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: |
7531 | status_str = "target add"; |
7532 | break; |
7533 | case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: |
7534 | status_str = "target remove"; |
7535 | break; |
7536 | case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: |
7537 | status_str = "delay target remove"; |
7538 | break; |
7539 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: |
7540 | status_str = "link rate change"; |
7541 | break; |
7542 | case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: |
7543 | status_str = "target responding"; |
7544 | break; |
7545 | default: |
7546 | status_str = "unknown"; |
7547 | break; |
7548 | } |
7549 | link_rate = event_data->PHY[i].LinkRate >> 4; |
7550 | prev_link_rate = event_data->PHY[i].LinkRate & 0xF; |
7551 | pr_info("\tphy(%02d), attached_handle(0x%04x): %s:"\ |
7552 | " link rate: new(0x%02x), old(0x%02x)\n", phy_number, |
7553 | handle, status_str, link_rate, prev_link_rate); |
7554 | |
7555 | } |
7556 | } |
7557 | |
7558 | /** |
7559 | * _scsih_sas_topology_change_event - handle topology changes |
7560 | * @ioc: per adapter object |
7561 | * @fw_event: The fw_event_work object |
7562 | * Context: user. |
7563 | * |
7564 | */ |
7565 | static int |
7566 | _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, |
7567 | struct fw_event_work *fw_event) |
7568 | { |
7569 | int i; |
7570 | u16 parent_handle, handle; |
7571 | u16 reason_code; |
7572 | u8 phy_number, max_phys; |
7573 | struct _sas_node *sas_expander; |
7574 | u64 sas_address; |
7575 | unsigned long flags; |
7576 | u8 link_rate, prev_link_rate; |
7577 | struct hba_port *port; |
7578 | Mpi2EventDataSasTopologyChangeList_t *event_data = |
7579 | (Mpi2EventDataSasTopologyChangeList_t *) |
7580 | fw_event->event_data; |
7581 | |
7582 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
7583 | _scsih_sas_topology_change_event_debug(ioc, event_data); |
7584 | |
7585 | if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) |
7586 | return 0; |
7587 | |
7588 | if (!ioc->sas_hba.num_phys) |
7589 | _scsih_sas_host_add(ioc); |
7590 | else |
7591 | _scsih_sas_host_refresh(ioc); |
7592 | |
7593 | if (fw_event->ignore) { |
7594 | dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n")); |
7595 | return 0; |
7596 | } |
7597 | |
7598 | parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); |
7599 | port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0); |
7600 | |
7601 | /* handle expander add */ |
7602 | if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) |
7603 | if (_scsih_expander_add(ioc, parent_handle) != 0) |
7604 | return 0; |
7605 | |
7606 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
7607 | sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, |
7608 | parent_handle); |
7609 | if (sas_expander) { |
7610 | sas_address = sas_expander->sas_address; |
7611 | max_phys = sas_expander->num_phys; |
7612 | port = sas_expander->port; |
7613 | } else if (parent_handle < ioc->sas_hba.num_phys) { |
7614 | sas_address = ioc->sas_hba.sas_address; |
7615 | max_phys = ioc->sas_hba.num_phys; |
7616 | } else { |
7617 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
7618 | return 0; |
7619 | } |
7620 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
7621 | |
7622 | /* handle siblings events */ |
7623 | for (i = 0; i < event_data->NumEntries; i++) { |
7624 | if (fw_event->ignore) { |
7625 | dewtprintk(ioc, |
7626 | ioc_info(ioc, "ignoring expander event\n")); |
7627 | return 0; |
7628 | } |
7629 | if (ioc->remove_host || ioc->pci_error_recovery) |
7630 | return 0; |
7631 | phy_number = event_data->StartPhyNum + i; |
7632 | if (phy_number >= max_phys) |
7633 | continue; |
7634 | reason_code = event_data->PHY[i].PhyStatus & |
7635 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
7636 | if ((event_data->PHY[i].PhyStatus & |
7637 | MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != |
7638 | MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) |
7639 | continue; |
7640 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); |
7641 | if (!handle) |
7642 | continue; |
7643 | link_rate = event_data->PHY[i].LinkRate >> 4; |
7644 | prev_link_rate = event_data->PHY[i].LinkRate & 0xF; |
7645 | switch (reason_code) { |
7646 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: |
7647 | |
7648 | if (ioc->shost_recovery) |
7649 | break; |
7650 | |
7651 | if (link_rate == prev_link_rate) |
7652 | break; |
7653 | |
7654 | mpt3sas_transport_update_links(ioc, sas_address, |
7655 | handle, phy_number, link_rate, port); |
7656 | |
7657 | if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) |
7658 | break; |
7659 | |
7660 | _scsih_check_device(ioc, sas_address, handle, |
7661 | phy_number, link_rate); |
7662 | |
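/* If the device is still pending OS addition, fall through and add it now. */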
7663 | if (!test_bit(handle, ioc->pend_os_device_add)) |
7664 | break; |
7665 | |
7666 | fallthrough; |
7667 | |
7668 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: |
7669 | |
7670 | if (ioc->shost_recovery) |
7671 | break; |
7672 | |
7673 | mpt3sas_transport_update_links(ioc, sas_address, |
7674 | handle, phy_number, link_rate, port); |
7675 | |
7676 | _scsih_add_device(ioc, handle, phy_number, 0); |
7677 | |
7678 | break; |
7679 | case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: |
7680 | |
7681 | _scsih_device_remove_by_handle(ioc, handle); |
7682 | break; |
7683 | } |
7684 | } |
7685 | |
7686 | /* handle expander removal */ |
7687 | if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && |
7688 | sas_expander) |
7689 | mpt3sas_expander_remove(ioc, sas_address, port); |
7690 | |
7691 | return 0; |
7692 | } |
7693 | |
7694 | /** |
7695 | * _scsih_sas_device_status_change_event_debug - debug for device event |
7696 | * @ioc: per adapter object |
7697 | * @event_data: event data payload |
7698 | * Context: user. |
7699 | */ |
7700 | static void |
7701 | _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
7702 | Mpi2EventDataSasDeviceStatusChange_t *event_data) |
7703 | { |
7704 | char *reason_str = NULL; |
7705 | |
7706 | switch (event_data->ReasonCode) { |
7707 | case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: |
7708 | reason_str = "smart data"; |
7709 | break; |
7710 | case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: |
7711 | reason_str = "unsupported device discovered"; |
7712 | break; |
7713 | case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: |
7714 | reason_str = "internal device reset"; |
7715 | break; |
7716 | case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: |
7717 | reason_str = "internal task abort"; |
7718 | break; |
7719 | case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: |
7720 | reason_str = "internal task abort set"; |
7721 | break; |
7722 | case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: |
7723 | reason_str = "internal clear task set"; |
7724 | break; |
7725 | case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: |
7726 | reason_str = "internal query task"; |
7727 | break; |
7728 | case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: |
7729 | reason_str = "sata init failure"; |
7730 | break; |
7731 | case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: |
7732 | reason_str = "internal device reset complete"; |
7733 | break; |
7734 | case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: |
7735 | reason_str = "internal task abort complete"; |
7736 | break; |
7737 | case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: |
7738 | reason_str = "internal async notification"; |
7739 | break; |
7740 | case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: |
7741 | reason_str = "expander reduced functionality"; |
7742 | break; |
7743 | case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: |
7744 | reason_str = "expander reduced functionality complete"; |
7745 | break; |
7746 | default: |
7747 | reason_str = "unknown reason"; |
7748 | break; |
7749 | } |
7750 | ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)", |
7751 | reason_str, le16_to_cpu(event_data->DevHandle), |
7752 | (u64)le64_to_cpu(event_data->SASAddress), |
7753 | le16_to_cpu(event_data->TaskTag)); |
7754 | if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) |
7755 | pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", |
7756 | event_data->ASC, event_data->ASCQ); |
7757 | pr_cont("\n"); |
7758 | } |
7759 | |
7760 | /** |
7761 | * _scsih_sas_device_status_change_event - handle device status change |
7762 | * @ioc: per adapter object |
7763 | * @event_data: event data payload |
7764 | * Context: user. |
7765 | */ |
7766 | static void |
7767 | _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, |
7768 | Mpi2EventDataSasDeviceStatusChange_t *event_data) |
7769 | { |
7770 | struct MPT3SAS_TARGET *target_priv_data; |
7771 | struct _sas_device *sas_device; |
7772 | u64 sas_address; |
7773 | unsigned long flags; |
7774 | |
7775 | /* In MPI Revision K (0xC), the internal device reset complete was |
7776 | * implemented, so avoid setting tm_busy flag for older firmware. |
7777 | */ |
7778 | if ((ioc->facts.HeaderVersion >> 8) < 0xC) |
7779 | return; |
7780 | |
7781 | if (event_data->ReasonCode != |
7782 | MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && |
7783 | event_data->ReasonCode != |
7784 | MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) |
7785 | return; |
7786 | |
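| /* |
| * tm_busy is raised below while the firmware performs an internal |
| * device reset on this target and dropped again once the reset |
| * completes, based on the two reason codes accepted above. |
| */ |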
7787 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
7788 | sas_address = le64_to_cpu(event_data->SASAddress); |
7789 | sas_device = __mpt3sas_get_sdev_by_addr(ioc, |
7790 | sas_address, |
7791 | mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0)); |
7792 | |
7793 | if (!sas_device || !sas_device->starget) |
7794 | goto out; |
7795 | |
7796 | target_priv_data = sas_device->starget->hostdata; |
7797 | if (!target_priv_data) |
7798 | goto out; |
7799 | |
7800 | if (event_data->ReasonCode == |
7801 | MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) |
7802 | target_priv_data->tm_busy = 1; |
7803 | else |
7804 | target_priv_data->tm_busy = 0; |
7805 | |
7806 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
7807 | ioc_info(ioc, |
7808 | "%s tm_busy flag for handle(0x%04x)\n", |
7809 | (target_priv_data->tm_busy == 1) ? "Enable": "Disable", |
7810 | target_priv_data->handle); |
7811 | |
7812 | out: |
7813 | if (sas_device) |
7814 | sas_device_put(sas_device); |
7815 |  |
7816 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
7817 | } |
7818 | |
7819 | |
7820 | /** |
7821 | * _scsih_check_pcie_access_status - check access flags |
7822 | * @ioc: per adapter object |
7823 | * @wwid: wwid |
7824 | * @handle: sas device handle |
7825 | * @access_status: errors returned during discovery of the device |
7826 | * |
7827 | * Return: 0 for success, else failure |
7828 | */ |
7829 | static u8 |
7830 | _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, |
7831 | u16 handle, u8 access_status) |
7832 | { |
7833 | u8 rc = 1; |
7834 | char *desc = NULL; |
7835 | |
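| /* |
| * rc defaults to failure; it is cleared only for access statuses |
| * that allow the device to be used, and callers skip adding the |
| * device when this routine returns non-zero. |
| */ |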
7836 | switch (access_status) { |
7837 | case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS: |
7838 | case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION: |
7839 | rc = 0; |
7840 | break; |
7841 | case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED: |
7842 | desc = "PCIe device capability failed"; |
7843 | break; |
7844 | case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: |
7845 | desc = "PCIe device blocked"; |
7846 | ioc_info(ioc, |
7847 | "Device with Access Status (%s): wwid(0x%016llx), " |
7848 | "handle(0x%04x) will only be added to the internal list\n", |
7849 | desc, (u64)wwid, handle); |
7850 | rc = 0; |
7851 | break; |
7852 | case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: |
7853 | desc = "PCIe device mem space access failed"; |
7854 | break; |
7855 | case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE: |
7856 | desc = "PCIe device unsupported"; |
7857 | break; |
7858 | case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED: |
7859 | desc = "PCIe device MSIx Required"; |
7860 | break; |
7861 | case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX: |
7862 | desc = "PCIe device init fail max"; |
7863 | break; |
7864 | case MPI26_PCIEDEV0_ASTATUS_UNKNOWN: |
7865 | desc = "PCIe device status unknown"; |
7866 | break; |
7867 | case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT: |
7868 | desc = "nvme ready timeout"; |
7869 | break; |
7870 | case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED: |
7871 | desc = "nvme device configuration unsupported"; |
7872 | break; |
7873 | case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED: |
7874 | desc = "nvme identify failed"; |
7875 | break; |
7876 | case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED: |
7877 | desc = "nvme qconfig failed"; |
7878 | break; |
7879 | case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED: |
7880 | desc = "nvme qcreation failed"; |
7881 | break; |
7882 | case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED: |
7883 | desc = "nvme eventcfg failed"; |
7884 | break; |
7885 | case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED: |
7886 | desc = "nvme get feature stat failed"; |
7887 | break; |
7888 | case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT: |
7889 | desc = "nvme idle timeout"; |
7890 | break; |
7891 | case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS: |
7892 | desc = "nvme failure status"; |
7893 | break; |
7894 | default: |
7895 | ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n", |
7896 | access_status, (u64)wwid, handle); |
7897 | return rc; |
7898 | } |
7899 | |
7900 | if (!rc) |
7901 | return rc; |
7902 | |
7903 | ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n", |
7904 | desc, (u64)wwid, handle); |
7905 | return rc; |
7906 | } |
7907 | |
7908 | /** |
7909 | * _scsih_pcie_device_remove_from_sml - removing pcie device |
7910 | * from SML and free up associated memory |
7911 | * @ioc: per adapter object |
7912 | * @pcie_device: the pcie_device object |
7913 | */ |
7914 | static void |
7915 | _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, |
7916 | struct _pcie_device *pcie_device) |
7917 | { |
7918 | struct MPT3SAS_TARGET *sas_target_priv_data; |
7919 | |
7920 | dewtprintk(ioc, |
7921 | ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", |
7922 | __func__, |
7923 | pcie_device->handle, (u64)pcie_device->wwid)); |
7924 | if (pcie_device->enclosure_handle != 0) |
7925 | dewtprintk(ioc, |
7926 | ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", |
7927 | __func__, |
7928 | (u64)pcie_device->enclosure_logical_id, |
7929 | pcie_device->slot)); |
7930 | if (pcie_device->connector_name[0] != '\0') |
7931 | dewtprintk(ioc, |
7932 | ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n", |
7933 | __func__, |
7934 | pcie_device->enclosure_level, |
7935 | pcie_device->connector_name)); |
7936 | |
7937 | if (pcie_device->starget && pcie_device->starget->hostdata) { |
7938 | sas_target_priv_data = pcie_device->starget->hostdata; |
7939 | sas_target_priv_data->deleted = 1; |
7940 | _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL); |
7941 | sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; |
7942 | } |
7943 | |
7944 | ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", |
7945 | pcie_device->handle, (u64)pcie_device->wwid); |
7946 | if (pcie_device->enclosure_handle != 0) |
7947 | ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n", |
7948 | (u64)pcie_device->enclosure_logical_id, |
7949 | pcie_device->slot); |
7950 | if (pcie_device->connector_name[0] != '\0') |
7951 | ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n", |
7952 | pcie_device->enclosure_level, |
7953 | pcie_device->connector_name); |
7954 | |
7955 | if (pcie_device->starget && (pcie_device->access_status != |
7956 | MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) |
7957 | scsi_remove_target(&pcie_device->starget->dev); |
7958 | dewtprintk(ioc, |
7959 | ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", |
7960 | __func__, |
7961 | pcie_device->handle, (u64)pcie_device->wwid)); |
7962 | if (pcie_device->enclosure_handle != 0) |
7963 | dewtprintk(ioc, |
7964 | ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", |
7965 | __func__, |
7966 | (u64)pcie_device->enclosure_logical_id, |
7967 | pcie_device->slot)); |
7968 | if (pcie_device->connector_name[0] != '\0') |
7969 | dewtprintk(ioc, |
7970 | ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n", |
7971 | __func__, |
7972 | pcie_device->enclosure_level, |
7973 | pcie_device->connector_name)); |
7974 | |
7975 | kfree(pcie_device->serial_number); |
7976 | } |
7977 | |
7978 | |
7979 | /** |
7980 | * _scsih_pcie_check_device - checking device responsiveness |
7981 | * @ioc: per adapter object |
7982 | * @handle: attached device handle |
7983 | */ |
7984 | static void |
7985 | _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
7986 | { |
7987 | Mpi2ConfigReply_t mpi_reply; |
7988 | Mpi26PCIeDevicePage0_t pcie_device_pg0; |
7989 | u32 ioc_status; |
7990 | struct _pcie_device *pcie_device; |
7991 | u64 wwid; |
7992 | unsigned long flags; |
7993 | struct scsi_target *starget; |
7994 | struct MPT3SAS_TARGET *sas_target_priv_data; |
7995 | u32 device_info; |
7996 | |
7997 | if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, |
7998 | &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) |
7999 | return; |
8000 | |
8001 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
8002 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
8003 | return; |
8004 | |
8005 | /* check if this is end device */ |
8006 | device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); |
8007 | if (!(_scsih_is_nvme_pciescsi_device(device_info))) |
8008 | return; |
8009 | |
8010 | wwid = le64_to_cpu(pcie_device_pg0.WWID); |
8011 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
8012 | pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); |
8013 | |
8014 | if (!pcie_device) { |
8015 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
8016 | return; |
8017 | } |
8018 | |
8019 | if (unlikely(pcie_device->handle != handle)) { |
8020 | starget = pcie_device->starget; |
8021 | sas_target_priv_data = starget->hostdata; |
8022 | pcie_device->access_status = pcie_device_pg0.AccessStatus; |
8023 | starget_printk(KERN_INFO, starget, |
8024 | "handle changed from(0x%04x) to (0x%04x)!!!\n", |
8025 | pcie_device->handle, handle); |
8026 | sas_target_priv_data->handle = handle; |
8027 | pcie_device->handle = handle; |
8028 | |
8029 | if (le32_to_cpu(pcie_device_pg0.Flags) & |
8030 | MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { |
8031 | pcie_device->enclosure_level = |
8032 | pcie_device_pg0.EnclosureLevel; |
8033 | memcpy(&pcie_device->connector_name[0], |
8034 | &pcie_device_pg0.ConnectorName[0], 4); |
8035 | } else { |
8036 | pcie_device->enclosure_level = 0; |
8037 | pcie_device->connector_name[0] = '\0'; |
8038 | } |
8039 | } |
8040 | |
8041 | /* check if device is present */ |
8042 | if (!(le32_to_cpu(pcie_device_pg0.Flags) & |
8043 | MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { |
8044 | ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n", |
8045 | handle); |
8046 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
8047 | pcie_device_put(pcie_device); |
8048 | return; |
8049 | } |
8050 | |
8051 | /* check if there were any issues with discovery */ |
8052 | if (_scsih_check_pcie_access_status(ioc, wwid, handle, |
8053 | pcie_device_pg0.AccessStatus)) { |
8054 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
8055 | pcie_device_put(pcie_device); |
8056 | return; |
8057 | } |
8058 | |
8059 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
8060 | pcie_device_put(pcie_device); |
8061 | |
8062 | _scsih_ublock_io_device(ioc, wwid, NULL); |
8063 | |
8064 | return; |
8065 | } |
8066 | |
8067 | /** |
8068 | * _scsih_pcie_add_device - creating pcie device object |
8069 | * @ioc: per adapter object |
8070 | * @handle: pcie device handle |
8071 | * |
8072 | * Creating end device object, stored in ioc->pcie_device_list. |
8073 | * |
8074 | * Return: 1 means queue the event later, 0 means complete the event |
8075 | */ |
8076 | static int |
8077 | _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
8078 | { |
8079 | Mpi26PCIeDevicePage0_t pcie_device_pg0; |
8080 | Mpi26PCIeDevicePage2_t pcie_device_pg2; |
8081 | Mpi2ConfigReply_t mpi_reply; |
8082 | struct _pcie_device *pcie_device; |
8083 | struct _enclosure_node *enclosure_dev; |
8084 | u32 ioc_status; |
8085 | u64 wwid; |
8086 | |
8087 | if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, |
8088 | &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { |
8089 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
8090 | __FILE__, __LINE__, __func__); |
8091 | return 0; |
8092 | } |
8093 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
8094 | MPI2_IOCSTATUS_MASK; |
8095 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
8096 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
8097 | __FILE__, __LINE__, __func__); |
8098 | return 0; |
8099 | } |
8100 | |
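| /* |
| * Mark this handle as having a pending OS device add; the bit is |
| * cleared below if the device already exists, and the topology |
| * change handlers use it to re-issue the add when needed. |
| */ |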
8101 | set_bit(handle, ioc->pend_os_device_add); |
8102 | wwid = le64_to_cpu(pcie_device_pg0.WWID); |
8103 | |
8104 | /* check if device is present */ |
8105 | if (!(le32_to_cpu(pcie_device_pg0.Flags) & |
8106 | MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { |
8107 | ioc_err(ioc, "device is not present handle(0x%04x)!!!\n", |
8108 | handle); |
8109 | return 0; |
8110 | } |
8111 | |
8112 | /* check if there were any issues with discovery */ |
8113 | if (_scsih_check_pcie_access_status(ioc, wwid, handle, |
8114 | pcie_device_pg0.AccessStatus)) |
8115 | return 0; |
8116 | |
8117 | if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu |
8118 | (pcie_device_pg0.DeviceInfo)))) |
8119 | return 0; |
8120 | |
8121 | pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); |
8122 | if (pcie_device) { |
8123 | clear_bit(handle, ioc->pend_os_device_add); |
8124 | pcie_device_put(pcie_device); |
8125 | return 0; |
8126 | } |
8127 | |
8128 | /* PCIe Device Page 2 contains read-only information about a |
8129 | * specific NVMe device; therefore, this page is only |
8130 | * valid for NVMe devices and is skipped for PCIe devices of type SCSI. |
8131 | */ |
8132 | if (!(mpt3sas_scsih_is_pcie_scsi_device( |
8133 | le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { |
8134 | if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, |
8135 | &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, |
8136 | handle)) { |
8137 | ioc_err(ioc, |
8138 | "failure at %s:%d/%s()!\n", __FILE__, |
8139 | __LINE__, __func__); |
8140 | return 0; |
8141 | } |
8142 | |
8143 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
8144 | MPI2_IOCSTATUS_MASK; |
8145 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
8146 | ioc_err(ioc, |
8147 | "failure at %s:%d/%s()!\n", __FILE__, |
8148 | __LINE__, __func__); |
8149 | return 0; |
8150 | } |
8151 | } |
8152 | |
8153 | pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); |
8154 | if (!pcie_device) { |
8155 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
8156 | __FILE__, __LINE__, __func__); |
8157 | return 0; |
8158 | } |
8159 | |
8160 | kref_init(&pcie_device->refcount); |
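| /* PCIe devices get target ids from their own counter on PCIE_CHANNEL. */ |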
8161 | pcie_device->id = ioc->pcie_target_id++; |
8162 | pcie_device->channel = PCIE_CHANNEL; |
8163 | pcie_device->handle = handle; |
8164 | pcie_device->access_status = pcie_device_pg0.AccessStatus; |
8165 | pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); |
8166 | pcie_device->wwid = wwid; |
8167 | pcie_device->port_num = pcie_device_pg0.PortNum; |
8168 | pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & |
8169 | MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; |
8170 | |
8171 | pcie_device->enclosure_handle = |
8172 | le16_to_cpu(pcie_device_pg0.EnclosureHandle); |
8173 | if (pcie_device->enclosure_handle != 0) |
8174 | pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); |
8175 | |
8176 | if (le32_to_cpu(pcie_device_pg0.Flags) & |
8177 | MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { |
8178 | pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; |
8179 | memcpy(&pcie_device->connector_name[0], |
8180 | &pcie_device_pg0.ConnectorName[0], 4); |
8181 | } else { |
8182 | pcie_device->enclosure_level = 0; |
8183 | pcie_device->connector_name[0] = '\0'; |
8184 | } |
8185 | |
8186 | /* get enclosure_logical_id */ |
8187 | if (pcie_device->enclosure_handle) { |
8188 | enclosure_dev = |
8189 | mpt3sas_scsih_enclosure_find_by_handle(ioc, |
8190 | pcie_device->enclosure_handle); |
8191 | if (enclosure_dev) |
8192 | pcie_device->enclosure_logical_id = |
8193 | le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); |
8194 | } |
8195 | /* TODO -- Add device name once FW supports it */ |
8196 | if (!(mpt3sas_scsih_is_pcie_scsi_device( |
8197 | le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { |
8198 | pcie_device->nvme_mdts = |
8199 | le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); |
8200 | pcie_device->shutdown_latency = |
8201 | le16_to_cpu(pcie_device_pg2.ShutdownLatency); |
8202 | /* |
8203 | * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency |
8204 | * if drive's RTD3 Entry Latency is greater than IOC's |
8205 | * max_shutdown_latency. |
8206 | */ |
8207 | if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) |
8208 | ioc->max_shutdown_latency = |
8209 | pcie_device->shutdown_latency; |
8210 | if (pcie_device_pg2.ControllerResetTO) |
8211 | pcie_device->reset_timeout = |
8212 | pcie_device_pg2.ControllerResetTO; |
8213 | else |
8214 | pcie_device->reset_timeout = 30; |
8215 | } else |
8216 | pcie_device->reset_timeout = 30; |
8217 | |
8218 | if (ioc->wait_for_discovery_to_complete) |
8219 | _scsih_pcie_device_init_add(ioc, pcie_device); |
8220 | else |
8221 | _scsih_pcie_device_add(ioc, pcie_device); |
8222 | |
8223 | pcie_device_put(pcie_device); |
8224 | return 0; |
8225 | } |
8226 | |
8227 | /** |
8228 | * _scsih_pcie_topology_change_event_debug - debug for topology |
8229 | * event |
8230 | * @ioc: per adapter object |
8231 | * @event_data: event data payload |
8232 | * Context: user. |
8233 | */ |
8234 | static void |
8235 | _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
8236 | Mpi26EventDataPCIeTopologyChangeList_t *event_data) |
8237 | { |
8238 | int i; |
8239 | u16 handle; |
8240 | u16 reason_code; |
8241 | u8 port_number; |
8242 | char *status_str = NULL; |
8243 | u8 link_rate, prev_link_rate; |
8244 | |
8245 | switch (event_data->SwitchStatus) { |
8246 | case MPI26_EVENT_PCIE_TOPO_SS_ADDED: |
8247 | status_str = "add"; |
8248 | break; |
8249 | case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: |
8250 | status_str = "remove"; |
8251 | break; |
8252 | case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING: |
8253 | case 0: |
8254 | status_str = "responding"; |
8255 | break; |
8256 | case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: |
8257 | status_str = "remove delay"; |
8258 | break; |
8259 | default: |
8260 | status_str = "unknown status"; |
8261 | break; |
8262 | } |
8263 | ioc_info(ioc, "pcie topology change: (%s)\n", status_str); |
8264 | pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), " |
8265 | "start_port(%02d), count(%d)\n", |
8266 | le16_to_cpu(event_data->SwitchDevHandle), |
8267 | le16_to_cpu(event_data->EnclosureHandle), |
8268 | event_data->StartPortNum, event_data->NumEntries); |
8269 | for (i = 0; i < event_data->NumEntries; i++) { |
8270 | handle = |
8271 | le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); |
8272 | if (!handle) |
8273 | continue; |
8274 | port_number = event_data->StartPortNum + i; |
8275 | reason_code = event_data->PortEntry[i].PortStatus; |
8276 | switch (reason_code) { |
8277 | case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: |
8278 | status_str = "target add"; |
8279 | break; |
8280 | case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: |
8281 | status_str = "target remove"; |
8282 | break; |
8283 | case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: |
8284 | status_str = "delay target remove"; |
8285 | break; |
8286 | case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: |
8287 | status_str = "link rate change"; |
8288 | break; |
8289 | case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE: |
8290 | status_str = "target responding"; |
8291 | break; |
8292 | default: |
8293 | status_str = "unknown"; |
8294 | break; |
8295 | } |
8296 | link_rate = event_data->PortEntry[i].CurrentPortInfo & |
8297 | MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; |
8298 | prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & |
8299 | MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; |
8300 | pr_info("\tport(%02d), attached_handle(0x%04x): %s:" |
8301 | " link rate: new(0x%02x), old(0x%02x)\n", port_number, |
8302 | handle, status_str, link_rate, prev_link_rate); |
8303 | } |
8304 | } |
8305 | |
8306 | /** |
8307 | * _scsih_pcie_topology_change_event - handle PCIe topology |
8308 | * changes |
8309 | * @ioc: per adapter object |
8310 | * @fw_event: The fw_event_work object |
8311 | * Context: user. |
8312 | * |
8313 | */ |
8314 | static void |
8315 | _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, |
8316 | struct fw_event_work *fw_event) |
8317 | { |
8318 | int i; |
8319 | u16 handle; |
8320 | u16 reason_code; |
8321 | u8 link_rate, prev_link_rate; |
8322 | unsigned long flags; |
8323 | int rc; |
8324 | Mpi26EventDataPCIeTopologyChangeList_t *event_data = |
8325 | (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data; |
8326 | struct _pcie_device *pcie_device; |
8327 | |
8328 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
8329 | _scsih_pcie_topology_change_event_debug(ioc, event_data); |
8330 | |
8331 | if (ioc->shost_recovery || ioc->remove_host || |
8332 | ioc->pci_error_recovery) |
8333 | return; |
8334 | |
8335 | if (fw_event->ignore) { |
8336 | dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n")); |
8337 | return; |
8338 | } |
8339 | |
8340 | /* handle siblings events */ |
8341 | for (i = 0; i < event_data->NumEntries; i++) { |
8342 | if (fw_event->ignore) { |
8343 | dewtprintk(ioc, |
8344 | ioc_info(ioc, "ignoring switch event\n")); |
8345 | return; |
8346 | } |
8347 | if (ioc->remove_host || ioc->pci_error_recovery) |
8348 | return; |
8349 | reason_code = event_data->PortEntry[i].PortStatus; |
8350 | handle = |
8351 | le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); |
8352 | if (!handle) |
8353 | continue; |
8354 | |
8355 | link_rate = event_data->PortEntry[i].CurrentPortInfo |
8356 | & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; |
8357 | prev_link_rate = event_data->PortEntry[i].PreviousPortInfo |
8358 | & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; |
8359 | |
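| /* |
| * Rates below MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 indicate the port |
| * has no usable link, so such entries are ignored by the change |
| * and add handling below. |
| */ |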
8360 | switch (reason_code) { |
8361 | case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: |
8362 | if (ioc->shost_recovery) |
8363 | break; |
8364 | if (link_rate == prev_link_rate) |
8365 | break; |
8366 | if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5) |
8367 | break; |
8368 | |
8369 | _scsih_pcie_check_device(ioc, handle); |
8370 | |
8371 | /* The code after this point handles the case where a |
8372 | * device has been added but keeps returning BUSY for |
8373 | * some time. Then, before the Device Missing Delay |
8374 | * expires and the device becomes READY, the device is |
8375 | * removed and added back. |
8376 | */ |
8377 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
8378 | pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); |
8379 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
8380 |  |
8381 | if (pcie_device) { |
8382 | pcie_device_put(pcie_device); |
8383 | break; |
8384 | } |
8385 | |
8386 | if (!test_bit(handle, ioc->pend_os_device_add)) |
8387 | break; |
8388 | |
8389 | dewtprintk(ioc, |
8390 | ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n", |
8391 | handle)); |
8392 | event_data->PortEntry[i].PortStatus &= 0xF0; |
8393 | event_data->PortEntry[i].PortStatus |= |
8394 | MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; |
8395 | fallthrough; |
8396 | case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: |
8397 | if (ioc->shost_recovery) |
8398 | break; |
8399 | if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5) |
8400 | break; |
8401 | |
8402 | rc = _scsih_pcie_add_device(ioc, handle); |
8403 | if (!rc) { |
8404 | /* mark entry vacant */ |
8405 | /* TODO This needs to be reviewed and fixed: |
8406 | * we don't have an entry |
8407 | * to mark an event void, like vacant |
8408 | */ |
8409 | event_data->PortEntry[i].PortStatus |= |
8410 | MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE; |
8411 | } |
8412 | break; |
8413 | case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: |
8414 | _scsih_pcie_device_remove_by_handle(ioc, handle); |
8415 | break; |
8416 | } |
8417 | } |
8418 | } |
8419 | |
8420 | /** |
8421 | * _scsih_pcie_device_status_change_event_debug - debug for device event |
8422 | * @ioc: per adapter object |
8423 | * @event_data: event data payload |
8424 | * Context: user. |
8425 | */ |
8426 | static void |
8427 | _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
8428 | Mpi26EventDataPCIeDeviceStatusChange_t *event_data) |
8429 | { |
8430 | char *reason_str = NULL; |
8431 | |
8432 | switch (event_data->ReasonCode) { |
8433 | case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA: |
8434 | reason_str = "smart data"; |
8435 | break; |
8436 | case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED: |
8437 | reason_str = "unsupported device discovered"; |
8438 | break; |
8439 | case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET: |
8440 | reason_str = "internal device reset"; |
8441 | break; |
8442 | case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL: |
8443 | reason_str = "internal task abort"; |
8444 | break; |
8445 | case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL: |
8446 | reason_str = "internal task abort set"; |
8447 | break; |
8448 | case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: |
8449 | reason_str = "internal clear task set"; |
8450 | break; |
8451 | case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL: |
8452 | reason_str = "internal query task"; |
8453 | break; |
8454 | case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE: |
8455 | reason_str = "device init failure"; |
8456 | break; |
8457 | case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET: |
8458 | reason_str = "internal device reset complete"; |
8459 | break; |
8460 | case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: |
8461 | reason_str = "internal task abort complete"; |
8462 | break; |
8463 | case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION: |
8464 | reason_str = "internal async notification"; |
8465 | break; |
8466 | case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED: |
8467 | reason_str = "pcie hot reset failed"; |
8468 | break; |
8469 | default: |
8470 | reason_str = "unknown reason"; |
8471 | break; |
8472 | } |
8473 | |
8474 | ioc_info(ioc, "PCIE device status change: (%s)\n" |
8475 | "\thandle(0x%04x), WWID(0x%016llx), tag(%d)", |
8476 | reason_str, le16_to_cpu(event_data->DevHandle), |
8477 | (u64)le64_to_cpu(event_data->WWID), |
8478 | le16_to_cpu(event_data->TaskTag)); |
8479 | if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA) |
8480 | pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", |
8481 | event_data->ASC, event_data->ASCQ); |
8482 | pr_cont("\n"); |
8483 | } |
8484 | |
8485 | /** |
8486 | * _scsih_pcie_device_status_change_event - handle device status |
8487 | * change |
8488 | * @ioc: per adapter object |
8489 | * @fw_event: The fw_event_work object |
8490 | * Context: user. |
8491 | */ |
8492 | static void |
8493 | _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, |
8494 | struct fw_event_work *fw_event) |
8495 | { |
8496 | struct MPT3SAS_TARGET *target_priv_data; |
8497 | struct _pcie_device *pcie_device; |
8498 | u64 wwid; |
8499 | unsigned long flags; |
8500 | Mpi26EventDataPCIeDeviceStatusChange_t *event_data = |
8501 | (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data; |
8502 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
8503 | _scsih_pcie_device_status_change_event_debug(ioc, |
8504 | event_data); |
8505 | |
8506 | if (event_data->ReasonCode != |
8507 | MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET && |
8508 | event_data->ReasonCode != |
8509 | MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET) |
8510 | return; |
8511 | |
8512 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
8513 | wwid = le64_to_cpu(event_data->WWID); |
8514 | pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); |
8515 | |
8516 | if (!pcie_device || !pcie_device->starget) |
8517 | goto out; |
8518 | |
8519 | target_priv_data = pcie_device->starget->hostdata; |
8520 | if (!target_priv_data) |
8521 | goto out; |
8522 | |
8523 | if (event_data->ReasonCode == |
8524 | MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET) |
8525 | target_priv_data->tm_busy = 1; |
8526 | else |
8527 | target_priv_data->tm_busy = 0; |
8528 | out: |
8529 | if (pcie_device) |
8530 | pcie_device_put(pcie_device); |
8531 |  |
8532 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
8533 | } |
8534 | |
8535 | /** |
8536 | * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure |
8537 | * event |
8538 | * @ioc: per adapter object |
8539 | * @event_data: event data payload |
8540 | * Context: user. |
8541 | */ |
8542 | static void |
8543 | _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
8544 | Mpi2EventDataSasEnclDevStatusChange_t *event_data) |
8545 | { |
8546 | char *reason_str = NULL; |
8547 | |
8548 | switch (event_data->ReasonCode) { |
8549 | case MPI2_EVENT_SAS_ENCL_RC_ADDED: |
8550 | reason_str = "enclosure add"; |
8551 | break; |
8552 | case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: |
8553 | reason_str = "enclosure remove"; |
8554 | break; |
8555 | default: |
8556 | reason_str = "unknown reason"; |
8557 | break; |
8558 | } |
8559 | |
8560 | ioc_info(ioc, "enclosure status change: (%s)\n" |
8561 | "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n", |
8562 | reason_str, |
8563 | le16_to_cpu(event_data->EnclosureHandle), |
8564 | (u64)le64_to_cpu(event_data->EnclosureLogicalID), |
8565 | le16_to_cpu(event_data->StartSlot)); |
8566 | } |
8567 | |
8568 | /** |
8569 | * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events |
8570 | * @ioc: per adapter object |
8571 | * @fw_event: The fw_event_work object |
8572 | * Context: user. |
8573 | */ |
8574 | static void |
8575 | _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, |
8576 | struct fw_event_work *fw_event) |
8577 | { |
8578 | Mpi2ConfigReply_t mpi_reply; |
8579 | struct _enclosure_node *enclosure_dev = NULL; |
8580 | Mpi2EventDataSasEnclDevStatusChange_t *event_data = |
8581 | (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data; |
8582 | int rc; |
8583 | u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle); |
8584 | |
8585 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
8586 | _scsih_sas_enclosure_dev_status_change_event_debug(ioc, |
8587 | (Mpi2EventDataSasEnclDevStatusChange_t *) |
8588 | fw_event->event_data); |
8589 | if (ioc->shost_recovery) |
8590 | return; |
8591 | |
8592 | if (enclosure_handle) |
8593 | enclosure_dev = |
8594 | mpt3sas_scsih_enclosure_find_by_handle(ioc, |
8595 | enclosure_handle); |
8596 | switch (event_data->ReasonCode) { |
8597 | case MPI2_EVENT_SAS_ENCL_RC_ADDED: |
8598 | if (!enclosure_dev) { |
8599 | enclosure_dev = |
8600 | kzalloc(sizeof(struct _enclosure_node), |
8601 | GFP_KERNEL); |
8602 | if (!enclosure_dev) { |
8603 | ioc_info(ioc, "failure at %s:%d/%s()!\n", |
8604 | __FILE__, __LINE__, __func__); |
8605 | return; |
8606 | } |
8607 | rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, |
8608 | &enclosure_dev->pg0, |
8609 | MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, |
8610 | enclosure_handle); |
8611 | |
8612 | if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & |
8613 | MPI2_IOCSTATUS_MASK)) { |
8614 | kfree(enclosure_dev); |
8615 | return; |
8616 | } |
8617 | |
8618 | list_add_tail(&enclosure_dev->list, |
8619 | &ioc->enclosure_list); |
8620 | } |
8621 | break; |
8622 | case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: |
8623 | if (enclosure_dev) { |
8624 | list_del(&enclosure_dev->list); |
8625 | kfree(enclosure_dev); |
8626 | } |
8627 | break; |
8628 | default: |
8629 | break; |
8630 | } |
8631 | } |
8632 | |
8633 | /** |
8634 | * _scsih_sas_broadcast_primitive_event - handle broadcast events |
8635 | * @ioc: per adapter object |
8636 | * @fw_event: The fw_event_work object |
8637 | * Context: user. |
8638 | */ |
8639 | static void |
8640 | _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, |
8641 | struct fw_event_work *fw_event) |
8642 | { |
8643 | struct scsi_cmnd *scmd; |
8644 | struct scsi_device *sdev; |
8645 | struct scsiio_tracker *st; |
8646 | u16 smid, handle; |
8647 | u32 lun; |
8648 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
8649 | u32 termination_count; |
8650 | u32 query_count; |
8651 | Mpi2SCSITaskManagementReply_t *mpi_reply; |
8652 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = |
8653 | (Mpi2EventDataSasBroadcastPrimitive_t *) |
8654 | fw_event->event_data; |
8655 | u16 ioc_status; |
8656 | unsigned long flags; |
8657 | int r; |
8658 | u8 max_retries = 0; |
8659 | u8 task_abort_retries; |
8660 | |
8661 | mutex_lock(&ioc->tm_cmds.mutex); |
8662 | ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n", |
8663 | __func__, event_data->PhyNum, event_data->PortWidth); |
8664 | |
8665 | _scsih_block_io_all_device(ioc); |
8666 | |
8667 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
8668 | mpi_reply = ioc->tm_cmds.reply; |
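| /* |
| * For every outstanding SCSI command, issue a QUERY_TASK; if the |
| * IOC no longer owns the IO, follow up with ABORT_TASK so the |
| * command is cleaned up and completed back to the midlayer. |
| */ |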
8669 | broadcast_aen_retry: |
8670 | |
8671 | /* sanity checks for retrying this loop */ |
8672 | if (max_retries++ == 5) { |
8673 | dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__)); |
8674 | goto out; |
8675 | } else if (max_retries > 1) |
8676 | dewtprintk(ioc, |
8677 | ioc_info(ioc, "%s: %d retry\n", |
8678 | __func__, max_retries - 1)); |
8679 | |
8680 | termination_count = 0; |
8681 | query_count = 0; |
8682 | for (smid = 1; smid <= ioc->scsiio_depth; smid++) { |
8683 | if (ioc->shost_recovery) |
8684 | goto out; |
8685 | scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); |
8686 | if (!scmd) |
8687 | continue; |
8688 | st = scsi_cmd_priv(scmd); |
8689 | sdev = scmd->device; |
8690 | sas_device_priv_data = sdev->hostdata; |
8691 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) |
8692 | continue; |
8693 | /* skip hidden raid components */ |
8694 | if (sas_device_priv_data->sas_target->flags & |
8695 | MPT_TARGET_FLAGS_RAID_COMPONENT) |
8696 | continue; |
8697 | /* skip volumes */ |
8698 | if (sas_device_priv_data->sas_target->flags & |
8699 | MPT_TARGET_FLAGS_VOLUME) |
8700 | continue; |
8701 | /* skip PCIe devices */ |
8702 | if (sas_device_priv_data->sas_target->flags & |
8703 | MPT_TARGET_FLAGS_PCIE_DEVICE) |
8704 | continue; |
8705 | |
8706 | handle = sas_device_priv_data->sas_target->handle; |
8707 | lun = sas_device_priv_data->lun; |
8708 | query_count++; |
8709 | |
8710 | if (ioc->shost_recovery) |
8711 | goto out; |
8712 | |
8713 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
8714 | r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, |
8715 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, |
8716 | st->msix_io, 30, 0); |
8717 | if (r == FAILED) { |
8718 | sdev_printk(KERN_WARNING, sdev, |
8719 | "mpt3sas_scsih_issue_tm: FAILED when sending " |
8720 | "QUERY_TASK: scmd(%p)\n", scmd); |
8721 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
8722 | goto broadcast_aen_retry; |
8723 | } |
8724 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) |
8725 | & MPI2_IOCSTATUS_MASK; |
8726 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
8727 | sdev_printk(KERN_WARNING, sdev, |
8728 | "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", |
8729 | ioc_status, scmd); |
8730 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
8731 | goto broadcast_aen_retry; |
8732 | } |
8733 | |
8734 | /* see if IO is still owned by IOC and target */ |
8735 | if (mpi_reply->ResponseCode == |
8736 | MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || |
8737 | mpi_reply->ResponseCode == |
8738 | MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { |
8739 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
8740 | continue; |
8741 | } |
8742 | task_abort_retries = 0; |
8743 | tm_retry: |
8744 | if (task_abort_retries++ == 60) { |
8745 | dewtprintk(ioc, |
8746 | ioc_info(ioc, "%s: ABORT_TASK: giving up\n", |
8747 | __func__)); |
8748 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
8749 | goto broadcast_aen_retry; |
8750 | } |
8751 | |
8752 | if (ioc->shost_recovery) |
8753 | goto out_no_lock; |
8754 | |
8755 | r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, |
8756 | sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, |
8757 | st->smid, st->msix_io, 30, 0); |
8758 | if (r == FAILED || st->cb_idx != 0xFF) { |
8759 | sdev_printk(KERN_WARNING, sdev, |
8760 | "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " |
8761 | "scmd(%p)\n", scmd); |
8762 | goto tm_retry; |
8763 | } |
8764 | |
8765 | if (task_abort_retries > 1) |
8766 | sdev_printk(KERN_WARNING, sdev, |
8767 | "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" |
8768 | " scmd(%p)\n", |
8769 | task_abort_retries - 1, scmd); |
8770 | |
8771 | termination_count += le32_to_cpu(mpi_reply->TerminationCount); |
8772 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
8773 | } |
8774 | |
8775 | if (ioc->broadcast_aen_pending) { |
8776 | dewtprintk(ioc, |
8777 | ioc_info(ioc, |
8778 | "%s: loop back due to pending AEN\n", |
8779 | __func__)); |
8780 | ioc->broadcast_aen_pending = 0; |
8781 | goto broadcast_aen_retry; |
8782 | } |
8783 | |
8784 | out: |
8785 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
8786 | out_no_lock: |
8787 | |
8788 | dewtprintk(ioc, |
8789 | ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n", |
8790 | __func__, query_count, termination_count)); |
8791 | |
8792 | ioc->broadcast_aen_busy = 0; |
8793 | if (!ioc->shost_recovery) |
8794 | _scsih_ublock_io_all_device(ioc); |
8795 | mutex_unlock(&ioc->tm_cmds.mutex); |
8796 | } |
8797 | |
8798 | /** |
8799 | * _scsih_sas_discovery_event - handle discovery events |
8800 | * @ioc: per adapter object |
8801 | * @fw_event: The fw_event_work object |
8802 | * Context: user. |
8803 | */ |
8804 | static void |
8805 | _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, |
8806 | struct fw_event_work *fw_event) |
8807 | { |
8808 | Mpi2EventDataSasDiscovery_t *event_data = |
8809 | (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; |
8810 | |
8811 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { |
8812 | ioc_info(ioc, "discovery event: (%s)", |
8813 | event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? |
8814 | "start": "stop"); |
8815 | if (event_data->DiscoveryStatus) |
8816 | pr_cont("discovery_status(0x%08x)", |
8817 | le32_to_cpu(event_data->DiscoveryStatus)); |
8818 | pr_cont("\n"); |
8819 | } |
8820 | |
8821 | if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && |
8822 | !ioc->sas_hba.num_phys) { |
8823 | if (disable_discovery > 0 && ioc->shost_recovery) { |
8824 | /* Wait for the reset to complete */ |
8825 | while (ioc->shost_recovery) |
8826 | ssleep(1); |
8827 | } |
8828 | _scsih_sas_host_add(ioc); |
8829 | } |
8830 | } |
8831 | |
8832 | /** |
8833 | * _scsih_sas_device_discovery_error_event - display SAS device discovery error |
8834 | * events |
8835 | * @ioc: per adapter object |
8836 | * @fw_event: The fw_event_work object |
8837 | * Context: user. |
8838 | */ |
8839 | static void |
8840 | _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, |
8841 | struct fw_event_work *fw_event) |
8842 | { |
8843 | Mpi25EventDataSasDeviceDiscoveryError_t *event_data = |
8844 | (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data; |
8845 | |
8846 | switch (event_data->ReasonCode) { |
8847 | case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED: |
8848 | ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n", |
8849 | le16_to_cpu(event_data->DevHandle), |
8850 | (u64)le64_to_cpu(event_data->SASAddress), |
8851 | event_data->PhysicalPort); |
8852 | break; |
8853 | case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT: |
8854 | ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n", |
8855 | le16_to_cpu(event_data->DevHandle), |
8856 | (u64)le64_to_cpu(event_data->SASAddress), |
8857 | event_data->PhysicalPort); |
8858 | break; |
8859 | default: |
8860 | break; |
8861 | } |
8862 | } |
8863 | |
8864 | /** |
8865 | * _scsih_pcie_enumeration_event - handle enumeration events |
8866 | * @ioc: per adapter object |
8867 | * @fw_event: The fw_event_work object |
8868 | * Context: user. |
8869 | */ |
8870 | static void |
8871 | _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, |
8872 | struct fw_event_work *fw_event) |
8873 | { |
8874 | Mpi26EventDataPCIeEnumeration_t *event_data = |
8875 | (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data; |
8876 | |
8877 | if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)) |
8878 | return; |
8879 | |
8880 | ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x", |
8881 | (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? |
8882 | "started": "completed", |
8883 | event_data->Flags); |
8884 | if (event_data->EnumerationStatus) |
8885 | pr_cont("enumeration_status(0x%08x)", |
8886 | le32_to_cpu(event_data->EnumerationStatus)); |
8887 | pr_cont("\n"); |
8888 | } |
8889 | |
8890 | /** |
8891 | * _scsih_ir_fastpath - turn on fastpath for IR physdisk |
8892 | * @ioc: per adapter object |
8893 | * @handle: device handle for physical disk |
8894 | * @phys_disk_num: physical disk number |
8895 | * |
8896 | * Return: 0 for success, else failure. |
8897 | */ |
8898 | static int |
8899 | _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) |
8900 | { |
8901 | Mpi2RaidActionRequest_t *mpi_request; |
8902 | Mpi2RaidActionReply_t *mpi_reply; |
8903 | u16 smid; |
8904 | u8 issue_reset = 0; |
8905 | int rc = 0; |
8906 | u16 ioc_status; |
8907 | u32 log_info; |
8908 | |
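| /* The fast-path RAID action is not issued on MPI2 (SAS 2.0) controllers. */ |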
8909 | if (ioc->hba_mpi_version_belonged == MPI2_VERSION) |
8910 | return rc; |
8911 | |
8912 | mutex_lock(&ioc->scsih_cmds.mutex); |
8913 | |
8914 | if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { |
8915 | ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); |
8916 | rc = -EAGAIN; |
8917 | goto out; |
8918 | } |
8919 | ioc->scsih_cmds.status = MPT3_CMD_PENDING; |
8920 | |
8921 | smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); |
8922 | if (!smid) { |
8923 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
8924 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; |
8925 | rc = -EAGAIN; |
8926 | goto out; |
8927 | } |
8928 | |
8929 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
8930 | ioc->scsih_cmds.smid = smid; |
8931 | memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); |
8932 | |
8933 | mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; |
8934 | mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN; |
8935 | mpi_request->PhysDiskNum = phys_disk_num; |
8936 | |
8937 | dewtprintk(ioc, |
8938 | ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n", |
8939 | handle, phys_disk_num)); |
8940 | |
8941 | init_completion(&ioc->scsih_cmds.done); |
8942 | ioc->put_smid_default(ioc, smid); |
8943 | wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); |
8944 | |
8945 | if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { |
8946 | mpt3sas_check_cmd_timeout(ioc, |
8947 | ioc->scsih_cmds.status, mpi_request, |
8948 | sizeof(Mpi2RaidActionRequest_t)/4, issue_reset); |
8949 | rc = -EFAULT; |
8950 | goto out; |
8951 | } |
8952 | |
8953 | if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { |
8954 | |
8955 | mpi_reply = ioc->scsih_cmds.reply; |
8956 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus); |
8957 | if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) |
8958 | log_info = le32_to_cpu(mpi_reply->IOCLogInfo); |
8959 | else |
8960 | log_info = 0; |
8961 | ioc_status &= MPI2_IOCSTATUS_MASK; |
8962 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
8963 | dewtprintk(ioc, |
8964 | ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n", |
8965 | ioc_status, log_info)); |
8966 | rc = -EFAULT; |
8967 | } else |
8968 | dewtprintk(ioc, |
8969 | ioc_info(ioc, "IR RAID_ACTION: completed successfully\n")); |
8970 | } |
8971 | |
8972 | out: |
8973 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; |
8974 | mutex_unlock(&ioc->scsih_cmds.mutex); |
8975 | |
8976 | if (issue_reset) |
8977 | mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); |
8978 | return rc; |
8979 | } |
8980 | |
8981 | /** |
8982 | * _scsih_reprobe_lun - reprobing lun |
8983 | * @sdev: scsi device struct |
8984 | * @no_uld_attach: sdev->no_uld_attach flag setting |
8985 | * |
8986 | **/ |
8987 | static void |
8988 | _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) |
8989 | { |
8990 | sdev->no_uld_attach = no_uld_attach ? 1 : 0; |
8991 | sdev_printk(KERN_INFO, sdev, "%s raid component\n", |
8992 | sdev->no_uld_attach ? "hiding": "exposing"); |
8993 | WARN_ON(scsi_device_reprobe(sdev)); |
8994 | } |
8995 | |
8996 | /** |
8997 | * _scsih_sas_volume_add - add new volume |
8998 | * @ioc: per adapter object |
8999 | * @element: IR config element data |
9000 | * Context: user. |
9001 | */ |
9002 | static void |
9003 | _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, |
9004 | Mpi2EventIrConfigElement_t *element) |
9005 | { |
9006 | struct _raid_device *raid_device; |
9007 | unsigned long flags; |
9008 | u64 wwid; |
9009 | u16 handle = le16_to_cpu(element->VolDevHandle); |
9010 | int rc; |
9011 | |
9012 | mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); |
9013 | if (!wwid) { |
9014 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9015 | __FILE__, __LINE__, __func__); |
9016 | return; |
9017 | } |
9018 | |
9019 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
9020 | raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); |
9021 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
9022 | |
9023 | if (raid_device) |
9024 | return; |
9025 | |
9026 | raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); |
9027 | if (!raid_device) { |
9028 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9029 | __FILE__, __LINE__, __func__); |
9030 | return; |
9031 | } |
9032 | |
9033 | raid_device->id = ioc->sas_id++; |
9034 | raid_device->channel = RAID_CHANNEL; |
9035 | raid_device->handle = handle; |
9036 | raid_device->wwid = wwid; |
9037 | _scsih_raid_device_add(ioc, raid_device); |
9038 | if (!ioc->wait_for_discovery_to_complete) { |
9039 | rc = scsi_add_device(ioc->shost, RAID_CHANNEL, |
9040 | raid_device->id, 0); |
9041 | if (rc) |
9042 | _scsih_raid_device_remove(ioc, raid_device); |
9043 | } else { |
9044 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
9045 | _scsih_determine_boot_device(ioc, raid_device, 1); |
9046 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
9047 | } |
9048 | } |
9049 | |
9050 | /** |
9051 | * _scsih_sas_volume_delete - delete volume |
9052 | * @ioc: per adapter object |
9053 | * @handle: volume device handle |
9054 | * Context: user. |
9055 | */ |
9056 | static void |
9057 | _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
9058 | { |
9059 | struct _raid_device *raid_device; |
9060 | unsigned long flags; |
9061 | struct MPT3SAS_TARGET *sas_target_priv_data; |
9062 | struct scsi_target *starget = NULL; |
9063 | |
9064 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
9065 | raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); |
9066 | if (raid_device) { |
9067 | if (raid_device->starget) { |
9068 | starget = raid_device->starget; |
9069 | sas_target_priv_data = starget->hostdata; |
9070 | sas_target_priv_data->deleted = 1; |
9071 | } |
9072 | ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", |
9073 | raid_device->handle, (u64)raid_device->wwid); |
9074 | list_del(&raid_device->list); |
9075 | kfree(raid_device); |
9076 | } |
9077 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
9078 | if (starget) |
9079 | scsi_remove_target(&starget->dev); |
9080 | } |
9081 | |
9082 | /** |
9083 | * _scsih_sas_pd_expose - expose pd component to /dev/sdX |
9084 | * @ioc: per adapter object |
9085 | * @element: IR config element data |
9086 | * Context: user. |
9087 | */ |
9088 | static void |
9089 | _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, |
9090 | Mpi2EventIrConfigElement_t *element) |
9091 | { |
9092 | struct _sas_device *sas_device; |
9093 | struct scsi_target *starget = NULL; |
9094 | struct MPT3SAS_TARGET *sas_target_priv_data; |
9095 | unsigned long flags; |
9096 | u16 handle = le16_to_cpu(element->PhysDiskDevHandle); |
9097 | |
9098 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
9099 | sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); |
9100 | if (sas_device) { |
9101 | sas_device->volume_handle = 0; |
9102 | sas_device->volume_wwid = 0; |
9103 | clear_bit(handle, ioc->pd_handles); |
9104 | if (sas_device->starget && sas_device->starget->hostdata) { |
9105 | starget = sas_device->starget; |
9106 | sas_target_priv_data = starget->hostdata; |
9107 | sas_target_priv_data->flags &= |
9108 | ~MPT_TARGET_FLAGS_RAID_COMPONENT; |
9109 | } |
9110 | } |
9111 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
9112 | if (!sas_device) |
9113 | return; |
9114 | |
9115 | /* exposing raid component */ |
9116 | if (starget) |
9117 | starget_for_each_device(starget, NULL, _scsih_reprobe_lun); |
9118 |  |
9119 | sas_device_put(sas_device); |
9120 | } |
9121 | |
9122 | /** |
9123 | * _scsih_sas_pd_hide - hide pd component from /dev/sdX |
9124 | * @ioc: per adapter object |
9125 | * @element: IR config element data |
9126 | * Context: user. |
9127 | */ |
9128 | static void |
9129 | _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, |
9130 | Mpi2EventIrConfigElement_t *element) |
9131 | { |
9132 | struct _sas_device *sas_device; |
9133 | struct scsi_target *starget = NULL; |
9134 | struct MPT3SAS_TARGET *sas_target_priv_data; |
9135 | unsigned long flags; |
9136 | u16 handle = le16_to_cpu(element->PhysDiskDevHandle); |
9137 | u16 volume_handle = 0; |
9138 | u64 volume_wwid = 0; |
9139 | |
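| /* |
| * Look up the owning volume's handle and WWID so they can be |
| * recorded on the sas_device before it is hidden as a RAID member. |
| */ |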
9140 | mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle); |
9141 | if (volume_handle) |
9142 | mpt3sas_config_get_volume_wwid(ioc, volume_handle, |
9143 | &volume_wwid); |
9144 | |
9145 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
9146 | sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); |
9147 | if (sas_device) { |
9148 | set_bit(handle, ioc->pd_handles); |
9149 | if (sas_device->starget && sas_device->starget->hostdata) { |
9150 | starget = sas_device->starget; |
9151 | sas_target_priv_data = starget->hostdata; |
9152 | sas_target_priv_data->flags |= |
9153 | MPT_TARGET_FLAGS_RAID_COMPONENT; |
9154 | sas_device->volume_handle = volume_handle; |
9155 | sas_device->volume_wwid = volume_wwid; |
9156 | } |
9157 | } |
9158 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
9159 | if (!sas_device) |
9160 | return; |
9161 | |
9162 | /* hiding raid component */ |
9163 | _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); |
9164 |  |
9165 | if (starget) |
9166 | starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); |
9167 |  |
9168 | sas_device_put(sas_device); |
9169 | } |
9170 | |
9171 | /** |
9172 | * _scsih_sas_pd_delete - delete pd component |
9173 | * @ioc: per adapter object |
9174 | * @element: IR config element data |
9175 | * Context: user. |
9176 | */ |
9177 | static void |
9178 | _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, |
9179 | Mpi2EventIrConfigElement_t *element) |
9180 | { |
9181 | u16 handle = le16_to_cpu(element->PhysDiskDevHandle); |
9182 | |
9183 | _scsih_device_remove_by_handle(ioc, handle); |
9184 | } |
9185 | |
9186 | /** |
9187 | * _scsih_sas_pd_add - add pd component |
9188 | * @ioc: per adapter object |
9189 | * @element: IR config element data |
9190 | * Context: user. |
9191 | */ |
9192 | static void |
9193 | _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, |
9194 | Mpi2EventIrConfigElement_t *element) |
9195 | { |
9196 | struct _sas_device *sas_device; |
9197 | u16 handle = le16_to_cpu(element->PhysDiskDevHandle); |
9198 | Mpi2ConfigReply_t mpi_reply; |
9199 | Mpi2SasDevicePage0_t sas_device_pg0; |
9200 | u32 ioc_status; |
9201 | u64 sas_address; |
9202 | u16 parent_handle; |
9203 | |
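| /* |
| * pd_handles tracks device handles that are hidden RAID member |
| * disks; set the bit before (re)adding the device so it is treated |
| * as a physical disk component. |
| */ |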
9204 | set_bit(handle, ioc->pd_handles); |
9205 | |
9206 | sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); |
9207 | if (sas_device) { |
9208 | _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); |
9209 | sas_device_put(sas_device); |
9210 | return; |
9211 | } |
9212 | |
9213 | if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
9214 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { |
9215 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9216 | __FILE__, __LINE__, __func__); |
9217 | return; |
9218 | } |
9219 | |
9220 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
9221 | MPI2_IOCSTATUS_MASK; |
9222 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
9223 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9224 | __FILE__, __LINE__, __func__); |
9225 | return; |
9226 | } |
9227 | |
9228 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); |
9229 | if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) |
9230 | mpt3sas_transport_update_links(ioc, sas_address, handle, |
9231 | sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, |
9232 | mpt3sas_get_port_by_id(ioc, |
9233 | sas_device_pg0.PhysicalPort, 0)); |
9234 | |
9235 | _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); |
9236 | _scsih_add_device(ioc, handle, 0, 1); |
9237 | } |
9238 | |
9239 | /** |
9240 | * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events |
9241 | * @ioc: per adapter object |
9242 | * @event_data: event data payload |
9243 | * Context: user. |
9244 | */ |
9245 | static void |
9246 | _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
9247 | Mpi2EventDataIrConfigChangeList_t *event_data) |
9248 | { |
9249 | Mpi2EventIrConfigElement_t *element; |
9250 | u8 element_type; |
9251 | int i; |
9252 | char *reason_str = NULL, *element_str = NULL; |
9253 | |
9254 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; |
9255 | |
9256 | ioc_info(ioc, "raid config change: (%s), elements(%d)\n", |
9257 | le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? |
9258 | "foreign": "native", |
9259 | event_data->NumElements); |
9260 | for (i = 0; i < event_data->NumElements; i++, element++) { |
9261 | switch (element->ReasonCode) { |
9262 | case MPI2_EVENT_IR_CHANGE_RC_ADDED: |
9263 | reason_str = "add"; |
9264 | break; |
9265 | case MPI2_EVENT_IR_CHANGE_RC_REMOVED: |
9266 | reason_str = "remove"; |
9267 | break; |
9268 | case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: |
9269 | reason_str = "no change"; |
9270 | break; |
9271 | case MPI2_EVENT_IR_CHANGE_RC_HIDE: |
9272 | reason_str = "hide"; |
9273 | break; |
9274 | case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: |
9275 | reason_str = "unhide"; |
9276 | break; |
9277 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: |
9278 | reason_str = "volume_created"; |
9279 | break; |
9280 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: |
9281 | reason_str = "volume_deleted"; |
9282 | break; |
9283 | case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: |
9284 | reason_str = "pd_created"; |
9285 | break; |
9286 | case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: |
9287 | reason_str = "pd_deleted"; |
9288 | break; |
9289 | default: |
9290 | reason_str = "unknown reason"; |
9291 | break; |
9292 | } |
9293 | element_type = le16_to_cpu(element->ElementFlags) & |
9294 | MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; |
9295 | switch (element_type) { |
9296 | case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: |
9297 | element_str = "volume"; |
9298 | break; |
9299 | case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: |
9300 | element_str = "phys disk"; |
9301 | break; |
9302 | case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: |
9303 | element_str = "hot spare"; |
9304 | break; |
9305 | default: |
9306 | element_str = "unknown element"; |
9307 | break; |
9308 | } |
9309 | pr_info("\t(%s:%s), vol handle(0x%04x), "\ |
9310 | "pd handle(0x%04x), pd num(0x%02x)\n", element_str, |
9311 | reason_str, le16_to_cpu(element->VolDevHandle), |
9312 | le16_to_cpu(element->PhysDiskDevHandle), |
9313 | element->PhysDiskNum); |
9314 | } |
9315 | } |
9316 | |
9317 | /** |
9318 | * _scsih_sas_ir_config_change_event - handle ir configuration change events |
9319 | * @ioc: per adapter object |
9320 | * @fw_event: The fw_event_work object |
9321 | * Context: user. |
9322 | */ |
9323 | static void |
9324 | _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, |
9325 | struct fw_event_work *fw_event) |
9326 | { |
9327 | Mpi2EventIrConfigElement_t *element; |
9328 | int i; |
9329 | u8 foreign_config; |
9330 | Mpi2EventDataIrConfigChangeList_t *event_data = |
9331 | (Mpi2EventDataIrConfigChangeList_t *) |
9332 | fw_event->event_data; |
9333 | |
9334 | if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && |
9335 | (!ioc->hide_ir_msg)) |
9336 | _scsih_sas_ir_config_change_event_debug(ioc, event_data); |
9337 | |
9338 | foreign_config = (le32_to_cpu(event_data->Flags) & |
9339 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; |
9340 | |
9341 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; |
9342 | if (ioc->shost_recovery && |
9343 | ioc->hba_mpi_version_belonged != MPI2_VERSION) { |
9344 | for (i = 0; i < event_data->NumElements; i++, element++) { |
9345 | if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) |
9346 | _scsih_ir_fastpath(ioc, |
9347 | le16_to_cpu(element->PhysDiskDevHandle), |
9348 | element->PhysDiskNum); |
9349 | } |
9350 | return; |
9351 | } |
9352 | |
9353 | for (i = 0; i < event_data->NumElements; i++, element++) { |
9354 | |
9355 | switch (element->ReasonCode) { |
9356 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: |
9357 | case MPI2_EVENT_IR_CHANGE_RC_ADDED: |
9358 | if (!foreign_config) |
9359 | _scsih_sas_volume_add(ioc, element); |
9360 | break; |
9361 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: |
9362 | case MPI2_EVENT_IR_CHANGE_RC_REMOVED: |
9363 | if (!foreign_config) |
9364 | _scsih_sas_volume_delete(ioc, |
9365 | le16_to_cpu(element->VolDevHandle)); |
9366 | break; |
9367 | case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: |
9368 | if (!ioc->is_warpdrive) |
9369 | _scsih_sas_pd_hide(ioc, element); |
9370 | break; |
9371 | case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: |
9372 | if (!ioc->is_warpdrive) |
9373 | _scsih_sas_pd_expose(ioc, element); |
9374 | break; |
9375 | case MPI2_EVENT_IR_CHANGE_RC_HIDE: |
9376 | if (!ioc->is_warpdrive) |
9377 | _scsih_sas_pd_add(ioc, element); |
9378 | break; |
9379 | case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: |
9380 | if (!ioc->is_warpdrive) |
9381 | _scsih_sas_pd_delete(ioc, element); |
9382 | break; |
9383 | } |
9384 | } |
9385 | } |
9386 | |
9387 | /** |
9388 | * _scsih_sas_ir_volume_event - IR volume event |
9389 | * @ioc: per adapter object |
9390 | * @fw_event: The fw_event_work object |
9391 | * Context: user. |
9392 | */ |
9393 | static void |
9394 | _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, |
9395 | struct fw_event_work *fw_event) |
9396 | { |
9397 | u64 wwid; |
9398 | unsigned long flags; |
9399 | struct _raid_device *raid_device; |
9400 | u16 handle; |
9401 | u32 state; |
9402 | int rc; |
9403 | Mpi2EventDataIrVolume_t *event_data = |
9404 | (Mpi2EventDataIrVolume_t *) fw_event->event_data; |
9405 | |
9406 | if (ioc->shost_recovery) |
9407 | return; |
9408 | |
9409 | if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) |
9410 | return; |
9411 | |
9412 | handle = le16_to_cpu(event_data->VolDevHandle); |
9413 | state = le32_to_cpu(event_data->NewValue); |
9414 | if (!ioc->hide_ir_msg) |
9415 | dewtprintk(ioc, |
9416 | ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", |
9417 | __func__, handle, |
9418 | le32_to_cpu(event_data->PreviousValue), |
9419 | state)); |
9420 | switch (state) { |
9421 | case MPI2_RAID_VOL_STATE_MISSING: |
9422 | case MPI2_RAID_VOL_STATE_FAILED: |
9423 | _scsih_sas_volume_delete(ioc, handle); |
9424 | break; |
9425 | |
9426 | case MPI2_RAID_VOL_STATE_ONLINE: |
9427 | case MPI2_RAID_VOL_STATE_DEGRADED: |
9428 | case MPI2_RAID_VOL_STATE_OPTIMAL: |
9429 | |
9430 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
9431 | raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); |
9432 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
9433 | |
9434 | if (raid_device) |
9435 | break; |
9436 | |
9437 | mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); |
9438 | if (!wwid) { |
9439 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9440 | __FILE__, __LINE__, __func__); |
9441 | break; |
9442 | } |
9443 | |
9444 | raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); |
9445 | if (!raid_device) { |
9446 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9447 | __FILE__, __LINE__, __func__); |
9448 | break; |
9449 | } |
9450 | |
9451 | raid_device->id = ioc->sas_id++; |
9452 | raid_device->channel = RAID_CHANNEL; |
9453 | raid_device->handle = handle; |
9454 | raid_device->wwid = wwid; |
9455 | _scsih_raid_device_add(ioc, raid_device); |
9456 | rc = scsi_add_device(ioc->shost, RAID_CHANNEL, |
9457 | raid_device->id, 0); |
9458 | if (rc) |
9459 | _scsih_raid_device_remove(ioc, raid_device); |
9460 | break; |
9461 | |
9462 | case MPI2_RAID_VOL_STATE_INITIALIZING: |
9463 | default: |
9464 | break; |
9465 | } |
9466 | } |
9467 | |
9468 | /** |
9469 | * _scsih_sas_ir_physical_disk_event - PD event |
9470 | * @ioc: per adapter object |
9471 | * @fw_event: The fw_event_work object |
9472 | * Context: user. |
9473 | */ |
9474 | static void |
9475 | _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, |
9476 | struct fw_event_work *fw_event) |
9477 | { |
9478 | u16 handle, parent_handle; |
9479 | u32 state; |
9480 | struct _sas_device *sas_device; |
9481 | Mpi2ConfigReply_t mpi_reply; |
9482 | Mpi2SasDevicePage0_t sas_device_pg0; |
9483 | u32 ioc_status; |
9484 | Mpi2EventDataIrPhysicalDisk_t *event_data = |
9485 | (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; |
9486 | u64 sas_address; |
9487 | |
9488 | if (ioc->shost_recovery) |
9489 | return; |
9490 | |
9491 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) |
9492 | return; |
9493 | |
9494 | handle = le16_to_cpu(event_data->PhysDiskDevHandle); |
9495 | state = le32_to_cpu(event_data->NewValue); |
9496 | |
9497 | if (!ioc->hide_ir_msg) |
9498 | dewtprintk(ioc, |
9499 | ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", |
9500 | __func__, handle, |
9501 | le32_to_cpu(event_data->PreviousValue), |
9502 | state)); |
9503 | |
9504 | switch (state) { |
9505 | case MPI2_RAID_PD_STATE_ONLINE: |
9506 | case MPI2_RAID_PD_STATE_DEGRADED: |
9507 | case MPI2_RAID_PD_STATE_REBUILDING: |
9508 | case MPI2_RAID_PD_STATE_OPTIMAL: |
9509 | case MPI2_RAID_PD_STATE_HOT_SPARE: |
9510 | |
9511 | if (!ioc->is_warpdrive) |
9512 | set_bit(handle, ioc->pd_handles); |
9513 | |
9514 | sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); |
9515 | if (sas_device) { |
9516 | sas_device_put(sas_device); |
9517 | return; |
9518 | } |
9519 | |
9520 | if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, |
9521 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, |
9522 | handle))) { |
9523 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9524 | __FILE__, __LINE__, __func__); |
9525 | return; |
9526 | } |
9527 | |
9528 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
9529 | MPI2_IOCSTATUS_MASK; |
9530 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
9531 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9532 | __FILE__, __LINE__, __func__); |
9533 | return; |
9534 | } |
9535 | |
9536 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); |
9537 | if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) |
9538 | mpt3sas_transport_update_links(ioc, sas_address, handle, |
9539 | sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, |
9540 | mpt3sas_get_port_by_id(ioc, |
9541 | sas_device_pg0.PhysicalPort, 0)); |
9542 | |
9543 | _scsih_add_device(ioc, handle, 0, 1); |
9544 | |
9545 | break; |
9546 | |
9547 | case MPI2_RAID_PD_STATE_OFFLINE: |
9548 | case MPI2_RAID_PD_STATE_NOT_CONFIGURED: |
9549 | case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: |
9550 | default: |
9551 | break; |
9552 | } |
9553 | } |
9554 | |
9555 | /** |
9556 | * _scsih_sas_ir_operation_status_event_debug - debug for IR op event |
9557 | * @ioc: per adapter object |
9558 | * @event_data: event data payload |
9559 | * Context: user. |
9560 | */ |
9561 | static void |
9562 | _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, |
9563 | Mpi2EventDataIrOperationStatus_t *event_data) |
9564 | { |
9565 | char *reason_str = NULL; |
9566 | |
9567 | switch (event_data->RAIDOperation) { |
9568 | case MPI2_EVENT_IR_RAIDOP_RESYNC: |
9569 | reason_str = "resync"; |
9570 | break; |
9571 | case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: |
9572 | reason_str = "online capacity expansion"; |
9573 | break; |
9574 | case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: |
9575 | reason_str = "consistency check"; |
9576 | break; |
9577 | case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: |
9578 | reason_str = "background init"; |
9579 | break; |
9580 | case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: |
9581 | reason_str = "make data consistent"; |
9582 | break; |
9583 | } |
9584 | |
9585 | if (!reason_str) |
9586 | return; |
9587 | |
9588 | ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", |
9589 | reason_str, |
9590 | le16_to_cpu(event_data->VolDevHandle), |
9591 | event_data->PercentComplete); |
9592 | } |
9593 | |
9594 | /** |
9595 | * _scsih_sas_ir_operation_status_event - handle RAID operation events |
9596 | * @ioc: per adapter object |
9597 | * @fw_event: The fw_event_work object |
9598 | * Context: user. |
9599 | */ |
9600 | static void |
9601 | _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, |
9602 | struct fw_event_work *fw_event) |
9603 | { |
9604 | Mpi2EventDataIrOperationStatus_t *event_data = |
9605 | (Mpi2EventDataIrOperationStatus_t *) |
9606 | fw_event->event_data; |
9607 | static struct _raid_device *raid_device; |
9608 | unsigned long flags; |
9609 | u16 handle; |
9610 | |
9611 | if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && |
9612 | (!ioc->hide_ir_msg)) |
9613 | _scsih_sas_ir_operation_status_event_debug(ioc, |
9614 | event_data); |
9615 | |
9616 | /* code added for raid transport support */ |
9617 | if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { |
9618 | |
9619 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
9620 | handle = le16_to_cpu(event_data->VolDevHandle); |
9621 | raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); |
9622 | if (raid_device) |
9623 | raid_device->percent_complete = |
9624 | event_data->PercentComplete; |
9625 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
9626 | } |
9627 | } |
9628 | |
9629 | /** |
9630 | * _scsih_prep_device_scan - initialize parameters prior to device scan |
9631 | * @ioc: per adapter object |
9632 | * |
9633 | * Set the deleted flag prior to device scan. If the device is found during |
9634 | * the scan, then we clear the deleted flag. |
9635 | */ |
9636 | static void |
9637 | _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) |
9638 | { |
9639 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
9640 | struct scsi_device *sdev; |
9641 | |
9642 | shost_for_each_device(sdev, ioc->shost) { |
9643 | sas_device_priv_data = sdev->hostdata; |
9644 | if (sas_device_priv_data && sas_device_priv_data->sas_target) |
9645 | sas_device_priv_data->sas_target->deleted = 1; |
9646 | } |
9647 | } |
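| |
| /* |
|  * The flag set above is the "mark" half of a mark-and-sweep pass around |
|  * host reset: the rescan that follows clears ->deleted (and sets |
|  * ->responding) on every target that answers, see the |
|  * _scsih_mark_responding_*() helpers below, and whatever is still flagged |
|  * afterwards is torn down by _scsih_remove_unresponding_devices() and |
|  * _scsih_error_recovery_delete_devices(). |
|  */ |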
9648 | |
9649 | /** |
9650 | * _scsih_update_device_qdepth - Update QD during Reset. |
9651 | * @ioc: per adapter object |
9652 | * |
9653 | */ |
9654 | static void |
9655 | _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc) |
9656 | { |
9657 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
9658 | struct MPT3SAS_TARGET *sas_target_priv_data; |
9659 | struct _sas_device *sas_device; |
9660 | struct scsi_device *sdev; |
9661 | u16 qdepth; |
9662 | |
9663 | ioc_info(ioc, "Update devices with firmware reported queue depth\n"); |
9664 | shost_for_each_device(sdev, ioc->shost) { |
9665 | sas_device_priv_data = sdev->hostdata; |
9666 | if (sas_device_priv_data && sas_device_priv_data->sas_target) { |
9667 | sas_target_priv_data = sas_device_priv_data->sas_target; |
9668 | sas_device = sas_device_priv_data->sas_target->sas_dev; |
9669 | if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) |
9670 | qdepth = ioc->max_nvme_qd; |
9671 | else if (sas_device && |
9672 | sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
9673 | qdepth = (sas_device->port_type > 1) ? |
9674 | ioc->max_wideport_qd : ioc->max_narrowport_qd; |
9675 | else if (sas_device && |
9676 | sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) |
9677 | qdepth = ioc->max_sata_qd; |
9678 | else |
9679 | continue; |
9680 | mpt3sas_scsih_change_queue_depth(sdev, qdepth); |
9681 | } |
9682 | } |
9683 | } |
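| |
| /* |
|  * Queue-depth selection above, summarized (the limits are the values |
|  * cached on the adapter object): |
|  * |
|  *	PCIe/NVMe target                  -> ioc->max_nvme_qd |
|  *	SSP target, port_type > 1 (wide)  -> ioc->max_wideport_qd |
|  *	SSP target, narrow port           -> ioc->max_narrowport_qd |
|  *	SATA target                       -> ioc->max_sata_qd |
|  *	anything else                     -> queue depth left unchanged |
|  */ |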
9684 | |
9685 | /** |
9686 | * _scsih_mark_responding_sas_device - mark a sas_device as responding |
9687 | * @ioc: per adapter object |
9688 | * @sas_device_pg0: SAS Device page 0 |
9689 | * |
9690 | * After host reset, find out whether devices are still responding. |
9691 | * Used in _scsih_remove_unresponding_devices. |
9692 | */ |
9693 | static void |
9694 | _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, |
9695 | Mpi2SasDevicePage0_t *sas_device_pg0) |
9696 | { |
9697 | struct MPT3SAS_TARGET *sas_target_priv_data = NULL; |
9698 | struct scsi_target *starget; |
9699 | struct _sas_device *sas_device = NULL; |
9700 | struct _enclosure_node *enclosure_dev = NULL; |
9701 | unsigned long flags; |
9702 | struct hba_port *port = mpt3sas_get_port_by_id( |
9703 | ioc, sas_device_pg0->PhysicalPort, 0); |
9704 | |
9705 | if (sas_device_pg0->EnclosureHandle) { |
9706 | enclosure_dev = |
9707 | mpt3sas_scsih_enclosure_find_by_handle(ioc, |
9708 | le16_to_cpu(sas_device_pg0->EnclosureHandle)); |
9709 | if (enclosure_dev == NULL) |
9710 | ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", |
9711 | sas_device_pg0->EnclosureHandle); |
9712 | } |
9713 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
9714 | list_for_each_entry(sas_device, &ioc->sas_device_list, list) { |
9715 | if (sas_device->sas_address != le64_to_cpu( |
9716 | sas_device_pg0->SASAddress)) |
9717 | continue; |
9718 | if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot)) |
9719 | continue; |
9720 | if (sas_device->port != port) |
9721 | continue; |
9722 | sas_device->responding = 1; |
9723 | starget = sas_device->starget; |
9724 | if (starget && starget->hostdata) { |
9725 | sas_target_priv_data = starget->hostdata; |
9726 | sas_target_priv_data->tm_busy = 0; |
9727 | sas_target_priv_data->deleted = 0; |
9728 | } else |
9729 | sas_target_priv_data = NULL; |
9730 | if (starget) { |
9731 | starget_printk(KERN_INFO, starget, |
9732 | "handle(0x%04x), sas_addr(0x%016llx)\n", |
9733 | le16_to_cpu(sas_device_pg0->DevHandle), |
9734 | (unsigned long long) |
9735 | sas_device->sas_address); |
9736 | |
9737 | if (sas_device->enclosure_handle != 0) |
9738 | starget_printk(KERN_INFO, starget, |
9739 | "enclosure logical id(0x%016llx), slot(%d)\n", |
9740 | (unsigned long long) |
9741 | sas_device->enclosure_logical_id, |
9742 | sas_device->slot); |
9743 | } |
9744 | if (le16_to_cpu(sas_device_pg0->Flags) & |
9745 | MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { |
9746 | sas_device->enclosure_level = |
9747 | sas_device_pg0->EnclosureLevel; |
9748 | memcpy(&sas_device->connector_name[0], |
9749 | &sas_device_pg0->ConnectorName[0], 4); |
9750 | } else { |
9751 | sas_device->enclosure_level = 0; |
9752 | sas_device->connector_name[0] = '\0'; |
9753 | } |
9754 | |
9755 | sas_device->enclosure_handle = |
9756 | le16_to_cpu(sas_device_pg0->EnclosureHandle); |
9757 | sas_device->is_chassis_slot_valid = 0; |
9758 | if (enclosure_dev) { |
9759 | sas_device->enclosure_logical_id = le64_to_cpu( |
9760 | enclosure_dev->pg0.EnclosureLogicalID); |
9761 | if (le16_to_cpu(enclosure_dev->pg0.Flags) & |
9762 | MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { |
9763 | sas_device->is_chassis_slot_valid = 1; |
9764 | sas_device->chassis_slot = |
9765 | enclosure_dev->pg0.ChassisSlot; |
9766 | } |
9767 | } |
9768 | |
9769 | if (sas_device->handle == le16_to_cpu( |
9770 | sas_device_pg0->DevHandle)) |
9771 | goto out; |
9772 | pr_info("\thandle changed from(0x%04x)!!!\n", |
9773 | sas_device->handle); |
9774 | sas_device->handle = le16_to_cpu( |
9775 | sas_device_pg0->DevHandle); |
9776 | if (sas_target_priv_data) |
9777 | sas_target_priv_data->handle = |
9778 | le16_to_cpu(sas_device_pg0->DevHandle); |
9779 | goto out; |
9780 | } |
9781 | out: |
9782 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
9783 | } |
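| |
| /* |
|  * Worked example with made-up handles: a disk that was DevHandle 0x0009 |
|  * before the reset may come back as 0x0011 while keeping the same SAS |
|  * address, slot and port.  The match above is therefore done on that |
|  * stable identity, and only the cached handle in the _sas_device and in |
|  * the target's private data is rewritten; the SCSI midlayer target is |
|  * reused as-is. |
|  */ |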
9784 | |
9785 | /** |
9786 | * _scsih_create_enclosure_list_after_reset - Free Existing list, |
9787 | * And create enclosure list by scanning all Enclosure Page(0)s |
9788 | * @ioc: per adapter object |
9789 | */ |
9790 | static void |
9791 | _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) |
9792 | { |
9793 | struct _enclosure_node *enclosure_dev; |
9794 | Mpi2ConfigReply_t mpi_reply; |
9795 | u16 enclosure_handle; |
9796 | int rc; |
9797 | |
9798 | /* Free existing enclosure list */ |
9799 | mpt3sas_free_enclosure_list(ioc); |
9800 | |
9801 | /* Reconstruct the enclosure list after reset */ |
9802 | enclosure_handle = 0xFFFF; |
9803 | do { |
9804 | enclosure_dev = |
9805 | kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL); |
9806 | if (!enclosure_dev) { |
9807 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
9808 | __FILE__, __LINE__, __func__); |
9809 | return; |
9810 | } |
9811 | rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, |
9812 | &enclosure_dev->pg0, |
9813 | MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE, |
9814 | enclosure_handle); |
9815 | |
9816 | if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & |
9817 | MPI2_IOCSTATUS_MASK)) { |
9818 | kfree(enclosure_dev); |
9819 | return; |
9820 | } |
9821 | list_add_tail(&enclosure_dev->list, |
9822 | &ioc->enclosure_list); |
9823 | enclosure_handle = |
9824 | le16_to_cpu(enclosure_dev->pg0.EnclosureHandle); |
9825 | } while (1); |
9826 | } |
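| |
| /* |
|  * Illustrative sketch, not compiled: the config-page walk above (and in |
|  * the search/scan helpers that follow) uses the same GET_NEXT_HANDLE |
|  * idiom - start from 0xFFFF, feed back the handle returned in the previous |
|  * page, and stop on a non-SUCCESS IOCStatus: |
|  * |
|  *	handle = 0xFFFF; |
|  *	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &pg0, |
|  *	    MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) { |
|  *		if ((le16_to_cpu(mpi_reply.IOCStatus) & |
|  *		    MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) |
|  *			break; |
|  *		handle = le16_to_cpu(pg0.DevHandle); |
|  *		... consume pg0 ... |
|  *	} |
|  */ |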
9827 | |
9828 | /** |
9829 | * _scsih_search_responding_sas_devices - search for responding SAS end devices |
9830 | * @ioc: per adapter object |
9831 | * |
9832 | * After host reset, find out whether devices are still responding. |
9833 | * If not remove. |
9834 | */ |
9835 | static void |
9836 | _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) |
9837 | { |
9838 | Mpi2SasDevicePage0_t sas_device_pg0; |
9839 | Mpi2ConfigReply_t mpi_reply; |
9840 | u16 ioc_status; |
9841 | u16 handle; |
9842 | u32 device_info; |
9843 | |
9844 | ioc_info(ioc, "search for end-devices: start\n"); |
9845 | |
9846 | if (list_empty(&ioc->sas_device_list)) |
9847 | goto out; |
9848 | |
9849 | handle = 0xFFFF; |
9850 | while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, |
9851 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, |
9852 | handle))) { |
9853 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
9854 | MPI2_IOCSTATUS_MASK; |
9855 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
9856 | break; |
9857 | handle = le16_to_cpu(sas_device_pg0.DevHandle); |
9858 | device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); |
9859 | if (!(_scsih_is_end_device(device_info))) |
9860 | continue; |
9861 | _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); |
9862 | } |
9863 | |
9864 | out: |
9865 | ioc_info(ioc, "search for end-devices: complete\n"); |
9866 | } |
9867 | |
9868 | /** |
9869 | * _scsih_mark_responding_pcie_device - mark a pcie_device as responding |
9870 | * @ioc: per adapter object |
9871 | * @pcie_device_pg0: PCIe Device page 0 |
9872 | * |
9873 | * After host reset, find out whether devices are still responding. |
9874 | * Used in _scsih_remove_unresponding_devices. |
9875 | */ |
9876 | static void |
9877 | _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, |
9878 | Mpi26PCIeDevicePage0_t *pcie_device_pg0) |
9879 | { |
9880 | struct MPT3SAS_TARGET *sas_target_priv_data = NULL; |
9881 | struct scsi_target *starget; |
9882 | struct _pcie_device *pcie_device; |
9883 | unsigned long flags; |
9884 | |
9885 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
9886 | list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { |
9887 | if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) |
9888 | && (pcie_device->slot == le16_to_cpu( |
9889 | pcie_device_pg0->Slot))) { |
9890 | pcie_device->access_status = |
9891 | pcie_device_pg0->AccessStatus; |
9892 | pcie_device->responding = 1; |
9893 | starget = pcie_device->starget; |
9894 | if (starget && starget->hostdata) { |
9895 | sas_target_priv_data = starget->hostdata; |
9896 | sas_target_priv_data->tm_busy = 0; |
9897 | sas_target_priv_data->deleted = 0; |
9898 | } else |
9899 | sas_target_priv_data = NULL; |
9900 | if (starget) { |
9901 | starget_printk(KERN_INFO, starget, |
9902 | "handle(0x%04x), wwid(0x%016llx) ", |
9903 | pcie_device->handle, |
9904 | (unsigned long long)pcie_device->wwid); |
9905 | if (pcie_device->enclosure_handle != 0) |
9906 | starget_printk(KERN_INFO, starget, |
9907 | "enclosure logical id(0x%016llx), " |
9908 | "slot(%d)\n", |
9909 | (unsigned long long) |
9910 | pcie_device->enclosure_logical_id, |
9911 | pcie_device->slot); |
9912 | } |
9913 | |
9914 | if (((le32_to_cpu(pcie_device_pg0->Flags)) & |
9915 | MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && |
9916 | (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { |
9917 | pcie_device->enclosure_level = |
9918 | pcie_device_pg0->EnclosureLevel; |
9919 | memcpy(&pcie_device->connector_name[0], |
9920 | &pcie_device_pg0->ConnectorName[0], 4); |
9921 | } else { |
9922 | pcie_device->enclosure_level = 0; |
9923 | pcie_device->connector_name[0] = '\0'; |
9924 | } |
9925 | |
9926 | if (pcie_device->handle == le16_to_cpu( |
9927 | pcie_device_pg0->DevHandle)) |
9928 | goto out; |
9929 | pr_info("\thandle changed from(0x%04x)!!!\n", |
9930 | pcie_device->handle); |
9931 | pcie_device->handle = le16_to_cpu( |
9932 | pcie_device_pg0->DevHandle); |
9933 | if (sas_target_priv_data) |
9934 | sas_target_priv_data->handle = |
9935 | le16_to_cpu(pcie_device_pg0->DevHandle); |
9936 | goto out; |
9937 | } |
9938 | } |
9939 | |
9940 | out: |
9941 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
9942 | } |
9943 | |
9944 | /** |
9945 | * _scsih_search_responding_pcie_devices - search for responding PCIe end devices |
9946 | * @ioc: per adapter object |
9947 | * |
9948 | * After host reset, find out whether devices are still responding. |
9949 | * If not remove. |
9950 | */ |
9951 | static void |
9952 | _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) |
9953 | { |
9954 | Mpi26PCIeDevicePage0_t pcie_device_pg0; |
9955 | Mpi2ConfigReply_t mpi_reply; |
9956 | u16 ioc_status; |
9957 | u16 handle; |
9958 | u32 device_info; |
9959 | |
9960 | ioc_info(ioc, "search for PCIe end-devices: start\n"); |
9961 | |
9962 | if (list_empty(&ioc->pcie_device_list)) |
9963 | goto out; |
9964 | |
9965 | handle = 0xFFFF; |
9966 | while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, |
9967 | &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, |
9968 | handle))) { |
9969 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
9970 | MPI2_IOCSTATUS_MASK; |
9971 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
9972 | ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", |
9973 | __func__, ioc_status, |
9974 | le32_to_cpu(mpi_reply.IOCLogInfo)); |
9975 | break; |
9976 | } |
9977 | handle = le16_to_cpu(pcie_device_pg0.DevHandle); |
9978 | device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); |
9979 | if (!(_scsih_is_nvme_pciescsi_device(device_info))) |
9980 | continue; |
9981 | _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); |
9982 | } |
9983 | out: |
9984 | ioc_info(ioc, "search for PCIe end-devices: complete\n"); |
9985 | } |
9986 | |
9987 | /** |
9988 | * _scsih_mark_responding_raid_device - mark a raid_device as responding |
9989 | * @ioc: per adapter object |
9990 | * @wwid: world wide identifier for raid volume |
9991 | * @handle: device handle |
9992 | * |
9993 | * After host reset, find out whether devices are still responding. |
9994 | * Used in _scsih_remove_unresponding_devices. |
9995 | */ |
9996 | static void |
9997 | _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, |
9998 | u16 handle) |
9999 | { |
10000 | struct MPT3SAS_TARGET *sas_target_priv_data = NULL; |
10001 | struct scsi_target *starget; |
10002 | struct _raid_device *raid_device; |
10003 | unsigned long flags; |
10004 | |
10005 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
10006 | list_for_each_entry(raid_device, &ioc->raid_device_list, list) { |
10007 | if (raid_device->wwid == wwid && raid_device->starget) { |
10008 | starget = raid_device->starget; |
10009 | if (starget && starget->hostdata) { |
10010 | sas_target_priv_data = starget->hostdata; |
10011 | sas_target_priv_data->deleted = 0; |
10012 | } else |
10013 | sas_target_priv_data = NULL; |
10014 | raid_device->responding = 1; |
10015 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
10016 | starget_printk(KERN_INFO, raid_device->starget, |
10017 | "handle(0x%04x), wwid(0x%016llx)\n", handle, |
10018 | (unsigned long long)raid_device->wwid); |
10019 | |
10020 | /* |
10021 | * WARPDRIVE: The handles of the PDs might have changed |
10022 | * across the host reset so re-initialize the |
10023 | * required data for Direct IO |
10024 | */ |
10025 | mpt3sas_init_warpdrive_properties(ioc, raid_device); |
10026 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
10027 | if (raid_device->handle == handle) { |
10028 | spin_unlock_irqrestore(&ioc->raid_device_lock, |
10029 | flags); |
10030 | return; |
10031 | } |
10032 | pr_info("\thandle changed from(0x%04x)!!!\n", |
10033 | raid_device->handle); |
10034 | raid_device->handle = handle; |
10035 | if (sas_target_priv_data) |
10036 | sas_target_priv_data->handle = handle; |
10037 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
10038 | return; |
10039 | } |
10040 | } |
10041 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
10042 | } |
10043 | |
10044 | /** |
10045 | * _scsih_search_responding_raid_devices - search for responding raid volumes |
10046 | * @ioc: per adapter object |
10047 | * |
10048 | * After host reset, find out whether devices are still responding. |
10049 | * If not remove. |
10050 | */ |
10051 | static void |
10052 | _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) |
10053 | { |
10054 | Mpi2RaidVolPage1_t volume_pg1; |
10055 | Mpi2RaidVolPage0_t volume_pg0; |
10056 | Mpi2RaidPhysDiskPage0_t pd_pg0; |
10057 | Mpi2ConfigReply_t mpi_reply; |
10058 | u16 ioc_status; |
10059 | u16 handle; |
10060 | u8 phys_disk_num; |
10061 | |
10062 | if (!ioc->ir_firmware) |
10063 | return; |
10064 | |
10065 | ioc_info(ioc, "search for raid volumes: start\n"); |
10066 | |
10067 | if (list_empty(&ioc->raid_device_list)) |
10068 | goto out; |
10069 | |
10070 | handle = 0xFFFF; |
10071 | while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, |
10072 | &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { |
10073 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10074 | MPI2_IOCSTATUS_MASK; |
10075 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
10076 | break; |
10077 | handle = le16_to_cpu(volume_pg1.DevHandle); |
10078 | |
10079 | if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, |
10080 | &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, |
10081 | sizeof(Mpi2RaidVolPage0_t))) |
10082 | continue; |
10083 | |
10084 | if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || |
10085 | volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || |
10086 | volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) |
10087 | _scsih_mark_responding_raid_device(ioc, |
10088 | le64_to_cpu(volume_pg1.WWID), handle); |
10089 | } |
10090 | |
10091 | /* refresh the pd_handles */ |
10092 | if (!ioc->is_warpdrive) { |
10093 | phys_disk_num = 0xFF; |
10094 | memset(ioc->pd_handles, 0, ioc->pd_handles_sz); |
10095 | while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, |
10096 | &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, |
10097 | phys_disk_num))) { |
10098 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10099 | MPI2_IOCSTATUS_MASK; |
10100 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
10101 | break; |
10102 | phys_disk_num = pd_pg0.PhysDiskNum; |
10103 | handle = le16_to_cpu(pd_pg0.DevHandle); |
10104 | set_bit(handle, ioc->pd_handles); |
10105 | } |
10106 | } |
10107 | out: |
10108 | ioc_info(ioc, "search for responding raid volumes: complete\n"); |
10109 | } |
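| |
| /* |
|  * Note on the pd_handles refresh above: device handles may be renumbered |
|  * across a reset, so the bitmap is rebuilt from scratch (memset, then one |
|  * set_bit() per PhysDisk Page 0 found) rather than patched in place. |
|  */ |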
10110 | |
10111 | /** |
10112 | * _scsih_mark_responding_expander - mark an expander as responding |
10113 | * @ioc: per adapter object |
10114 | * @expander_pg0:SAS Expander Config Page0 |
10115 | * |
10116 | * After host reset, find out whether devices are still responding. |
10117 | * Used in _scsih_remove_unresponding_devices. |
10118 | */ |
10119 | static void |
10120 | _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, |
10121 | Mpi2ExpanderPage0_t *expander_pg0) |
10122 | { |
10123 | struct _sas_node *sas_expander = NULL; |
10124 | unsigned long flags; |
10125 | int i; |
10126 | struct _enclosure_node *enclosure_dev = NULL; |
10127 | u16 handle = le16_to_cpu(expander_pg0->DevHandle); |
10128 | u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); |
10129 | u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); |
10130 | struct hba_port *port = mpt3sas_get_port_by_id( |
10131 | ioc, expander_pg0->PhysicalPort, 0); |
10132 | |
10133 | if (enclosure_handle) |
10134 | enclosure_dev = |
10135 | mpt3sas_scsih_enclosure_find_by_handle(ioc, |
10136 | enclosure_handle); |
10137 | |
10138 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
10139 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { |
10140 | if (sas_expander->sas_address != sas_address) |
10141 | continue; |
10142 | if (sas_expander->port != port) |
10143 | continue; |
10144 | sas_expander->responding = 1; |
10145 | |
10146 | if (enclosure_dev) { |
10147 | sas_expander->enclosure_logical_id = |
10148 | le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); |
10149 | sas_expander->enclosure_handle = |
10150 | le16_to_cpu(expander_pg0->EnclosureHandle); |
10151 | } |
10152 | |
10153 | if (sas_expander->handle == handle) |
10154 | goto out; |
10155 | pr_info("\texpander(0x%016llx): handle changed"\ |
10156 | " from(0x%04x) to (0x%04x)!!!\n", |
10157 | (unsigned long long)sas_expander->sas_address, |
10158 | sas_expander->handle, handle); |
10159 | sas_expander->handle = handle; |
10160 | for (i = 0 ; i < sas_expander->num_phys ; i++) |
10161 | sas_expander->phy[i].handle = handle; |
10162 | goto out; |
10163 | } |
10164 | out: |
10165 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
10166 | } |
10167 | |
10168 | /** |
10169 | * _scsih_search_responding_expanders - search for responding expanders |
10170 | * @ioc: per adapter object |
10171 | * |
10172 | * After host reset, find out whether devices are still responding. |
10173 | * If not remove. |
10174 | */ |
10175 | static void |
10176 | _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) |
10177 | { |
10178 | Mpi2ExpanderPage0_t expander_pg0; |
10179 | Mpi2ConfigReply_t mpi_reply; |
10180 | u16 ioc_status; |
10181 | u64 sas_address; |
10182 | u16 handle; |
10183 | u8 port; |
10184 | |
10185 | ioc_info(ioc, "search for expanders: start\n"); |
10186 | |
10187 | if (list_empty(&ioc->sas_expander_list)) |
10188 | goto out; |
10189 | |
10190 | handle = 0xFFFF; |
10191 | while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, |
10192 | MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { |
10193 | |
10194 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10195 | MPI2_IOCSTATUS_MASK; |
10196 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
10197 | break; |
10198 | |
10199 | handle = le16_to_cpu(expander_pg0.DevHandle); |
10200 | sas_address = le64_to_cpu(expander_pg0.SASAddress); |
10201 | port = expander_pg0.PhysicalPort; |
10202 | pr_info( |
10203 | "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", |
10204 | handle, (unsigned long long)sas_address, |
10205 | (ioc->multipath_on_hba ? |
10206 | port : MULTIPATH_DISABLED_PORT_ID)); |
10207 | _scsih_mark_responding_expander(ioc, &expander_pg0); |
10208 | } |
10209 | |
10210 | out: |
10211 | ioc_info(ioc, "search for expanders: complete\n"); |
10212 | } |
10213 | |
10214 | /** |
10215 | * _scsih_remove_unresponding_devices - removing unresponding devices |
10216 | * @ioc: per adapter object |
10217 | */ |
10218 | static void |
10219 | _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) |
10220 | { |
10221 | struct _sas_device *sas_device, *sas_device_next; |
10222 | struct _sas_node *sas_expander, *sas_expander_next; |
10223 | struct _raid_device *raid_device, *raid_device_next; |
10224 | struct _pcie_device *pcie_device, *pcie_device_next; |
10225 | struct list_head tmp_list; |
10226 | unsigned long flags; |
10227 | LIST_HEAD(head); |
10228 | |
10229 | ioc_info(ioc, "removing unresponding devices: start\n"); |
10230 | |
10231 | /* removing unresponding end devices */ |
10232 | ioc_info(ioc, "removing unresponding devices: end-devices\n"); |
10233 | /* |
10234 | * Iterate, pulling off devices marked as non-responding. We become the |
10235 | * owner for the reference the list had on any object we prune. |
10236 | */ |
10237 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
10238 | |
10239 | /* |
10240 | * Clean up the sas_device_init_list list as |
10241 | * driver goes for fresh scan as part of diag reset. |
10242 | */ |
10243 | list_for_each_entry_safe(sas_device, sas_device_next, |
10244 | &ioc->sas_device_init_list, list) { |
10245 | list_del_init(&sas_device->list); |
10246 | sas_device_put(sas_device); |
10247 | } |
10248 | |
10249 | list_for_each_entry_safe(sas_device, sas_device_next, |
10250 | &ioc->sas_device_list, list) { |
10251 | if (!sas_device->responding) |
10252 | list_move_tail(&sas_device->list, &head); |
10253 | else |
10254 | sas_device->responding = 0; |
10255 | } |
10256 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
10257 | |
10258 | /* |
10259 | * Now, uninitialize and remove the unresponding devices we pruned. |
10260 | */ |
10261 | list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { |
10262 | _scsih_remove_device(ioc, sas_device); |
10263 | list_del_init(&sas_device->list); |
10264 | sas_device_put(sas_device); |
10265 | } |
10266 | |
10267 | ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n"); |
10268 | INIT_LIST_HEAD(&head); |
10269 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
10270 | /* |
10271 | * Clean up the pcie_device_init_list list as |
10272 | * driver goes for fresh scan as part of diag reset. |
10273 | */ |
10274 | list_for_each_entry_safe(pcie_device, pcie_device_next, |
10275 | &ioc->pcie_device_init_list, list) { |
10276 | list_del_init(&pcie_device->list); |
10277 | pcie_device_put(pcie_device); |
10278 | } |
10279 | |
10280 | list_for_each_entry_safe(pcie_device, pcie_device_next, |
10281 | &ioc->pcie_device_list, list) { |
10282 | if (!pcie_device->responding) |
10283 | list_move_tail(&pcie_device->list, &head); |
10284 | else |
10285 | pcie_device->responding = 0; |
10286 | } |
10287 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); |
10288 | |
10289 | list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) { |
10290 | _scsih_pcie_device_remove_from_sml(ioc, pcie_device); |
10291 | list_del_init(&pcie_device->list); |
10292 | pcie_device_put(pcie_device); |
10293 | } |
10294 | |
10295 | /* removing unresponding volumes */ |
10296 | if (ioc->ir_firmware) { |
10297 | ioc_info(ioc, "removing unresponding devices: volumes\n"); |
10298 | list_for_each_entry_safe(raid_device, raid_device_next, |
10299 | &ioc->raid_device_list, list) { |
10300 | if (!raid_device->responding) |
10301 | _scsih_sas_volume_delete(ioc, |
10302 | raid_device->handle); |
10303 | else |
10304 | raid_device->responding = 0; |
10305 | } |
10306 | } |
10307 | |
10308 | /* removing unresponding expanders */ |
10309 | ioc_info(ioc, "removing unresponding devices: expanders\n"); |
10310 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
10311 | INIT_LIST_HEAD(&tmp_list); |
10312 | list_for_each_entry_safe(sas_expander, sas_expander_next, |
10313 | &ioc->sas_expander_list, list) { |
10314 | if (!sas_expander->responding) |
10315 | list_move_tail(&sas_expander->list, &tmp_list); |
10316 | else |
10317 | sas_expander->responding = 0; |
10318 | } |
10319 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
10320 | list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, |
10321 | list) { |
10322 | _scsih_expander_node_remove(ioc, sas_expander); |
10323 | } |
10324 | |
10325 | ioc_info(ioc, "removing unresponding devices: complete\n"); |
10326 | |
10327 | /* unblock devices */ |
10328 | _scsih_ublock_io_all_device(ioc); |
10329 | } |
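| |
| /* |
|  * Illustrative sketch, not compiled: the prune pattern used above - detach |
|  * stale entries onto a private list while holding the lock, then do the |
|  * potentially sleeping teardown outside it, dropping the reference the |
|  * list used to own: |
|  * |
|  *	spin_lock_irqsave(&ioc->sas_device_lock, flags); |
|  *	list_for_each_entry_safe(sas_device, sas_device_next, |
|  *	    &ioc->sas_device_list, list) |
|  *		if (!sas_device->responding) |
|  *			list_move_tail(&sas_device->list, &head); |
|  *	spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
|  * |
|  *	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { |
|  *		_scsih_remove_device(ioc, sas_device); |
|  *		list_del_init(&sas_device->list); |
|  *		sas_device_put(sas_device); |
|  *	} |
|  */ |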
10330 | |
10331 | static void |
10332 | _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, |
10333 | struct _sas_node *sas_expander, u16 handle) |
10334 | { |
10335 | Mpi2ExpanderPage1_t expander_pg1; |
10336 | Mpi2ConfigReply_t mpi_reply; |
10337 | int i; |
10338 | |
10339 | for (i = 0 ; i < sas_expander->num_phys ; i++) { |
10340 | if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, |
10341 | &expander_pg1, i, handle))) { |
10342 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
10343 | __FILE__, __LINE__, __func__); |
10344 | return; |
10345 | } |
10346 | |
10347 | mpt3sas_transport_update_links(ioc, sas_expander->sas_address, |
10348 | le16_to_cpu(expander_pg1.AttachedDevHandle), i, |
10349 | expander_pg1.NegotiatedLinkRate >> 4, |
10350 | sas_expander->port); |
10351 | } |
10352 | } |
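| |
| /* |
|  * The ">> 4" above picks the logical link-rate nibble out of Expander |
|  * Page 1's NegotiatedLinkRate field (the physical rate occupies the low |
|  * nibble) before republishing the phy links through |
|  * mpt3sas_transport_update_links(). |
|  */ |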
10353 | |
10354 | /** |
10355 | * _scsih_scan_for_devices_after_reset - scan for devices after host reset |
10356 | * @ioc: per adapter object |
10357 | */ |
10358 | static void |
10359 | _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) |
10360 | { |
10361 | Mpi2ExpanderPage0_t expander_pg0; |
10362 | Mpi2SasDevicePage0_t sas_device_pg0; |
10363 | Mpi26PCIeDevicePage0_t pcie_device_pg0; |
10364 | Mpi2RaidVolPage1_t volume_pg1; |
10365 | Mpi2RaidVolPage0_t volume_pg0; |
10366 | Mpi2RaidPhysDiskPage0_t pd_pg0; |
10367 | Mpi2EventIrConfigElement_t element; |
10368 | Mpi2ConfigReply_t mpi_reply; |
10369 | u8 phys_disk_num, port_id; |
10370 | u16 ioc_status; |
10371 | u16 handle, parent_handle; |
10372 | u64 sas_address; |
10373 | struct _sas_device *sas_device; |
10374 | struct _pcie_device *pcie_device; |
10375 | struct _sas_node *expander_device; |
10376 | static struct _raid_device *raid_device; |
10377 | u8 retry_count; |
10378 | unsigned long flags; |
10379 | |
10380 | ioc_info(ioc, "scan devices: start\n"); |
10381 | |
10382 | _scsih_sas_host_refresh(ioc); |
10383 | |
10384 | ioc_info(ioc, "\tscan devices: expanders start\n"); |
10385 | |
10386 | /* expanders */ |
10387 | handle = 0xFFFF; |
10388 | while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, |
10389 | MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { |
10390 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10391 | MPI2_IOCSTATUS_MASK; |
10392 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10393 | ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", |
10394 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10395 | break; |
10396 | } |
10397 | handle = le16_to_cpu(expander_pg0.DevHandle); |
10398 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
10399 | port_id = expander_pg0.PhysicalPort; |
10400 | expander_device = mpt3sas_scsih_expander_find_by_sas_address( |
10401 | ioc, le64_to_cpu(expander_pg0.SASAddress), |
10402 | mpt3sas_get_port_by_id(ioc, port_id, 0)); |
10403 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
10404 | if (expander_device) |
10405 | _scsih_refresh_expander_links(ioc, expander_device, |
10406 | handle); |
10407 | else { |
10408 | ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", |
10409 | handle, |
10410 | (u64)le64_to_cpu(expander_pg0.SASAddress)); |
10411 | _scsih_expander_add(ioc, handle); |
10412 | ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", |
10413 | handle, |
10414 | (u64)le64_to_cpu(expander_pg0.SASAddress)); |
10415 | } |
10416 | } |
10417 | |
10418 | ioc_info(ioc, "\tscan devices: expanders complete\n"); |
10419 | |
10420 | if (!ioc->ir_firmware) |
10421 | goto skip_to_sas; |
10422 | |
10423 | ioc_info(ioc, "\tscan devices: phys disk start\n"); |
10424 | |
10425 | /* phys disk */ |
10426 | phys_disk_num = 0xFF; |
10427 | while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, |
10428 | &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, |
10429 | phys_disk_num))) { |
10430 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10431 | MPI2_IOCSTATUS_MASK; |
10432 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10433 | ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", |
10434 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10435 | break; |
10436 | } |
10437 | phys_disk_num = pd_pg0.PhysDiskNum; |
10438 | handle = le16_to_cpu(pd_pg0.DevHandle); |
10439 | sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); |
10440 | if (sas_device) { |
10441 | sas_device_put(sas_device); |
10442 | continue; |
10443 | } |
10444 | if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, |
10445 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, |
10446 | handle) != 0) |
10447 | continue; |
10448 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10449 | MPI2_IOCSTATUS_MASK; |
10450 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10451 | ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", |
10452 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10453 | break; |
10454 | } |
10455 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); |
10456 | if (!_scsih_get_sas_address(ioc, parent_handle, |
10457 | &sas_address)) { |
10458 | ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", |
10459 | handle, |
10460 | (u64)le64_to_cpu(sas_device_pg0.SASAddress)); |
10461 | port_id = sas_device_pg0.PhysicalPort; |
10462 | mpt3sas_transport_update_links(ioc, sas_address, |
10463 | handle, sas_device_pg0.PhyNum, |
10464 | MPI2_SAS_NEG_LINK_RATE_1_5, |
10465 | mpt3sas_get_port_by_id(ioc, port_id, 0)); |
10466 | set_bit(handle, ioc->pd_handles); |
10467 | retry_count = 0; |
10468 | /* This will retry adding the end device. |
10469 | * _scsih_add_device() will decide on retries and |
10470 | * return "1" when it should be retried |
10471 | */ |
10472 | while (_scsih_add_device(ioc, handle, retry_count++, |
10473 | 1)) { |
10474 | ssleep(1); |
10475 | } |
10476 | ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", |
10477 | handle, |
10478 | (u64)le64_to_cpu(sas_device_pg0.SASAddress)); |
10479 | } |
10480 | } |
10481 | |
10482 | ioc_info(ioc, "\tscan devices: phys disk complete\n"); |
10483 | |
10484 | ioc_info(ioc, "\tscan devices: volumes start\n"); |
10485 | |
10486 | /* volumes */ |
10487 | handle = 0xFFFF; |
10488 | while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, |
10489 | &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { |
10490 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10491 | MPI2_IOCSTATUS_MASK; |
10492 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10493 | ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", |
10494 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10495 | break; |
10496 | } |
10497 | handle = le16_to_cpu(volume_pg1.DevHandle); |
10498 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
10499 | raid_device = _scsih_raid_device_find_by_wwid(ioc, |
10500 | le64_to_cpu(volume_pg1.WWID)); |
10501 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
10502 | if (raid_device) |
10503 | continue; |
10504 | if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, |
10505 | &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, |
10506 | sizeof(Mpi2RaidVolPage0_t))) |
10507 | continue; |
10508 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10509 | MPI2_IOCSTATUS_MASK; |
10510 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10511 | ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", |
10512 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10513 | break; |
10514 | } |
10515 | if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || |
10516 | volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || |
10517 | volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { |
10518 | memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); |
10519 | element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; |
10520 | element.VolDevHandle = volume_pg1.DevHandle; |
10521 | ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", |
10522 | volume_pg1.DevHandle); |
10523 | _scsih_sas_volume_add(ioc, &element); |
10524 | ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", |
10525 | volume_pg1.DevHandle); |
10526 | } |
10527 | } |
10528 | |
10529 | ioc_info(ioc, "\tscan devices: volumes complete\n"); |
10530 | |
10531 | skip_to_sas: |
10532 | |
10533 | ioc_info(ioc, "\tscan devices: end devices start\n"); |
10534 | |
10535 | /* sas devices */ |
10536 | handle = 0xFFFF; |
10537 | while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, |
10538 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, |
10539 | handle))) { |
10540 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
10541 | MPI2_IOCSTATUS_MASK; |
10542 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10543 | ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", |
10544 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10545 | break; |
10546 | } |
10547 | handle = le16_to_cpu(sas_device_pg0.DevHandle); |
10548 | if (!(_scsih_is_end_device( |
10549 | le32_to_cpu(sas_device_pg0.DeviceInfo)))) |
10550 | continue; |
10551 | port_id = sas_device_pg0.PhysicalPort; |
10552 | sas_device = mpt3sas_get_sdev_by_addr(ioc, |
10553 | le64_to_cpu(sas_device_pg0.SASAddress), |
10554 | mpt3sas_get_port_by_id(ioc, port_id, 0)); |
10555 | if (sas_device) { |
10556 | sas_device_put(sas_device); |
10557 | continue; |
10558 | } |
10559 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); |
10560 | if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { |
10561 | ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", |
10562 | handle, |
10563 | (u64)le64_to_cpu(sas_device_pg0.SASAddress)); |
10564 | mpt3sas_transport_update_links(ioc, sas_address, handle, |
10565 | sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, |
10566 | mpt3sas_get_port_by_id(ioc, port_id, 0)); |
10567 | retry_count = 0; |
10568 | /* This will retry adding the end device. |
10569 | * _scsih_add_device() will decide on retries and |
10570 | * return "1" when it should be retried |
10571 | */ |
10572 | while (_scsih_add_device(ioc, handle, retry_count++, |
10573 | 0)) { |
10574 | ssleep(1); |
10575 | } |
10576 | ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", |
10577 | handle, |
10578 | (u64)le64_to_cpu(sas_device_pg0.SASAddress)); |
10579 | } |
10580 | } |
10581 | ioc_info(ioc, "\tscan devices: end devices complete\n"); |
10582 | ioc_info(ioc, "\tscan devices: pcie end devices start\n"); |
10583 | |
10584 | /* pcie devices */ |
10585 | handle = 0xFFFF; |
10586 | while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, |
10587 | &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, |
10588 | handle))) { |
10589 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) |
10590 | & MPI2_IOCSTATUS_MASK; |
10591 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
10592 | ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", |
10593 | ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); |
10594 | break; |
10595 | } |
10596 | handle = le16_to_cpu(pcie_device_pg0.DevHandle); |
10597 | if (!(_scsih_is_nvme_pciescsi_device( |
10598 | le32_to_cpu(pcie_device_pg0.DeviceInfo)))) |
10599 | continue; |
10600 | pcie_device = mpt3sas_get_pdev_by_wwid(ioc, |
10601 | le64_to_cpu(pcie_device_pg0.WWID)); |
10602 | if (pcie_device) { |
10603 | pcie_device_put(pcie_device); |
10604 | continue; |
10605 | } |
10606 | retry_count = 0; |
10607 | parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); |
10608 | _scsih_pcie_add_device(ioc, handle); |
10609 | |
10610 | ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", |
10611 | handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); |
10612 | } |
10613 | |
10614 | ioc_info(ioc, "\tscan devices: pcie end devices complete\n"); |
10615 | ioc_info(ioc, "scan devices: complete\n"); |
10616 | } |
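| |
| /* |
|  * Post-reset discovery order used above: the SAS host and expanders first |
|  * (so parent handles and ports exist), then - only with IR firmware - |
|  * hidden physical disks and RAID volumes, then plain SAS end devices, and |
|  * finally PCIe/NVMe end devices.  Every stage skips objects that are |
|  * already known and walks the firmware config pages with the GET_NEXT_* |
|  * idiom shown earlier. |
|  */ |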
10617 | |
10618 | /** |
10619 | * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih) |
10620 | * @ioc: per adapter object |
10621 | * |
10622 | * The handler for doing any required cleanup or initialization. |
10623 | */ |
10624 | void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) |
10625 | { |
10626 | dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); |
10627 | } |
10628 | |
10629 | /** |
10630 | * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding |
10631 | * scsi & tm cmds. |
10632 | * @ioc: per adapter object |
10633 | * |
10634 | * The handler for doing any required cleanup or initialization. |
10635 | */ |
10636 | void |
10637 | mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) |
10638 | { |
10639 | dtmprintk(ioc, |
10640 | ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); |
10641 | if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { |
10642 | ioc->scsih_cmds.status |= MPT3_CMD_RESET; |
10643 | mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); |
10644 | complete(&ioc->scsih_cmds.done); |
10645 | } |
10646 | if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { |
10647 | ioc->tm_cmds.status |= MPT3_CMD_RESET; |
10648 | mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); |
10649 | complete(&ioc->tm_cmds.done); |
10650 | } |
10651 | |
10652 | memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); |
10653 | memset(ioc->device_remove_in_progress, 0, |
10654 | ioc->device_remove_in_progress_sz); |
10655 | _scsih_fw_event_cleanup_queue(ioc); |
10656 | _scsih_flush_running_cmds(ioc); |
10657 | } |
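| |
| /* |
|  * Minimal sketch of the waiter side, assuming the usual MPT3_CMD_* |
|  * handshake in this driver: a thread that issued an internal command |
|  * sleeps on the completion signalled above and then checks whether it was |
|  * woken by a real reply or by the reset path: |
|  * |
|  *	wait_for_completion_timeout(&ioc->scsih_cmds.done, timeout); |
|  *	if (ioc->scsih_cmds.status & MPT3_CMD_RESET) |
|  *		... treat the command as aborted by host reset ... |
|  */ |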
10658 | |
10659 | /** |
10660 | * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih) |
10661 | * @ioc: per adapter object |
10662 | * |
10663 | * The handler for doing any required cleanup or initialization. |
10664 | */ |
10665 | void |
10666 | mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) |
10667 | { |
10668 | dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); |
10669 | if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { |
10670 | if (ioc->multipath_on_hba) { |
10671 | _scsih_sas_port_refresh(ioc); |
10672 | _scsih_update_vphys_after_reset(ioc); |
10673 | } |
10674 | _scsih_prep_device_scan(ioc); |
10675 | _scsih_create_enclosure_list_after_reset(ioc); |
10676 | _scsih_search_responding_sas_devices(ioc); |
10677 | _scsih_search_responding_pcie_devices(ioc); |
10678 | _scsih_search_responding_raid_devices(ioc); |
10679 | _scsih_search_responding_expanders(ioc); |
10680 | _scsih_error_recovery_delete_devices(ioc); |
10681 | } |
10682 | } |
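| |
| /* |
|  * Taken together, the three reset callbacks above split the work as |
|  * follows: the pre-reset hook only logs, the clear-outstanding hook aborts |
|  * and flushes whatever was in flight while the IOC is down, and the |
|  * reset-done hook rebuilds the topology view (ports, vphys, enclosures), |
|  * re-marks responding devices and finally schedules deletion of the stale |
|  * ones. |
|  */ |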
10683 | |
10684 | /** |
10685 | * _mpt3sas_fw_work - delayed task for processing firmware events |
10686 | * @ioc: per adapter object |
10687 | * @fw_event: The fw_event_work object |
10688 | * Context: user. |
10689 | */ |
10690 | static void |
10691 | _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) |
10692 | { |
10693 | ioc->current_event = fw_event; |
10694 | _scsih_fw_event_del_from_list(ioc, fw_event); |
10695 | |
10696 | /* the queue is being flushed so ignore this event */ |
10697 | if (ioc->remove_host || ioc->pci_error_recovery) { |
10698 | fw_event_work_put(fw_event); |
10699 | ioc->current_event = NULL; |
10700 | return; |
10701 | } |
10702 | |
10703 | switch (fw_event->event) { |
10704 | case MPT3SAS_PROCESS_TRIGGER_DIAG: |
10705 | mpt3sas_process_trigger_data(ioc, |
10706 | (struct SL_WH_TRIGGERS_EVENT_DATA_T *) |
10707 | fw_event->event_data); |
10708 | break; |
10709 | case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: |
10710 | while (scsi_host_in_recovery(shost: ioc->shost) || |
10711 | ioc->shost_recovery) { |
10712 | /* |
10713 | * If we're unloading or cancelling the work, bail. |
10714 | * Otherwise, this can become an infinite loop. |
10715 | */ |
10716 | if (ioc->remove_host || ioc->fw_events_cleanup) |
10717 | goto out; |
10718 | ssleep(1); |
10719 | } |
10720 | _scsih_remove_unresponding_devices(ioc); |
10721 | _scsih_del_dirty_vphy(ioc); |
10722 | _scsih_del_dirty_port_entries(ioc); |
10723 | if (ioc->is_gen35_ioc) |
10724 | _scsih_update_device_qdepth(ioc); |
10725 | _scsih_scan_for_devices_after_reset(ioc); |
10726 | /* |
10727 | * If diag reset has occurred during the driver load |
10728 | * then driver has to complete the driver load operation |
10729 | * by executing the following items: |
10730 | * - register the devices from sas_device_init_list to SML, |
10731 | * - clear the is_driver_loading flag, |
10732 | * - start the watchdog thread. |
10733 | * In happy driver load path, above things are taken care of when |
10734 | * driver executes scsih_scan_finished(). |
10735 | */ |
10736 | if (ioc->is_driver_loading) |
10737 | _scsih_complete_devices_scanning(ioc); |
10738 | _scsih_set_nvme_max_shutdown_latency(ioc); |
10739 | break; |
10740 | case MPT3SAS_PORT_ENABLE_COMPLETE: |
10741 | ioc->start_scan = 0; |
10742 | if (missing_delay[0] != -1 && missing_delay[1] != -1) |
10743 | mpt3sas_base_update_missing_delay(ioc, device_missing_delay: missing_delay[0], |
10744 | io_missing_delay: missing_delay[1]); |
10745 | dewtprintk(ioc, |
10746 | ioc_info(ioc, "port enable: complete from worker thread\n")); |
10747 | break; |
10748 | case MPT3SAS_TURN_ON_PFA_LED: |
10749 | _scsih_turn_on_pfa_led(ioc, handle: fw_event->device_handle); |
10750 | break; |
10751 | case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: |
10752 | _scsih_sas_topology_change_event(ioc, fw_event); |
10753 | break; |
10754 | case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: |
10755 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
10756 | _scsih_sas_device_status_change_event_debug(ioc, |
10757 | event_data: (Mpi2EventDataSasDeviceStatusChange_t *) |
10758 | fw_event->event_data); |
10759 | break; |
10760 | case MPI2_EVENT_SAS_DISCOVERY: |
10761 | _scsih_sas_discovery_event(ioc, fw_event); |
10762 | break; |
10763 | case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: |
10764 | _scsih_sas_device_discovery_error_event(ioc, fw_event); |
10765 | break; |
10766 | case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: |
10767 | _scsih_sas_broadcast_primitive_event(ioc, fw_event); |
10768 | break; |
10769 | case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: |
10770 | _scsih_sas_enclosure_dev_status_change_event(ioc, |
10771 | fw_event); |
10772 | break; |
10773 | case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: |
10774 | _scsih_sas_ir_config_change_event(ioc, fw_event); |
10775 | break; |
10776 | case MPI2_EVENT_IR_VOLUME: |
10777 | _scsih_sas_ir_volume_event(ioc, fw_event); |
10778 | break; |
10779 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
10780 | _scsih_sas_ir_physical_disk_event(ioc, fw_event); |
10781 | break; |
10782 | case MPI2_EVENT_IR_OPERATION_STATUS: |
10783 | _scsih_sas_ir_operation_status_event(ioc, fw_event); |
10784 | break; |
10785 | case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: |
10786 | _scsih_pcie_device_status_change_event(ioc, fw_event); |
10787 | break; |
10788 | case MPI2_EVENT_PCIE_ENUMERATION: |
10789 | _scsih_pcie_enumeration_event(ioc, fw_event); |
10790 | break; |
10791 | case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: |
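 | /* |
 |  * Unlike the other cases, this one returns directly and does not |
 |  * fall through to the common fw_event_work_put() at the out label. |
 |  */ |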
10792 | _scsih_pcie_topology_change_event(ioc, fw_event); |
10793 | ioc->current_event = NULL; |
10794 | return; |
10795 | } |
10796 | out: |
10797 | fw_event_work_put(fw_work: fw_event); |
10798 | ioc->current_event = NULL; |
10799 | } |
10800 | |
10801 | /** |
10802 | * _firmware_event_work - firmware event worker routine |
10803 | * @work: The fw_event_work object |
10804 | * Context: user. |
10805 | * |
10806 | * Wrapper that hands the queued fw_event_work off to _mpt3sas_fw_work(). |
10807 | */ |
10808 | |
10809 | static void |
10810 | _firmware_event_work(struct work_struct *work) |
10811 | { |
10812 | struct fw_event_work *fw_event = container_of(work, |
10813 | struct fw_event_work, work); |
10814 | |
10815 | _mpt3sas_fw_work(ioc: fw_event->ioc, fw_event); |
10816 | } |
10817 | |
10818 | /** |
10819 | * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) |
10820 | * @ioc: per adapter object |
10821 | * @msix_index: MSIX table index supplied by the OS |
10822 | * @reply: reply message frame (lower 32-bit addr) |
10823 | * Context: interrupt. |
10824 | * |
10825 | * This function merely adds a new work task into ioc->firmware_event_thread. |
10826 | * The tasks are then processed by _firmware_event_work() in user context. |
10827 | * |
10828 | * Return: 1 meaning mf should be freed from _base_interrupt |
10829 | * 0 means the mf is freed from this function. |
10830 | */ |
10831 | u8 |
10832 | mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, |
10833 | u32 reply) |
10834 | { |
10835 | struct fw_event_work *fw_event; |
10836 | Mpi2EventNotificationReply_t *mpi_reply; |
10837 | u16 event; |
10838 | u16 sz; |
10839 | Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; |
10840 | |
10841 | /* events turned off due to host reset */ |
10842 | if (ioc->pci_error_recovery) |
10843 | return 1; |
10844 | |
10845 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, phys_addr: reply); |
10846 | |
10847 | if (unlikely(!mpi_reply)) { |
10848 | ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", |
10849 | __FILE__, __LINE__, __func__); |
10850 | return 1; |
10851 | } |
10852 | |
10853 | event = le16_to_cpu(mpi_reply->Event); |
10854 | |
10855 | if (event != MPI2_EVENT_LOG_ENTRY_ADDED) |
10856 | mpt3sas_trigger_event(ioc, event, log_entry_qualifier: 0); |
10857 | |
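 | /* |
 |  * Pre-filter in interrupt context: events without a case below are |
 |  * ignored (the default simply returns 1); the rest are copied and |
 |  * queued to the firmware event worker further down. |
 |  */ |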
10858 | switch (event) { |
10859 | /* handle these */ |
10860 | case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: |
10861 | { |
10862 | Mpi2EventDataSasBroadcastPrimitive_t *baen_data = |
10863 | (Mpi2EventDataSasBroadcastPrimitive_t *) |
10864 | mpi_reply->EventData; |
10865 | |
10866 | if (baen_data->Primitive != |
10867 | MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) |
10868 | return 1; |
10869 | |
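 | /* |
 |  * Throttle broadcast primitives: if one is already being processed, |
 |  * just account for it in broadcast_aen_pending and drop it here; |
 |  * otherwise mark the handler busy and queue the event. |
 |  */ |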
10870 | if (ioc->broadcast_aen_busy) { |
10871 | ioc->broadcast_aen_pending++; |
10872 | return 1; |
10873 | } else |
10874 | ioc->broadcast_aen_busy = 1; |
10875 | break; |
10876 | } |
10877 | |
10878 | case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: |
10879 | _scsih_check_topo_delete_events(ioc, |
10880 | event_data: (Mpi2EventDataSasTopologyChangeList_t *) |
10881 | mpi_reply->EventData); |
10882 | /* |
10883 | * There is no need to queue the topology change list |
10884 | * event to the fw event work queue while a diag reset |
10885 | * is in progress, since during diag reset the driver |
10886 | * rediscovers devices by reading SAS Device Page 0 |
10887 | * directly rather than by processing these |
10888 | * events. |
10889 | */ |
10890 | if (ioc->shost_recovery) |
10891 | return 1; |
10892 | break; |
10893 | case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: |
10894 | _scsih_check_pcie_topo_remove_events(ioc, |
10895 | event_data: (Mpi26EventDataPCIeTopologyChangeList_t *) |
10896 | mpi_reply->EventData); |
10897 | if (ioc->shost_recovery) |
10898 | return 1; |
10899 | break; |
10900 | case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: |
10901 | _scsih_check_ir_config_unhide_events(ioc, |
10902 | event_data: (Mpi2EventDataIrConfigChangeList_t *) |
10903 | mpi_reply->EventData); |
10904 | break; |
10905 | case MPI2_EVENT_IR_VOLUME: |
10906 | _scsih_check_volume_delete_events(ioc, |
10907 | event_data: (Mpi2EventDataIrVolume_t *) |
10908 | mpi_reply->EventData); |
10909 | break; |
10910 | case MPI2_EVENT_LOG_ENTRY_ADDED: |
10911 | { |
10912 | Mpi2EventDataLogEntryAdded_t *log_entry; |
10913 | u32 log_code; |
10914 | |
10915 | if (!ioc->is_warpdrive) |
10916 | break; |
10917 | |
10918 | log_entry = (Mpi2EventDataLogEntryAdded_t *) |
10919 | mpi_reply->EventData; |
10920 | log_code = le32_to_cpu(*(__le32 *)log_entry->LogData); |
10921 | |
10922 | if (le16_to_cpu(log_entry->LogEntryQualifier) |
10923 | != MPT2_WARPDRIVE_LOGENTRY) |
10924 | break; |
10925 | |
10926 | switch (log_code) { |
10927 | case MPT2_WARPDRIVE_LC_SSDT: |
10928 | ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n"); |
10929 | break; |
10930 | case MPT2_WARPDRIVE_LC_SSDLW: |
10931 | ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n"); |
10932 | break; |
10933 | case MPT2_WARPDRIVE_LC_SSDLF: |
10934 | ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n"); |
10935 | break; |
10936 | case MPT2_WARPDRIVE_LC_BRMF: |
10937 | ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n"); |
10938 | break; |
10939 | } |
10940 | |
10941 | break; |
10942 | } |
10943 | case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: |
10944 | _scsih_sas_device_status_change_event(ioc, |
10945 | event_data: (Mpi2EventDataSasDeviceStatusChange_t *) |
10946 | mpi_reply->EventData); |
10947 | break; |
10948 | case MPI2_EVENT_IR_OPERATION_STATUS: |
10949 | case MPI2_EVENT_SAS_DISCOVERY: |
10950 | case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: |
10951 | case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: |
10952 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
10953 | case MPI2_EVENT_PCIE_ENUMERATION: |
10954 | case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: |
10955 | break; |
10956 | |
10957 | case MPI2_EVENT_TEMP_THRESHOLD: |
10958 | _scsih_temp_threshold_events(ioc, |
10959 | event_data: (Mpi2EventDataTemperature_t *) |
10960 | mpi_reply->EventData); |
10961 | break; |
10962 | case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: |
10963 | ActiveCableEventData = |
10964 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; |
10965 | switch (ActiveCableEventData->ReasonCode) { |
10966 | case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: |
10967 | ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n", |
10968 | ActiveCableEventData->ReceptacleID); |
10969 | pr_notice("cannot be powered and devices connected\n"); |
10970 | pr_notice("to this active cable will not be seen\n"); |
10971 | pr_notice("This active cable requires %d mW of power\n", |
10972 | le32_to_cpu( |
10973 | ActiveCableEventData->ActiveCablePowerRequirement)); |
10974 | break; |
10975 | |
10976 | case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: |
10977 | ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n", |
10978 | ActiveCableEventData->ReceptacleID); |
10979 | pr_notice( |
10980 | "is not running at optimal speed(12 Gb/s rate)\n"); |
10981 | break; |
10982 | } |
10983 | |
10984 | break; |
10985 | |
10986 | default: /* ignore the rest */ |
10987 | return 1; |
10988 | } |
10989 | |
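 | /* |
 |  * Copy the event data out of the reply frame and queue it to the |
 |  * firmware event worker; the reply frame itself is always freed by |
 |  * the caller since this routine returns 1. |
 |  */ |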
10990 | sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; |
10991 | fw_event = alloc_fw_event_work(len: sz); |
10992 | if (!fw_event) { |
10993 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
10994 | __FILE__, __LINE__, __func__); |
10995 | return 1; |
10996 | } |
10997 | |
10998 | memcpy(fw_event->event_data, mpi_reply->EventData, sz); |
10999 | fw_event->ioc = ioc; |
11000 | fw_event->VF_ID = mpi_reply->VF_ID; |
11001 | fw_event->VP_ID = mpi_reply->VP_ID; |
11002 | fw_event->event = event; |
11003 | _scsih_fw_event_add(ioc, fw_event); |
11004 | fw_event_work_put(fw_work: fw_event); |
11005 | return 1; |
11006 | } |
11007 | |
11008 | /** |
11009 | * _scsih_expander_node_remove - removing expander device from list. |
11010 | * @ioc: per adapter object |
11011 | * @sas_expander: the sas_device object |
11012 | * |
11013 | * Removing object and freeing associated memory from the |
11014 | * ioc->sas_expander_list. |
11015 | */ |
11016 | static void |
11017 | _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, |
11018 | struct _sas_node *sas_expander) |
11019 | { |
11020 | struct _sas_port *mpt3sas_port, *next; |
11021 | unsigned long flags; |
11022 | int port_id; |
11023 | |
11024 | /* remove sibling ports attached to this expander */ |
11025 | list_for_each_entry_safe(mpt3sas_port, next, |
11026 | &sas_expander->sas_port_list, port_list) { |
11027 | if (ioc->shost_recovery) |
11028 | return; |
11029 | if (mpt3sas_port->remote_identify.device_type == |
11030 | SAS_END_DEVICE) |
11031 | mpt3sas_device_remove_by_sas_address(ioc, |
11032 | sas_address: mpt3sas_port->remote_identify.sas_address, |
11033 | port: mpt3sas_port->hba_port); |
11034 | else if (mpt3sas_port->remote_identify.device_type == |
11035 | SAS_EDGE_EXPANDER_DEVICE || |
11036 | mpt3sas_port->remote_identify.device_type == |
11037 | SAS_FANOUT_EXPANDER_DEVICE) |
11038 | mpt3sas_expander_remove(ioc, |
11039 | sas_address: mpt3sas_port->remote_identify.sas_address, |
11040 | port: mpt3sas_port->hba_port); |
11041 | } |
11042 | |
11043 | port_id = sas_expander->port->port_id; |
11044 | |
11045 | mpt3sas_transport_port_remove(ioc, sas_address: sas_expander->sas_address, |
11046 | sas_address_parent: sas_expander->sas_address_parent, port: sas_expander->port); |
11047 | |
11048 | ioc_info(ioc, |
11049 | "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", |
11050 | sas_expander->handle, (unsigned long long) |
11051 | sas_expander->sas_address, |
11052 | port_id); |
11053 | |
11054 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
11055 | list_del(entry: &sas_expander->list); |
11056 | spin_unlock_irqrestore(lock: &ioc->sas_node_lock, flags); |
11057 | |
11058 | kfree(objp: sas_expander->phy); |
11059 | kfree(objp: sas_expander); |
11060 | } |
11061 | |
11062 | /** |
11063 | * _scsih_nvme_shutdown - NVMe shutdown notification |
11064 | * @ioc: per adapter object |
11065 | * |
11066 | * Send an IO Unit Control request with the shutdown operation code to alert |
11067 | * the IOC that the host system is shutting down, so that the IOC can issue an |
11068 | * NVMe shutdown to the NVMe drives attached to it. |
11069 | */ |
11070 | static void |
11071 | _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) |
11072 | { |
11073 | Mpi26IoUnitControlRequest_t *mpi_request; |
11074 | Mpi26IoUnitControlReply_t *mpi_reply; |
11075 | u16 smid; |
11076 | |
11077 | /* are there any NVMe devices ? */ |
11078 | if (list_empty(head: &ioc->pcie_device_list)) |
11079 | return; |
11080 | |
11081 | mutex_lock(&ioc->scsih_cmds.mutex); |
11082 | |
11083 | if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { |
11084 | ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); |
11085 | goto out; |
11086 | } |
11087 | |
11088 | ioc->scsih_cmds.status = MPT3_CMD_PENDING; |
11089 | |
11090 | smid = mpt3sas_base_get_smid(ioc, cb_idx: ioc->scsih_cb_idx); |
11091 | if (!smid) { |
11092 | ioc_err(ioc, |
11093 | "%s: failed obtaining a smid\n", __func__); |
11094 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; |
11095 | goto out; |
11096 | } |
11097 | |
11098 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
11099 | ioc->scsih_cmds.smid = smid; |
11100 | memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); |
11101 | mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; |
11102 | mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; |
11103 | |
11104 | init_completion(x: &ioc->scsih_cmds.done); |
11105 | ioc->put_smid_default(ioc, smid); |
11106 | /* Wait for max_shutdown_latency seconds */ |
11107 | ioc_info(ioc, |
11108 | "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", |
11109 | ioc->max_shutdown_latency); |
11110 | wait_for_completion_timeout(x: &ioc->scsih_cmds.done, |
11111 | timeout: ioc->max_shutdown_latency*HZ); |
11112 | |
11113 | if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { |
11114 | ioc_err(ioc, "%s: timeout\n", __func__); |
11115 | goto out; |
11116 | } |
11117 | |
11118 | if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { |
11119 | mpi_reply = ioc->scsih_cmds.reply; |
11120 | ioc_info(ioc, "Io Unit Control shutdown (complete):" |
11121 | "ioc_status(0x%04x), loginfo(0x%08x)\n", |
11122 | le16_to_cpu(mpi_reply->IOCStatus), |
11123 | le32_to_cpu(mpi_reply->IOCLogInfo)); |
11124 | } |
11125 | out: |
11126 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; |
11127 | mutex_unlock(lock: &ioc->scsih_cmds.mutex); |
11128 | } |
11129 | |
11130 | |
11131 | /** |
11132 | * _scsih_ir_shutdown - IR shutdown notification |
11133 | * @ioc: per adapter object |
11134 | * |
11135 | * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that |
11136 | * the host system is shutting down. |
11137 | */ |
11138 | static void |
11139 | _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) |
11140 | { |
11141 | Mpi2RaidActionRequest_t *mpi_request; |
11142 | Mpi2RaidActionReply_t *mpi_reply; |
11143 | u16 smid; |
11144 | |
11145 | /* is IR firmware build loaded ? */ |
11146 | if (!ioc->ir_firmware) |
11147 | return; |
11148 | |
11149 | /* are there any volumes ? */ |
11150 | if (list_empty(head: &ioc->raid_device_list)) |
11151 | return; |
11152 | |
11153 | mutex_lock(&ioc->scsih_cmds.mutex); |
11154 | |
11155 | if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { |
11156 | ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); |
11157 | goto out; |
11158 | } |
11159 | ioc->scsih_cmds.status = MPT3_CMD_PENDING; |
11160 | |
11161 | smid = mpt3sas_base_get_smid(ioc, cb_idx: ioc->scsih_cb_idx); |
11162 | if (!smid) { |
11163 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
11164 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; |
11165 | goto out; |
11166 | } |
11167 | |
11168 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
11169 | ioc->scsih_cmds.smid = smid; |
11170 | memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); |
11171 | |
11172 | mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; |
11173 | mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; |
11174 | |
11175 | if (!ioc->hide_ir_msg) |
11176 | ioc_info(ioc, "IR shutdown (sending)\n"); |
11177 | init_completion(x: &ioc->scsih_cmds.done); |
11178 | ioc->put_smid_default(ioc, smid); |
11179 | wait_for_completion_timeout(x: &ioc->scsih_cmds.done, timeout: 10*HZ); |
11180 | |
11181 | if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { |
11182 | ioc_err(ioc, "%s: timeout\n", __func__); |
11183 | goto out; |
11184 | } |
11185 | |
11186 | if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { |
11187 | mpi_reply = ioc->scsih_cmds.reply; |
11188 | if (!ioc->hide_ir_msg) |
11189 | ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", |
11190 | le16_to_cpu(mpi_reply->IOCStatus), |
11191 | le32_to_cpu(mpi_reply->IOCLogInfo)); |
11192 | } |
11193 | |
11194 | out: |
11195 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; |
11196 | mutex_unlock(lock: &ioc->scsih_cmds.mutex); |
11197 | } |
11198 | |
11199 | /** |
11200 | * _scsih_get_shost_and_ioc - get the shost and ioc for a pci device |
11201 | * and verify that neither is NULL |
11202 | * @pdev: PCI device struct |
11203 | * @shost: address of scsi host pointer |
11204 | * @ioc: address of HBA adapter pointer |
11205 | * |
11206 | * Return: zero if both *shost and *ioc are non-NULL, otherwise an error number. |
11207 | */ |
11208 | static int |
11209 | _scsih_get_shost_and_ioc(struct pci_dev *pdev, |
11210 | struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc) |
11211 | { |
11212 | *shost = pci_get_drvdata(pdev); |
11213 | if (*shost == NULL) { |
11214 | dev_err(&pdev->dev, "pdev's driver data is null\n"); |
11215 | return -ENXIO; |
11216 | } |
11217 | |
11218 | *ioc = shost_priv(shost: *shost); |
11219 | if (*ioc == NULL) { |
11220 | dev_err(&pdev->dev, "shost's private data is null\n"); |
11221 | return -ENXIO; |
11222 | } |
11223 | |
11224 | return 0; |
11225 | } |
11226 | |
11227 | /** |
11228 | * scsih_remove - detach and remove the scsi host |
11229 | * @pdev: PCI device struct |
11230 | * |
11231 | * Routine called when unloading the driver. |
11232 | */ |
11233 | static void scsih_remove(struct pci_dev *pdev) |
11234 | { |
11235 | struct Scsi_Host *shost; |
11236 | struct MPT3SAS_ADAPTER *ioc; |
11237 | struct _sas_port *mpt3sas_port, *next_port; |
11238 | struct _raid_device *raid_device, *next; |
11239 | struct MPT3SAS_TARGET *sas_target_priv_data; |
11240 | struct _pcie_device *pcie_device, *pcienext; |
11241 | struct workqueue_struct *wq; |
11242 | unsigned long flags; |
11243 | Mpi2ConfigReply_t mpi_reply; |
11244 | struct hba_port *port, *port_next; |
11245 | |
11246 | if (_scsih_get_shost_and_ioc(pdev, shost: &shost, ioc: &ioc)) |
11247 | return; |
11248 | |
11249 | ioc->remove_host = 1; |
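 | /* Flag the host as being removed so that queued firmware event work |
 |  * (see _mpt3sas_fw_work()) bails out early. |
 |  */ |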
11250 | |
11251 | if (!pci_device_is_present(pdev)) { |
11252 | mpt3sas_base_pause_mq_polling(ioc); |
11253 | _scsih_flush_running_cmds(ioc); |
11254 | } |
11255 | |
11256 | _scsih_fw_event_cleanup_queue(ioc); |
11257 | |
11258 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
11259 | wq = ioc->firmware_event_thread; |
11260 | ioc->firmware_event_thread = NULL; |
11261 | spin_unlock_irqrestore(lock: &ioc->fw_event_lock, flags); |
11262 | if (wq) |
11263 | destroy_workqueue(wq); |
11264 | /* |
11265 | * Copy back the unmodified ioc page1 so that on the next driver load, |
11266 | * the currently modified changes on ioc page1 won't take effect. |
11267 | */ |
11268 | if (ioc->is_aero_ioc) |
11269 | mpt3sas_config_set_ioc_pg1(ioc, mpi_reply: &mpi_reply, |
11270 | config_page: &ioc->ioc_pg1_copy); |
11271 | /* release all the volumes */ |
11272 | _scsih_ir_shutdown(ioc); |
11273 | mpt3sas_destroy_debugfs(ioc); |
11274 | sas_remove_host(shost); |
11275 | list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, |
11276 | list) { |
11277 | if (raid_device->starget) { |
11278 | sas_target_priv_data = |
11279 | raid_device->starget->hostdata; |
11280 | sas_target_priv_data->deleted = 1; |
11281 | scsi_remove_target(&raid_device->starget->dev); |
11282 | } |
11283 | ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", |
11284 | raid_device->handle, (u64)raid_device->wwid); |
11285 | _scsih_raid_device_remove(ioc, raid_device); |
11286 | } |
11287 | list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list, |
11288 | list) { |
11289 | _scsih_pcie_device_remove_from_sml(ioc, pcie_device); |
11290 | list_del_init(entry: &pcie_device->list); |
11291 | pcie_device_put(p: pcie_device); |
11292 | } |
11293 | |
11294 | /* free ports attached to the sas_host */ |
11295 | list_for_each_entry_safe(mpt3sas_port, next_port, |
11296 | &ioc->sas_hba.sas_port_list, port_list) { |
11297 | if (mpt3sas_port->remote_identify.device_type == |
11298 | SAS_END_DEVICE) |
11299 | mpt3sas_device_remove_by_sas_address(ioc, |
11300 | sas_address: mpt3sas_port->remote_identify.sas_address, |
11301 | port: mpt3sas_port->hba_port); |
11302 | else if (mpt3sas_port->remote_identify.device_type == |
11303 | SAS_EDGE_EXPANDER_DEVICE || |
11304 | mpt3sas_port->remote_identify.device_type == |
11305 | SAS_FANOUT_EXPANDER_DEVICE) |
11306 | mpt3sas_expander_remove(ioc, |
11307 | sas_address: mpt3sas_port->remote_identify.sas_address, |
11308 | port: mpt3sas_port->hba_port); |
11309 | } |
11310 | |
11311 | list_for_each_entry_safe(port, port_next, |
11312 | &ioc->port_table_list, list) { |
11313 | list_del(entry: &port->list); |
11314 | kfree(objp: port); |
11315 | } |
11316 | |
11317 | /* free phys attached to the sas_host */ |
11318 | if (ioc->sas_hba.num_phys) { |
11319 | kfree(objp: ioc->sas_hba.phy); |
11320 | ioc->sas_hba.phy = NULL; |
11321 | ioc->sas_hba.num_phys = 0; |
11322 | } |
11323 | |
11324 | mpt3sas_base_detach(ioc); |
11325 | mpt3sas_ctl_release(ioc); |
11326 | spin_lock(lock: &gioc_lock); |
11327 | list_del(entry: &ioc->list); |
11328 | spin_unlock(lock: &gioc_lock); |
11329 | scsi_host_put(t: shost); |
11330 | } |
11331 | |
11332 | /** |
11333 | * scsih_shutdown - routine called during system shutdown |
11334 | * @pdev: PCI device struct |
11335 | */ |
11336 | static void |
11337 | scsih_shutdown(struct pci_dev *pdev) |
11338 | { |
11339 | struct Scsi_Host *shost; |
11340 | struct MPT3SAS_ADAPTER *ioc; |
11341 | struct workqueue_struct *wq; |
11342 | unsigned long flags; |
11343 | Mpi2ConfigReply_t mpi_reply; |
11344 | |
11345 | if (_scsih_get_shost_and_ioc(pdev, shost: &shost, ioc: &ioc)) |
11346 | return; |
11347 | |
11348 | ioc->remove_host = 1; |
11349 | |
11350 | if (!pci_device_is_present(pdev)) { |
11351 | mpt3sas_base_pause_mq_polling(ioc); |
11352 | _scsih_flush_running_cmds(ioc); |
11353 | } |
11354 | |
11355 | _scsih_fw_event_cleanup_queue(ioc); |
11356 | |
11357 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
11358 | wq = ioc->firmware_event_thread; |
11359 | ioc->firmware_event_thread = NULL; |
11360 | spin_unlock_irqrestore(lock: &ioc->fw_event_lock, flags); |
11361 | if (wq) |
11362 | destroy_workqueue(wq); |
11363 | /* |
11364 | * Copy back the unmodified ioc page1 so that on next driver load, |
11365 | * current modified changes on ioc page1 won't take effect. |
11366 | */ |
11367 | if (ioc->is_aero_ioc) |
11368 | mpt3sas_config_set_ioc_pg1(ioc, mpi_reply: &mpi_reply, |
11369 | config_page: &ioc->ioc_pg1_copy); |
11370 | |
11371 | _scsih_ir_shutdown(ioc); |
11372 | _scsih_nvme_shutdown(ioc); |
11373 | mpt3sas_base_mask_interrupts(ioc); |
11374 | mpt3sas_base_stop_watchdog(ioc); |
11375 | ioc->shost_recovery = 1; |
11376 | mpt3sas_base_make_ioc_ready(ioc, type: SOFT_RESET); |
11377 | ioc->shost_recovery = 0; |
11378 | mpt3sas_base_free_irq(ioc); |
11379 | mpt3sas_base_disable_msix(ioc); |
11380 | } |
11381 | |
11382 | |
11383 | /** |
11384 | * _scsih_probe_boot_devices - reports 1st device |
11385 | * @ioc: per adapter object |
11386 | * |
11387 | * If specified in bios page 2, this routine reports the 1st |
11388 | * device to scsi-ml or the sas transport layer for persistent boot |
11389 | * device purposes. Please refer to _scsih_determine_boot_device(). |
11390 | */ |
11391 | static void |
11392 | _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) |
11393 | { |
11394 | u32 channel; |
11395 | void *device; |
11396 | struct _sas_device *sas_device; |
11397 | struct _raid_device *raid_device; |
11398 | struct _pcie_device *pcie_device; |
11399 | u16 handle; |
11400 | u64 sas_address_parent; |
11401 | u64 sas_address; |
11402 | unsigned long flags; |
11403 | int rc; |
11404 | int tid; |
11405 | struct hba_port *port; |
11406 | |
11407 | /* no Bios, return immediately */ |
11408 | if (!ioc->bios_pg3.BiosVersion) |
11409 | return; |
11410 | |
11411 | device = NULL; |
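 | /* |
 |  * Boot device preference order: the requested boot device first, |
 |  * then the requested alternate, then the current boot device. |
 |  */ |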
11412 | if (ioc->req_boot_device.device) { |
11413 | device = ioc->req_boot_device.device; |
11414 | channel = ioc->req_boot_device.channel; |
11415 | } else if (ioc->req_alt_boot_device.device) { |
11416 | device = ioc->req_alt_boot_device.device; |
11417 | channel = ioc->req_alt_boot_device.channel; |
11418 | } else if (ioc->current_boot_device.device) { |
11419 | device = ioc->current_boot_device.device; |
11420 | channel = ioc->current_boot_device.channel; |
11421 | } |
11422 | |
11423 | if (!device) |
11424 | return; |
11425 | |
11426 | if (channel == RAID_CHANNEL) { |
11427 | raid_device = device; |
11428 | /* |
11429 | * If this boot vd is already registered with SML then |
11430 | * no need to register it again as part of device scanning |
11431 | * after diag reset during driver load operation. |
11432 | */ |
11433 | if (raid_device->starget) |
11434 | return; |
11435 | rc = scsi_add_device(host: ioc->shost, RAID_CHANNEL, |
11436 | target: raid_device->id, lun: 0); |
11437 | if (rc) |
11438 | _scsih_raid_device_remove(ioc, raid_device); |
11439 | } else if (channel == PCIE_CHANNEL) { |
11440 | pcie_device = device; |
11441 | /* |
11442 | * If this boot NVMe device is already registered with SML then |
11443 | * no need to register it again as part of device scanning |
11444 | * after diag reset during driver load operation. |
11445 | */ |
11446 | if (pcie_device->starget) |
11447 | return; |
11448 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
11449 | tid = pcie_device->id; |
11450 | list_move_tail(list: &pcie_device->list, head: &ioc->pcie_device_list); |
11451 | spin_unlock_irqrestore(lock: &ioc->pcie_device_lock, flags); |
11452 | rc = scsi_add_device(host: ioc->shost, PCIE_CHANNEL, target: tid, lun: 0); |
11453 | if (rc) |
11454 | _scsih_pcie_device_remove(ioc, pcie_device); |
11455 | } else { |
11456 | sas_device = device; |
11457 | /* |
11458 | * If this boot sas/sata device is already registered with SML |
11459 | * then no need to register it again as part of device scanning |
11460 | * after diag reset during driver load operation. |
11461 | */ |
11462 | if (sas_device->starget) |
11463 | return; |
11464 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
11465 | handle = sas_device->handle; |
11466 | sas_address_parent = sas_device->sas_address_parent; |
11467 | sas_address = sas_device->sas_address; |
11468 | port = sas_device->port; |
11469 | list_move_tail(list: &sas_device->list, head: &ioc->sas_device_list); |
11470 | spin_unlock_irqrestore(lock: &ioc->sas_device_lock, flags); |
11471 | |
11472 | if (ioc->hide_drives) |
11473 | return; |
11474 | |
11475 | if (!port) |
11476 | return; |
11477 | |
11478 | if (!mpt3sas_transport_port_add(ioc, handle, |
11479 | sas_address: sas_address_parent, port)) { |
11480 | _scsih_sas_device_remove(ioc, sas_device); |
11481 | } else if (!sas_device->starget) { |
11482 | if (!ioc->is_driver_loading) { |
11483 | mpt3sas_transport_port_remove(ioc, |
11484 | sas_address, |
11485 | sas_address_parent, port); |
11486 | _scsih_sas_device_remove(ioc, sas_device); |
11487 | } |
11488 | } |
11489 | } |
11490 | } |
11491 | |
11492 | /** |
11493 | * _scsih_probe_raid - reporting raid volumes to scsi-ml |
11494 | * @ioc: per adapter object |
11495 | * |
11496 | * Called during initial loading of the driver. |
11497 | */ |
11498 | static void |
11499 | _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) |
11500 | { |
11501 | struct _raid_device *raid_device, *raid_next; |
11502 | int rc; |
11503 | |
11504 | list_for_each_entry_safe(raid_device, raid_next, |
11505 | &ioc->raid_device_list, list) { |
11506 | if (raid_device->starget) |
11507 | continue; |
11508 | rc = scsi_add_device(host: ioc->shost, RAID_CHANNEL, |
11509 | target: raid_device->id, lun: 0); |
11510 | if (rc) |
11511 | _scsih_raid_device_remove(ioc, raid_device); |
11512 | } |
11513 | } |
11514 | |
11515 | static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) |
11516 | { |
11517 | struct _sas_device *sas_device = NULL; |
11518 | unsigned long flags; |
11519 | |
11520 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
11521 | if (!list_empty(head: &ioc->sas_device_init_list)) { |
11522 | sas_device = list_first_entry(&ioc->sas_device_init_list, |
11523 | struct _sas_device, list); |
11524 | sas_device_get(s: sas_device); |
11525 | } |
11526 | spin_unlock_irqrestore(lock: &ioc->sas_device_lock, flags); |
11527 | |
11528 | return sas_device; |
11529 | } |
11530 | |
11531 | static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, |
11532 | struct _sas_device *sas_device) |
11533 | { |
11534 | unsigned long flags; |
11535 | |
11536 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
11537 | |
11538 | /* |
11539 | * Since we dropped the lock during the call to port_add(), we need to |
11540 | * be careful here that somebody else didn't move or delete this item |
11541 | * while we were busy with other things. |
11542 | * |
11543 | * If it was on the list, we need a put() for the reference the list |
11544 | * had. Either way, we need a get() for the destination list. |
11545 | */ |
11546 | if (!list_empty(head: &sas_device->list)) { |
11547 | list_del_init(entry: &sas_device->list); |
11548 | sas_device_put(s: sas_device); |
11549 | } |
11550 | |
11551 | sas_device_get(s: sas_device); |
11552 | list_add_tail(new: &sas_device->list, head: &ioc->sas_device_list); |
11553 | |
11554 | spin_unlock_irqrestore(lock: &ioc->sas_device_lock, flags); |
11555 | } |
11556 | |
11557 | /** |
11558 | * _scsih_probe_sas - reporting sas devices to sas transport |
11559 | * @ioc: per adapter object |
11560 | * |
11561 | * Called during initial loading of the driver. |
11562 | */ |
11563 | static void |
11564 | _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) |
11565 | { |
11566 | struct _sas_device *sas_device; |
11567 | |
11568 | if (ioc->hide_drives) |
11569 | return; |
11570 | |
11571 | while ((sas_device = get_next_sas_device(ioc))) { |
11572 | if (!mpt3sas_transport_port_add(ioc, handle: sas_device->handle, |
11573 | sas_address: sas_device->sas_address_parent, port: sas_device->port)) { |
11574 | _scsih_sas_device_remove(ioc, sas_device); |
11575 | sas_device_put(s: sas_device); |
11576 | continue; |
11577 | } else if (!sas_device->starget) { |
11578 | /* |
11579 | * When async scanning is enabled, it's not possible to |
11580 | * remove devices while scanning is turned on due to an |
11581 | * oops in scsi_sysfs_add_sdev()->add_device()-> |
11582 | * sysfs_addrm_start() |
11583 | */ |
11584 | if (!ioc->is_driver_loading) { |
11585 | mpt3sas_transport_port_remove(ioc, |
11586 | sas_address: sas_device->sas_address, |
11587 | sas_address_parent: sas_device->sas_address_parent, |
11588 | port: sas_device->port); |
11589 | _scsih_sas_device_remove(ioc, sas_device); |
11590 | sas_device_put(s: sas_device); |
11591 | continue; |
11592 | } |
11593 | } |
11594 | sas_device_make_active(ioc, sas_device); |
11595 | sas_device_put(s: sas_device); |
11596 | } |
11597 | } |
11598 | |
11599 | /** |
11600 | * get_next_pcie_device - Get the next pcie device |
11601 | * @ioc: per adapter object |
11602 | * |
11603 | * Get the next pcie device from pcie_device_init_list list. |
11604 | * |
11605 | * Return: pcie device structure if pcie_device_init_list list is not empty |
11606 | * otherwise returns NULL |
11607 | */ |
11608 | static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) |
11609 | { |
11610 | struct _pcie_device *pcie_device = NULL; |
11611 | unsigned long flags; |
11612 | |
11613 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
11614 | if (!list_empty(head: &ioc->pcie_device_init_list)) { |
11615 | pcie_device = list_first_entry(&ioc->pcie_device_init_list, |
11616 | struct _pcie_device, list); |
11617 | pcie_device_get(p: pcie_device); |
11618 | } |
11619 | spin_unlock_irqrestore(lock: &ioc->pcie_device_lock, flags); |
11620 | |
11621 | return pcie_device; |
11622 | } |
11623 | |
11624 | /** |
11625 | * pcie_device_make_active - Add pcie device to pcie_device_list list |
11626 | * @ioc: per adapter object |
11627 | * @pcie_device: pcie device object |
11628 | * |
11629 | * Add the pcie device, which has been registered with the SCSI Transport |
11630 | * Layer, to the pcie_device_list list. |
11631 | */ |
11632 | static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, |
11633 | struct _pcie_device *pcie_device) |
11634 | { |
11635 | unsigned long flags; |
11636 | |
11637 | spin_lock_irqsave(&ioc->pcie_device_lock, flags); |
11638 | |
11639 | if (!list_empty(head: &pcie_device->list)) { |
11640 | list_del_init(entry: &pcie_device->list); |
11641 | pcie_device_put(p: pcie_device); |
11642 | } |
11643 | pcie_device_get(p: pcie_device); |
11644 | list_add_tail(new: &pcie_device->list, head: &ioc->pcie_device_list); |
11645 | |
11646 | spin_unlock_irqrestore(lock: &ioc->pcie_device_lock, flags); |
11647 | } |
11648 | |
11649 | /** |
11650 | * _scsih_probe_pcie - reporting PCIe devices to scsi-ml |
11651 | * @ioc: per adapter object |
11652 | * |
11653 | * Called during initial loading of the driver. |
11654 | */ |
11655 | static void |
11656 | _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) |
11657 | { |
11658 | struct _pcie_device *pcie_device; |
11659 | int rc; |
11660 | |
11661 | /* PCIe Device List */ |
11662 | while ((pcie_device = get_next_pcie_device(ioc))) { |
11663 | if (pcie_device->starget) { |
11664 | pcie_device_put(p: pcie_device); |
11665 | continue; |
11666 | } |
11667 | if (pcie_device->access_status == |
11668 | MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { |
11669 | pcie_device_make_active(ioc, pcie_device); |
11670 | pcie_device_put(p: pcie_device); |
11671 | continue; |
11672 | } |
11673 | rc = scsi_add_device(host: ioc->shost, PCIE_CHANNEL, |
11674 | target: pcie_device->id, lun: 0); |
11675 | if (rc) { |
11676 | _scsih_pcie_device_remove(ioc, pcie_device); |
11677 | pcie_device_put(p: pcie_device); |
11678 | continue; |
11679 | } else if (!pcie_device->starget) { |
11680 | /* |
11681 | * When async scanning is enabled, it's not possible to |
11682 | * remove devices while scanning is turned on due to an |
11683 | * oops in scsi_sysfs_add_sdev()->add_device()-> |
11684 | * sysfs_addrm_start() |
11685 | */ |
11686 | if (!ioc->is_driver_loading) { |
11687 | /* TODO-- Need to find out whether this condition will |
11688 | * occur or not |
11689 | */ |
11690 | _scsih_pcie_device_remove(ioc, pcie_device); |
11691 | pcie_device_put(p: pcie_device); |
11692 | continue; |
11693 | } |
11694 | } |
11695 | pcie_device_make_active(ioc, pcie_device); |
11696 | pcie_device_put(p: pcie_device); |
11697 | } |
11698 | } |
11699 | |
11700 | /** |
11701 | * _scsih_probe_devices - probing for devices |
11702 | * @ioc: per adapter object |
11703 | * |
11704 | * Called during initial loading of the driver. |
11705 | */ |
11706 | static void |
11707 | _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) |
11708 | { |
11709 | u16 volume_mapping_flags; |
11710 | |
11711 | if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) |
11712 | return; /* return when IOC doesn't support initiator mode */ |
11713 | |
11714 | _scsih_probe_boot_devices(ioc); |
11715 | |
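 | /* |
 |  * With IR firmware, the volume mapping mode from IOC Page 8 decides |
 |  * whether RAID volumes or SAS devices are probed first; without IR |
 |  * firmware, SAS and then PCIe devices are probed. |
 |  */ |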
11716 | if (ioc->ir_firmware) { |
11717 | volume_mapping_flags = |
11718 | le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & |
11719 | MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; |
11720 | if (volume_mapping_flags == |
11721 | MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { |
11722 | _scsih_probe_raid(ioc); |
11723 | _scsih_probe_sas(ioc); |
11724 | } else { |
11725 | _scsih_probe_sas(ioc); |
11726 | _scsih_probe_raid(ioc); |
11727 | } |
11728 | } else { |
11729 | _scsih_probe_sas(ioc); |
11730 | _scsih_probe_pcie(ioc); |
11731 | } |
11732 | } |
11733 | |
11734 | /** |
11735 | * scsih_scan_start - scsi lld callback for .scan_start |
11736 | * @shost: SCSI host pointer |
11737 | * |
11738 | * The shost has the ability to discover targets on its own instead |
11739 | * of scanning the entire bus. In our implementation, we will kick off |
11740 | * firmware discovery. |
11741 | */ |
11742 | static void |
11743 | scsih_scan_start(struct Scsi_Host *shost) |
11744 | { |
11745 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
11746 | int rc; |
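 | /* |
 |  * Register host trace/diag buffers before kicking off discovery, |
 |  * either as requested via the diag_buffer_enable module parameter or |
 |  * when Manufacturing Page 11 advertises a trace buffer size. |
 |  */ |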
11747 | if (diag_buffer_enable != -1 && diag_buffer_enable != 0) |
11748 | mpt3sas_enable_diag_buffer(ioc, bits_to_register: diag_buffer_enable); |
11749 | else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) |
11750 | mpt3sas_enable_diag_buffer(ioc, bits_to_register: 1); |
11751 | |
11752 | if (disable_discovery > 0) |
11753 | return; |
11754 | |
11755 | ioc->start_scan = 1; |
11756 | rc = mpt3sas_port_enable(ioc); |
11757 | |
11758 | if (rc != 0) |
11759 | ioc_info(ioc, "port enable: FAILED\n"); |
11760 | } |
11761 | |
11762 | /** |
11763 | * _scsih_complete_devices_scanning - add the devices to sml and |
11764 | * complete ioc initialization. |
11765 | * @ioc: per adapter object |
11766 | * |
11767 | * Return nothing. |
11768 | */ |
11769 | static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc) |
11770 | { |
11771 | |
11772 | if (ioc->wait_for_discovery_to_complete) { |
11773 | ioc->wait_for_discovery_to_complete = 0; |
11774 | _scsih_probe_devices(ioc); |
11775 | } |
11776 | |
11777 | mpt3sas_base_start_watchdog(ioc); |
11778 | ioc->is_driver_loading = 0; |
11779 | } |
11780 | |
11781 | /** |
11782 | * scsih_scan_finished - scsi lld callback for .scan_finished |
11783 | * @shost: SCSI host pointer |
11784 | * @time: elapsed time of the scan in jiffies |
11785 | * |
11786 | * This function will be called periodically until it returns 1 with the |
11787 | * scsi_host and the elapsed time of the scan in jiffies. In our implementation, |
11788 | * we wait for firmware discovery to complete, then return 1. |
11789 | */ |
11790 | static int |
11791 | scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) |
11792 | { |
11793 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
11794 | u32 ioc_state; |
11795 | int issue_hard_reset = 0; |
11796 | |
11797 | if (disable_discovery > 0) { |
11798 | ioc->is_driver_loading = 0; |
11799 | ioc->wait_for_discovery_to_complete = 0; |
11800 | return 1; |
11801 | } |
11802 | |
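 | /* Give port enable up to 300 seconds before declaring the scan failed. */ |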
11803 | if (time >= (300 * HZ)) { |
11804 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
11805 | ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n"); |
11806 | ioc->is_driver_loading = 0; |
11807 | return 1; |
11808 | } |
11809 | |
11810 | if (ioc->start_scan) { |
11811 | ioc_state = mpt3sas_base_get_iocstate(ioc, cooked: 0); |
11812 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { |
11813 | mpt3sas_print_fault_code(ioc, ioc_state & |
11814 | MPI2_DOORBELL_DATA_MASK); |
11815 | issue_hard_reset = 1; |
11816 | goto out; |
11817 | } else if ((ioc_state & MPI2_IOC_STATE_MASK) == |
11818 | MPI2_IOC_STATE_COREDUMP) { |
11819 | mpt3sas_base_coredump_info(ioc, fault_code: ioc_state & |
11820 | MPI2_DOORBELL_DATA_MASK); |
11821 | mpt3sas_base_wait_for_coredump_completion(ioc, caller: __func__); |
11822 | issue_hard_reset = 1; |
11823 | goto out; |
11824 | } |
11825 | return 0; |
11826 | } |
11827 | |
11828 | if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) { |
11829 | ioc_info(ioc, |
11830 | "port enable: aborted due to diag reset\n"); |
11831 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
11832 | goto out; |
11833 | } |
11834 | if (ioc->start_scan_failed) { |
11835 | ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n", |
11836 | ioc->start_scan_failed); |
11837 | ioc->is_driver_loading = 0; |
11838 | ioc->wait_for_discovery_to_complete = 0; |
11839 | ioc->remove_host = 1; |
11840 | return 1; |
11841 | } |
11842 | |
11843 | ioc_info(ioc, "port enable: SUCCESS\n"); |
11844 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
11845 | _scsih_complete_devices_scanning(ioc); |
11846 | |
11847 | out: |
11848 | if (issue_hard_reset) { |
11849 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
11850 | if (mpt3sas_base_hard_reset_handler(ioc, type: SOFT_RESET)) |
11851 | ioc->is_driver_loading = 0; |
11852 | } |
11853 | return 1; |
11854 | } |
11855 | |
11856 | /** |
11857 | * scsih_map_queues - map reply queues with request queues |
11858 | * @shost: SCSI host pointer |
11859 | */ |
11860 | static void scsih_map_queues(struct Scsi_Host *shost) |
11861 | { |
11862 | struct MPT3SAS_ADAPTER *ioc = |
11863 | (struct MPT3SAS_ADAPTER *)shost->hostdata; |
11864 | struct blk_mq_queue_map *map; |
11865 | int i, qoff, offset; |
11866 | int nr_msix_vectors = ioc->iopoll_q_start_index; |
11867 | int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; |
11868 | |
11869 | if (shost->nr_hw_queues == 1) |
11870 | return; |
11871 | |
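 | /* |
 |  * Split the hardware queues: the default map gets the regular MSI-X |
 |  * vectors (offset past the high-iops queues), while the poll map |
 |  * gets the interrupt-less io_poll queues. |
 |  */ |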
11872 | for (i = 0, qoff = 0; i < shost->nr_maps; i++) { |
11873 | map = &shost->tag_set.map[i]; |
11874 | map->nr_queues = 0; |
11875 | offset = 0; |
11876 | if (i == HCTX_TYPE_DEFAULT) { |
11877 | map->nr_queues = |
11878 | nr_msix_vectors - ioc->high_iops_queues; |
11879 | offset = ioc->high_iops_queues; |
11880 | } else if (i == HCTX_TYPE_POLL) |
11881 | map->nr_queues = iopoll_q_count; |
11882 | |
11883 | if (!map->nr_queues) |
11884 | BUG_ON(i == HCTX_TYPE_DEFAULT); |
11885 | |
11886 | /* |
11887 | * The poll queues don't have an IRQ (and hence no IRQ |
11888 | * affinity), so use the regular blk-mq cpu mapping |
11889 | */ |
11890 | map->queue_offset = qoff; |
11891 | if (i != HCTX_TYPE_POLL) |
11892 | blk_mq_map_hw_queues(qmap: map, dev: &ioc->pdev->dev, offset); |
11893 | else |
11894 | blk_mq_map_queues(qmap: map); |
11895 | |
11896 | qoff += map->nr_queues; |
11897 | } |
11898 | } |
11899 | |
11900 | /* shost template for SAS 2.0 HBA devices */ |
11901 | static const struct scsi_host_template mpt2sas_driver_template = { |
11902 | .module = THIS_MODULE, |
11903 | .name = "Fusion MPT SAS Host", |
11904 | .proc_name = MPT2SAS_DRIVER_NAME, |
11905 | .queuecommand = scsih_qcmd, |
11906 | .target_alloc = scsih_target_alloc, |
11907 | .sdev_init = scsih_sdev_init, |
11908 | .sdev_configure = scsih_sdev_configure, |
11909 | .target_destroy = scsih_target_destroy, |
11910 | .sdev_destroy = scsih_sdev_destroy, |
11911 | .scan_finished = scsih_scan_finished, |
11912 | .scan_start = scsih_scan_start, |
11913 | .change_queue_depth = scsih_change_queue_depth, |
11914 | .eh_abort_handler = scsih_abort, |
11915 | .eh_device_reset_handler = scsih_dev_reset, |
11916 | .eh_target_reset_handler = scsih_target_reset, |
11917 | .eh_host_reset_handler = scsih_host_reset, |
11918 | .bios_param = scsih_bios_param, |
11919 | .can_queue = 1, |
11920 | .this_id = -1, |
11921 | .sg_tablesize = MPT2SAS_SG_DEPTH, |
11922 | .max_sectors = 32767, |
11923 | .cmd_per_lun = 7, |
11924 | .shost_groups = mpt3sas_host_groups, |
11925 | .sdev_groups = mpt3sas_dev_groups, |
11926 | .track_queue_depth = 1, |
11927 | .cmd_size = sizeof(struct scsiio_tracker), |
11928 | }; |
11929 | |
11930 | /* raid transport support for SAS 2.0 HBA devices */ |
11931 | static struct raid_function_template mpt2sas_raid_functions = { |
11932 | .cookie = &mpt2sas_driver_template, |
11933 | .is_raid = scsih_is_raid, |
11934 | .get_resync = scsih_get_resync, |
11935 | .get_state = scsih_get_state, |
11936 | }; |
11937 | |
11938 | /* shost template for SAS 3.0 HBA devices */ |
11939 | static const struct scsi_host_template mpt3sas_driver_template = { |
11940 | .module = THIS_MODULE, |
11941 | .name = "Fusion MPT SAS Host", |
11942 | .proc_name = MPT3SAS_DRIVER_NAME, |
11943 | .queuecommand = scsih_qcmd, |
11944 | .target_alloc = scsih_target_alloc, |
11945 | .sdev_init = scsih_sdev_init, |
11946 | .sdev_configure = scsih_sdev_configure, |
11947 | .target_destroy = scsih_target_destroy, |
11948 | .sdev_destroy = scsih_sdev_destroy, |
11949 | .scan_finished = scsih_scan_finished, |
11950 | .scan_start = scsih_scan_start, |
11951 | .change_queue_depth = scsih_change_queue_depth, |
11952 | .eh_abort_handler = scsih_abort, |
11953 | .eh_device_reset_handler = scsih_dev_reset, |
11954 | .eh_target_reset_handler = scsih_target_reset, |
11955 | .eh_host_reset_handler = scsih_host_reset, |
11956 | .bios_param = scsih_bios_param, |
11957 | .can_queue = 1, |
11958 | .this_id = -1, |
11959 | .sg_tablesize = MPT3SAS_SG_DEPTH, |
11960 | .max_sectors = 32767, |
11961 | .max_segment_size = 0xffffffff, |
11962 | .cmd_per_lun = 128, |
11963 | .shost_groups = mpt3sas_host_groups, |
11964 | .sdev_groups = mpt3sas_dev_groups, |
11965 | .track_queue_depth = 1, |
11966 | .cmd_size = sizeof(struct scsiio_tracker), |
11967 | .map_queues = scsih_map_queues, |
11968 | .mq_poll = mpt3sas_blk_mq_poll, |
11969 | }; |
11970 | |
11971 | /* raid transport support for SAS 3.0 HBA devices */ |
11972 | static struct raid_function_template mpt3sas_raid_functions = { |
11973 | .cookie = &mpt3sas_driver_template, |
11974 | .is_raid = scsih_is_raid, |
11975 | .get_resync = scsih_get_resync, |
11976 | .get_state = scsih_get_state, |
11977 | }; |
11978 | |
11979 | /** |
11980 | * _scsih_determine_hba_mpi_version - determine in which MPI version class |
11981 | * this device belongs to. |
11982 | * @pdev: PCI device struct |
11983 | * |
11984 | * return MPI2_VERSION for SAS 2.0 HBA devices, |
11985 | * MPI25_VERSION for SAS 3.0 HBA devices, and |
11986 | * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices |
11987 | */ |
11988 | static u16 |
11989 | _scsih_determine_hba_mpi_version(struct pci_dev *pdev) |
11990 | { |
11991 | |
11992 | switch (pdev->device) { |
11993 | case MPI2_MFGPAGE_DEVID_SSS6200: |
11994 | case MPI2_MFGPAGE_DEVID_SAS2004: |
11995 | case MPI2_MFGPAGE_DEVID_SAS2008: |
11996 | case MPI2_MFGPAGE_DEVID_SAS2108_1: |
11997 | case MPI2_MFGPAGE_DEVID_SAS2108_2: |
11998 | case MPI2_MFGPAGE_DEVID_SAS2108_3: |
11999 | case MPI2_MFGPAGE_DEVID_SAS2116_1: |
12000 | case MPI2_MFGPAGE_DEVID_SAS2116_2: |
12001 | case MPI2_MFGPAGE_DEVID_SAS2208_1: |
12002 | case MPI2_MFGPAGE_DEVID_SAS2208_2: |
12003 | case MPI2_MFGPAGE_DEVID_SAS2208_3: |
12004 | case MPI2_MFGPAGE_DEVID_SAS2208_4: |
12005 | case MPI2_MFGPAGE_DEVID_SAS2208_5: |
12006 | case MPI2_MFGPAGE_DEVID_SAS2208_6: |
12007 | case MPI2_MFGPAGE_DEVID_SAS2308_1: |
12008 | case MPI2_MFGPAGE_DEVID_SAS2308_2: |
12009 | case MPI2_MFGPAGE_DEVID_SAS2308_3: |
12010 | case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: |
12011 | case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: |
12012 | return MPI2_VERSION; |
12013 | case MPI25_MFGPAGE_DEVID_SAS3004: |
12014 | case MPI25_MFGPAGE_DEVID_SAS3008: |
12015 | case MPI25_MFGPAGE_DEVID_SAS3108_1: |
12016 | case MPI25_MFGPAGE_DEVID_SAS3108_2: |
12017 | case MPI25_MFGPAGE_DEVID_SAS3108_5: |
12018 | case MPI25_MFGPAGE_DEVID_SAS3108_6: |
12019 | return MPI25_VERSION; |
12020 | case MPI26_MFGPAGE_DEVID_SAS3216: |
12021 | case MPI26_MFGPAGE_DEVID_SAS3224: |
12022 | case MPI26_MFGPAGE_DEVID_SAS3316_1: |
12023 | case MPI26_MFGPAGE_DEVID_SAS3316_2: |
12024 | case MPI26_MFGPAGE_DEVID_SAS3316_3: |
12025 | case MPI26_MFGPAGE_DEVID_SAS3316_4: |
12026 | case MPI26_MFGPAGE_DEVID_SAS3324_1: |
12027 | case MPI26_MFGPAGE_DEVID_SAS3324_2: |
12028 | case MPI26_MFGPAGE_DEVID_SAS3324_3: |
12029 | case MPI26_MFGPAGE_DEVID_SAS3324_4: |
12030 | case MPI26_MFGPAGE_DEVID_SAS3508: |
12031 | case MPI26_MFGPAGE_DEVID_SAS3508_1: |
12032 | case MPI26_MFGPAGE_DEVID_SAS3408: |
12033 | case MPI26_MFGPAGE_DEVID_SAS3516: |
12034 | case MPI26_MFGPAGE_DEVID_SAS3516_1: |
12035 | case MPI26_MFGPAGE_DEVID_SAS3416: |
12036 | case MPI26_MFGPAGE_DEVID_SAS3616: |
12037 | case MPI26_ATLAS_PCIe_SWITCH_DEVID: |
12038 | case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: |
12039 | case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: |
12040 | case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: |
12041 | case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: |
12042 | case MPI26_MFGPAGE_DEVID_INVALID0_3916: |
12043 | case MPI26_MFGPAGE_DEVID_INVALID1_3916: |
12044 | case MPI26_MFGPAGE_DEVID_INVALID0_3816: |
12045 | case MPI26_MFGPAGE_DEVID_INVALID1_3816: |
12046 | return MPI26_VERSION; |
12047 | } |
12048 | return 0; |
12049 | } |
12050 | |
12051 | /** |
12052 | * _scsih_probe - attach and add scsi host |
12053 | * @pdev: PCI device struct |
12054 | * @id: pci device id |
12055 | * |
12056 | * Return: 0 success, anything else error. |
12057 | */ |
12058 | static int |
12059 | _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
12060 | { |
12061 | struct MPT3SAS_ADAPTER *ioc; |
12062 | struct Scsi_Host *shost = NULL; |
12063 | int rv; |
12064 | u16 hba_mpi_version; |
12065 | int iopoll_q_count = 0; |
12066 | |
12067 | /* Determine in which MPI version class this pci device belongs */ |
12068 | hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); |
12069 | if (hba_mpi_version == 0) |
12070 | return -ENODEV; |
12071 | |
12072 | /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, |
12073 | * for other generation HBA's return with -ENODEV |
12074 | */ |
12075 | if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) |
12076 | return -ENODEV; |
12077 | |
12078 | /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, |
12079 | * for other generation HBA's return with -ENODEV |
12080 | */ |
12081 | if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION |
12082 | || hba_mpi_version == MPI26_VERSION))) |
12083 | return -ENODEV; |
12084 | |
12085 | switch (hba_mpi_version) { |
12086 | case MPI2_VERSION: |
12087 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | |
12088 | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); |
12089 | /* Use mpt2sas driver host template for SAS 2.0 HBA's */ |
12090 | shost = scsi_host_alloc(&mpt2sas_driver_template, |
12091 | sizeof(struct MPT3SAS_ADAPTER)); |
12092 | if (!shost) |
12093 | return -ENODEV; |
12094 | ioc = shost_priv(shost); |
12095 | memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); |
12096 | ioc->hba_mpi_version_belonged = hba_mpi_version; |
12097 | ioc->id = mpt2_ids++; |
12098 | sprintf(buf: ioc->driver_name, fmt: "%s", MPT2SAS_DRIVER_NAME); |
12099 | switch (pdev->device) { |
12100 | case MPI2_MFGPAGE_DEVID_SSS6200: |
12101 | ioc->is_warpdrive = 1; |
12102 | ioc->hide_ir_msg = 1; |
12103 | break; |
12104 | case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: |
12105 | case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: |
12106 | ioc->is_mcpu_endpoint = 1; |
12107 | break; |
12108 | default: |
12109 | ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; |
12110 | break; |
12111 | } |
12112 | |
12113 | if (multipath_on_hba == -1 || multipath_on_hba == 0) |
12114 | ioc->multipath_on_hba = 0; |
12115 | else |
12116 | ioc->multipath_on_hba = 1; |
12117 | |
12118 | break; |
12119 | case MPI25_VERSION: |
12120 | case MPI26_VERSION: |
12121 | /* Use mpt3sas driver host template for SAS 3.0 HBA's */ |
12122 | shost = scsi_host_alloc(&mpt3sas_driver_template, |
12123 | sizeof(struct MPT3SAS_ADAPTER)); |
12124 | if (!shost) |
12125 | return -ENODEV; |
12126 | ioc = shost_priv(shost); |
12127 | memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); |
12128 | ioc->hba_mpi_version_belonged = hba_mpi_version; |
12129 | ioc->id = mpt3_ids++; |
12130 | sprintf(buf: ioc->driver_name, fmt: "%s", MPT3SAS_DRIVER_NAME); |
12131 | switch (pdev->device) { |
12132 | case MPI26_MFGPAGE_DEVID_SAS3508: |
12133 | case MPI26_MFGPAGE_DEVID_SAS3508_1: |
12134 | case MPI26_MFGPAGE_DEVID_SAS3408: |
12135 | case MPI26_MFGPAGE_DEVID_SAS3516: |
12136 | case MPI26_MFGPAGE_DEVID_SAS3516_1: |
12137 | case MPI26_MFGPAGE_DEVID_SAS3416: |
12138 | case MPI26_MFGPAGE_DEVID_SAS3616: |
12139 | case MPI26_ATLAS_PCIe_SWITCH_DEVID: |
12140 | ioc->is_gen35_ioc = 1; |
12141 | break; |
12142 | case MPI26_MFGPAGE_DEVID_INVALID0_3816: |
12143 | case MPI26_MFGPAGE_DEVID_INVALID0_3916: |
12144 | dev_err(&pdev->dev, |
12145 | "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", |
12146 | pdev->device, pdev->subsystem_vendor, |
12147 | pdev->subsystem_device); |
12148 | return 1; |
12149 | case MPI26_MFGPAGE_DEVID_INVALID1_3816: |
12150 | case MPI26_MFGPAGE_DEVID_INVALID1_3916: |
12151 | dev_err(&pdev->dev, |
12152 | "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", |
12153 | pdev->device, pdev->subsystem_vendor, |
12154 | pdev->subsystem_device); |
12155 | return 1; |
12156 | case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: |
12157 | case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: |
12158 | dev_info(&pdev->dev, |
12159 | "HBA is in Configurable Secure mode\n"); |
12160 | fallthrough; |
12161 | case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: |
12162 | case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: |
12163 | ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; |
12164 | break; |
12165 | default: |
12166 | ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; |
12167 | } |
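 | /* |
 |  * SAS 3.0 C0 (and newer) and all MPI 2.6 based IOCs use the combined |
 |  * reply queue scheme; gen 3.5 IOCs expose more reply post host index |
 |  * registers than gen 3 IOCs. |
 |  */ |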
12168 | if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && |
12169 | pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || |
12170 | (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { |
12171 | ioc->combined_reply_queue = 1; |
12172 | if (ioc->is_gen35_ioc) |
12173 | ioc->combined_reply_index_count = |
12174 | MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; |
12175 | else |
12176 | ioc->combined_reply_index_count = |
12177 | MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; |
12178 | } |
12179 | |
12180 | switch (ioc->is_gen35_ioc) { |
12181 | case 0: |
12182 | if (multipath_on_hba == -1 || multipath_on_hba == 0) |
12183 | ioc->multipath_on_hba = 0; |
12184 | else |
12185 | ioc->multipath_on_hba = 1; |
12186 | break; |
12187 | case 1: |
12188 | if (multipath_on_hba == -1 || multipath_on_hba > 0) |
12189 | ioc->multipath_on_hba = 1; |
12190 | else |
12191 | ioc->multipath_on_hba = 0; |
12192 | break; |
12193 | default: |
12194 | break; |
12195 | } |
12196 | |
12197 | break; |
12198 | default: |
12199 | return -ENODEV; |
12200 | } |
12201 | |
12202 | INIT_LIST_HEAD(list: &ioc->list); |
12203 | spin_lock(lock: &gioc_lock); |
12204 | list_add_tail(new: &ioc->list, head: &mpt3sas_ioc_list); |
12205 | spin_unlock(lock: &gioc_lock); |
12206 | ioc->shost = shost; |
12207 | ioc->pdev = pdev; |
12208 | ioc->scsi_io_cb_idx = scsi_io_cb_idx; |
12209 | ioc->tm_cb_idx = tm_cb_idx; |
12210 | ioc->ctl_cb_idx = ctl_cb_idx; |
12211 | ioc->base_cb_idx = base_cb_idx; |
12212 | ioc->port_enable_cb_idx = port_enable_cb_idx; |
12213 | ioc->transport_cb_idx = transport_cb_idx; |
12214 | ioc->scsih_cb_idx = scsih_cb_idx; |
12215 | ioc->config_cb_idx = config_cb_idx; |
12216 | ioc->tm_tr_cb_idx = tm_tr_cb_idx; |
12217 | ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; |
12218 | ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; |
12219 | ioc->logging_level = logging_level; |
12220 | ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; |
12221 | /* Host waits for minimum of six seconds */ |
12222 | ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; |
12223 | /* |
12224 | * Enable MEMORY MOVE support flag. |
12225 | */ |
12226 | ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; |
12227 | /* Enable ADDITIONAL QUERY support flag. */ |
12228 | ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; |
12229 | |
12230 | ioc->enable_sdev_max_qd = enable_sdev_max_qd; |
12231 | |
12232 | /* misc semaphores and spin locks */ |
12233 | mutex_init(&ioc->reset_in_progress_mutex); |
12234 | mutex_init(&ioc->hostdiag_unlock_mutex); |
12235 | /* initializing pci_access_mutex lock */ |
12236 | mutex_init(&ioc->pci_access_mutex); |
12237 | spin_lock_init(&ioc->ioc_reset_in_progress_lock); |
12238 | spin_lock_init(&ioc->scsi_lookup_lock); |
12239 | spin_lock_init(&ioc->sas_device_lock); |
12240 | spin_lock_init(&ioc->sas_node_lock); |
12241 | spin_lock_init(&ioc->fw_event_lock); |
12242 | spin_lock_init(&ioc->raid_device_lock); |
12243 | spin_lock_init(&ioc->pcie_device_lock); |
12244 | spin_lock_init(&ioc->diag_trigger_lock); |
12245 | |
12246 | INIT_LIST_HEAD(&ioc->sas_device_list); |
12247 | INIT_LIST_HEAD(&ioc->sas_device_init_list); |
12248 | INIT_LIST_HEAD(&ioc->sas_expander_list); |
12249 | INIT_LIST_HEAD(&ioc->enclosure_list); |
12250 | INIT_LIST_HEAD(&ioc->pcie_device_list); |
12251 | INIT_LIST_HEAD(&ioc->pcie_device_init_list); |
12252 | INIT_LIST_HEAD(&ioc->fw_event_list); |
12253 | INIT_LIST_HEAD(&ioc->raid_device_list); |
12254 | INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); |
12255 | INIT_LIST_HEAD(&ioc->delayed_tr_list); |
12256 | INIT_LIST_HEAD(&ioc->delayed_sc_list); |
12257 | INIT_LIST_HEAD(&ioc->delayed_event_ack_list); |
12258 | INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); |
12259 | INIT_LIST_HEAD(&ioc->reply_queue_list); |
12260 | INIT_LIST_HEAD(&ioc->port_table_list); |
12261 | |
12262 | sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); |
12263 | |
12264 | /* init shost parameters */ |
12265 | shost->max_cmd_len = 32; |
12266 | shost->max_lun = max_lun; |
12267 | shost->transportt = mpt3sas_transport_template; |
12268 | shost->unique_id = ioc->id; |
12269 | |
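| /* |
|  * max_sectors handling: mCPU endpoints are fixed at 128 sectors (64K I/O). |
|  * Otherwise, when the module parameter is set (!= 0xFFFF) it is clamped to |
|  * 64..32767 and rounded down to an even value. Illustrative example: |
|  * "modprobe mpt3sas max_sectors=8192" caps each I/O at 4 MiB |
|  * (8192 x 512-byte sectors). |
|  */ |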
12270 | if (ioc->is_mcpu_endpoint) { |
12271 | /* mCPU MPI support 64K max IO */ |
12272 | shost->max_sectors = 128; |
12273 | ioc_info(ioc, "The max_sectors value is set to %d\n", |
12274 | shost->max_sectors); |
12275 | } else { |
12276 | if (max_sectors != 0xFFFF) { |
12277 | if (max_sectors < 64) { |
12278 | shost->max_sectors = 64; |
12279 | ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", |
12280 | max_sectors); |
12281 | } else if (max_sectors > 32767) { |
12282 | shost->max_sectors = 32767; |
12283 | ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n", |
12284 | max_sectors); |
12285 | } else { |
12286 | shost->max_sectors = max_sectors & 0xFFFE; |
12287 | ioc_info(ioc, "The max_sectors value is set to %d\n", |
12288 | shost->max_sectors); |
12289 | } |
12290 | } |
12291 | } |
12292 | /* register EEDP capabilities with SCSI layer */ |
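| /* |
|  * prot_mask bits 0-2 map to SHOST_DIF_TYPE1/2/3_PROTECTION; a negative |
|  * value advertises all three DIF types. The DIX guard type is CRC. |
|  */ |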
12293 | if (prot_mask >= 0) |
12294 | scsi_host_set_prot(shost, (prot_mask & 0x07)); |
12295 | else |
12296 | scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
12297 | | SHOST_DIF_TYPE2_PROTECTION |
12298 | | SHOST_DIF_TYPE3_PROTECTION); |
12299 | |
12300 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); |
12301 | |
12302 | /* event thread */ |
12303 | ioc->firmware_event_thread = alloc_ordered_workqueue( |
12304 | "fw_event_%s%d", 0, ioc->driver_name, ioc->id); |
12305 | if (!ioc->firmware_event_thread) { |
12306 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
12307 | __FILE__, __LINE__, __func__); |
12308 | rv = -ENODEV; |
12309 | goto out_thread_fail; |
12310 | } |
12311 | |
12312 | shost->host_tagset = 0; |
12313 | |
12314 | if (ioc->is_gen35_ioc && host_tagset_enable) |
12315 | shost->host_tagset = 1; |
12316 | |
12317 | ioc->is_driver_loading = 1; |
12318 | if ((mpt3sas_base_attach(ioc))) { |
12319 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
12320 | __FILE__, __LINE__, __func__); |
12321 | rv = -ENODEV; |
12322 | goto out_attach_fail; |
12323 | } |
12324 | |
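| /* |
|  * WarpDrive only: manufacturing page 10 decides whether member disks are |
|  * hidden behind the RAID volume; if neither EXPOSE nor HIDE is set, drives |
|  * are hidden only when at least one volume exists. |
|  */ |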
12325 | if (ioc->is_warpdrive) { |
12326 | if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) |
12327 | ioc->hide_drives = 0; |
12328 | else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) |
12329 | ioc->hide_drives = 1; |
12330 | else { |
12331 | if (mpt3sas_get_num_volumes(ioc)) |
12332 | ioc->hide_drives = 1; |
12333 | else |
12334 | ioc->hide_drives = 0; |
12335 | } |
12336 | } else |
12337 | ioc->hide_drives = 0; |
12338 | |
12339 | shost->nr_hw_queues = 1; |
12340 | |
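| /* |
|  * With host_tagset, one host-wide tag space is shared across all exposed |
|  * hardware queues (reply queues minus the high-IOPS queues); if io-poll |
|  * queues exist, a third blk-mq map is added for polled I/O. |
|  */ |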
12341 | if (shost->host_tagset) { |
12342 | shost->nr_hw_queues = |
12343 | ioc->reply_queue_count - ioc->high_iops_queues; |
12344 | |
12345 | iopoll_q_count = |
12346 | ioc->reply_queue_count - ioc->iopoll_q_start_index; |
12347 | |
12348 | shost->nr_maps = iopoll_q_count ? 3 : 1; |
12349 | |
12350 | dev_info(&ioc->pdev->dev, |
12351 | "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", |
12352 | shost->can_queue, shost->nr_hw_queues); |
12353 | } |
12354 | |
12355 | rv = scsi_add_host(shost, &pdev->dev); |
12356 | if (rv) { |
12357 | ioc_err(ioc, "failure at %s:%d/%s()!\n", |
12358 | __FILE__, __LINE__, __func__); |
12359 | goto out_add_shost_fail; |
12360 | } |
12361 | |
12362 | scsi_scan_host(shost); |
12363 | mpt3sas_setup_debugfs(ioc); |
12364 | return 0; |
12365 | out_add_shost_fail: |
12366 | mpt3sas_base_detach(ioc); |
12367 | out_attach_fail: |
12368 | destroy_workqueue(ioc->firmware_event_thread); |
12369 | out_thread_fail: |
12370 | spin_lock(&gioc_lock); |
12371 | list_del(&ioc->list); |
12372 | spin_unlock(&gioc_lock); |
12373 | scsi_host_put(shost); |
12374 | return rv; |
12375 | } |
12376 | |
12377 | /** |
12378 | * scsih_suspend - power management suspend main entry point |
12379 | * @dev: Device struct |
12380 | * |
12381 | * Return: 0 success, anything else error. |
12382 | */ |
12383 | static int __maybe_unused |
12384 | scsih_suspend(struct device *dev) |
12385 | { |
12386 | struct pci_dev *pdev = to_pci_dev(dev); |
12387 | struct Scsi_Host *shost; |
12388 | struct MPT3SAS_ADAPTER *ioc; |
12389 | int rc; |
12390 | |
12391 | rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); |
12392 | if (rc) |
12393 | return rc; |
12394 | |
12395 | mpt3sas_base_stop_watchdog(ioc); |
12396 | scsi_block_requests(shost); |
12397 | _scsih_nvme_shutdown(ioc); |
12398 | ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n", |
12399 | pdev, pci_name(pdev)); |
12400 | |
12401 | mpt3sas_base_free_resources(ioc); |
12402 | return 0; |
12403 | } |
12404 | |
12405 | /** |
12406 | * scsih_resume - power management resume main entry point |
12407 | * @dev: Device struct |
12408 | * |
12409 | * Return: 0 success, anything else error. |
12410 | */ |
12411 | static int __maybe_unused |
12412 | scsih_resume(struct device *dev) |
12413 | { |
12414 | struct pci_dev *pdev = to_pci_dev(dev); |
12415 | struct Scsi_Host *shost; |
12416 | struct MPT3SAS_ADAPTER *ioc; |
12417 | pci_power_t device_state = pdev->current_state; |
12418 | int r; |
12419 | |
12420 | r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); |
12421 | if (r) |
12422 | return r; |
12423 | |
12424 | ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", |
12425 | pdev, pci_name(pdev), device_state); |
12426 | |
12427 | ioc->pdev = pdev; |
12428 | r = mpt3sas_base_map_resources(ioc); |
12429 | if (r) |
12430 | return r; |
12431 | ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); |
12432 | mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); |
12433 | scsi_unblock_requests(shost); |
12434 | mpt3sas_base_start_watchdog(ioc); |
12435 | return 0; |
12436 | } |
12437 | |
12438 | /** |
12439 | * scsih_pci_error_detected - Called when a PCI error is detected. |
12440 | * @pdev: PCI device struct |
12441 | * @state: PCI channel state |
12442 | * |
12443 | * Description: Called when a PCI error is detected. |
12444 | * |
12445 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. |
12446 | */ |
12447 | static pci_ers_result_t |
12448 | scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
12449 | { |
12450 | struct Scsi_Host *shost; |
12451 | struct MPT3SAS_ADAPTER *ioc; |
12452 | |
12453 | if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) |
12454 | return PCI_ERS_RESULT_DISCONNECT; |
12455 | |
12456 | ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); |
12457 | |
12458 | switch (state) { |
12459 | case pci_channel_io_normal: |
12460 | return PCI_ERS_RESULT_CAN_RECOVER; |
12461 | case pci_channel_io_frozen: |
12462 | /* Fatal error, prepare for slot reset */ |
12463 | ioc->pci_error_recovery = 1; |
12464 | scsi_block_requests(ioc->shost); |
12465 | mpt3sas_base_stop_watchdog(ioc); |
12466 | mpt3sas_base_free_resources(ioc); |
12467 | return PCI_ERS_RESULT_NEED_RESET; |
12468 | case pci_channel_io_perm_failure: |
12469 | /* Permanent error, prepare for device removal */ |
12470 | ioc->pci_error_recovery = 1; |
12471 | mpt3sas_base_stop_watchdog(ioc); |
12472 | mpt3sas_base_pause_mq_polling(ioc); |
12473 | _scsih_flush_running_cmds(ioc); |
12474 | return PCI_ERS_RESULT_DISCONNECT; |
12475 | } |
12476 | return PCI_ERS_RESULT_NEED_RESET; |
12477 | } |
12478 | |
12479 | /** |
12480 | * scsih_pci_slot_reset - Called when PCI slot has been reset. |
12481 | * @pdev: PCI device struct |
12482 | * |
12483 | * Description: This routine is called by the pci error recovery |
12484 | * code after the PCI slot has been reset, just before we |
12485 | * should resume normal operations. |
12486 | */ |
12487 | static pci_ers_result_t |
12488 | scsih_pci_slot_reset(struct pci_dev *pdev) |
12489 | { |
12490 | struct Scsi_Host *shost; |
12491 | struct MPT3SAS_ADAPTER *ioc; |
12492 | int rc; |
12493 | |
12494 | if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) |
12495 | return PCI_ERS_RESULT_DISCONNECT; |
12496 | |
12497 | ioc_info(ioc, "PCI error: slot reset callback!!\n"); |
12498 | |
12499 | ioc->pci_error_recovery = 0; |
12500 | ioc->pdev = pdev; |
12501 | pci_restore_state(pdev); |
12502 | rc = mpt3sas_base_map_resources(ioc); |
12503 | if (rc) |
12504 | return PCI_ERS_RESULT_DISCONNECT; |
12505 | |
12506 | ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); |
12507 | rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); |
12508 | |
12509 | ioc_warn(ioc, "hard reset: %s\n", |
12510 | (rc == 0) ? "success": "failed"); |
12511 | |
12512 | if (!rc) |
12513 | return PCI_ERS_RESULT_RECOVERED; |
12514 | else |
12515 | return PCI_ERS_RESULT_DISCONNECT; |
12516 | } |
12517 | |
12518 | /** |
12519 | * scsih_pci_resume() - resume normal ops after PCI reset |
12520 | * @pdev: pointer to PCI device |
12521 | * |
12522 | * Called when the error recovery driver tells us that its |
12523 | * OK to resume normal operation. Use completion to allow |
12524 | * halted scsi ops to resume. |
12525 | */ |
12526 | static void |
12527 | scsih_pci_resume(struct pci_dev *pdev) |
12528 | { |
12529 | struct Scsi_Host *shost; |
12530 | struct MPT3SAS_ADAPTER *ioc; |
12531 | |
12532 | if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) |
12533 | return; |
12534 | |
12535 | ioc_info(ioc, "PCI error: resume callback!!\n"); |
12536 | |
12537 | mpt3sas_base_start_watchdog(ioc); |
12538 | scsi_unblock_requests(ioc->shost); |
12539 | } |
12540 | |
12541 | /** |
12542 | * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers |
12543 | * @pdev: pointer to PCI device |
12544 | */ |
12545 | static pci_ers_result_t |
12546 | scsih_pci_mmio_enabled(struct pci_dev *pdev) |
12547 | { |
12548 | struct Scsi_Host *shost; |
12549 | struct MPT3SAS_ADAPTER *ioc; |
12550 | |
12551 | if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) |
12552 | return PCI_ERS_RESULT_DISCONNECT; |
12553 | |
12554 | ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); |
12555 | |
12556 | /* TODO - dump whatever for debugging purposes */ |
12557 | |
12558 | /* This is called only if scsih_pci_error_detected returns |
12559 | * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still |
12560 | * works, no need to reset slot. |
12561 | */ |
12562 | return PCI_ERS_RESULT_RECOVERED; |
12563 | } |
12564 | |
12565 | /* |
12566 | * The pci device ids are defined in mpi/mpi2_cnfg.h. |
12567 | */ |
12568 | static const struct pci_device_id mpt3sas_pci_table[] = { |
12569 | /* Spitfire ~ 2004 */ |
12570 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, |
12571 | PCI_ANY_ID, PCI_ANY_ID }, |
12572 | /* Falcon ~ 2008 */ |
12573 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, |
12574 | PCI_ANY_ID, PCI_ANY_ID }, |
12575 | /* Liberator ~ 2108 */ |
12576 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, |
12577 | PCI_ANY_ID, PCI_ANY_ID }, |
12578 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2, |
12579 | PCI_ANY_ID, PCI_ANY_ID }, |
12580 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, |
12581 | PCI_ANY_ID, PCI_ANY_ID }, |
12582 | /* Meteor ~ 2116 */ |
12583 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, |
12584 | PCI_ANY_ID, PCI_ANY_ID }, |
12585 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, |
12586 | PCI_ANY_ID, PCI_ANY_ID }, |
12587 | /* Thunderbolt ~ 2208 */ |
12588 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, |
12589 | PCI_ANY_ID, PCI_ANY_ID }, |
12590 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, |
12591 | PCI_ANY_ID, PCI_ANY_ID }, |
12592 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, |
12593 | PCI_ANY_ID, PCI_ANY_ID }, |
12594 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, |
12595 | PCI_ANY_ID, PCI_ANY_ID }, |
12596 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, |
12597 | PCI_ANY_ID, PCI_ANY_ID }, |
12598 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, |
12599 | PCI_ANY_ID, PCI_ANY_ID }, |
12600 | /* Mustang ~ 2308 */ |
12601 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, |
12602 | PCI_ANY_ID, PCI_ANY_ID }, |
12603 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, |
12604 | PCI_ANY_ID, PCI_ANY_ID }, |
12605 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, |
12606 | PCI_ANY_ID, PCI_ANY_ID }, |
12607 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP, |
12608 | PCI_ANY_ID, PCI_ANY_ID }, |
12609 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1, |
12610 | PCI_ANY_ID, PCI_ANY_ID }, |
12611 | /* SSS6200 */ |
12612 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, |
12613 | PCI_ANY_ID, PCI_ANY_ID }, |
12614 | /* Fury ~ 3004 and 3008 */ |
12615 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, |
12616 | PCI_ANY_ID, PCI_ANY_ID }, |
12617 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, |
12618 | PCI_ANY_ID, PCI_ANY_ID }, |
12619 | /* Invader ~ 3108 */ |
12620 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, |
12621 | PCI_ANY_ID, PCI_ANY_ID }, |
12622 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, |
12623 | PCI_ANY_ID, PCI_ANY_ID }, |
12624 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, |
12625 | PCI_ANY_ID, PCI_ANY_ID }, |
12626 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, |
12627 | PCI_ANY_ID, PCI_ANY_ID }, |
12628 | /* Cutlass ~ 3216 and 3224 */ |
12629 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, |
12630 | PCI_ANY_ID, PCI_ANY_ID }, |
12631 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, |
12632 | PCI_ANY_ID, PCI_ANY_ID }, |
12633 | /* Intruder ~ 3316 and 3324 */ |
12634 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, |
12635 | PCI_ANY_ID, PCI_ANY_ID }, |
12636 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, |
12637 | PCI_ANY_ID, PCI_ANY_ID }, |
12638 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3, |
12639 | PCI_ANY_ID, PCI_ANY_ID }, |
12640 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4, |
12641 | PCI_ANY_ID, PCI_ANY_ID }, |
12642 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1, |
12643 | PCI_ANY_ID, PCI_ANY_ID }, |
12644 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2, |
12645 | PCI_ANY_ID, PCI_ANY_ID }, |
12646 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3, |
12647 | PCI_ANY_ID, PCI_ANY_ID }, |
12648 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, |
12649 | PCI_ANY_ID, PCI_ANY_ID }, |
12650 | /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */ |
12651 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, |
12652 | PCI_ANY_ID, PCI_ANY_ID }, |
12653 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, |
12654 | PCI_ANY_ID, PCI_ANY_ID }, |
12655 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, |
12656 | PCI_ANY_ID, PCI_ANY_ID }, |
12657 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, |
12658 | PCI_ANY_ID, PCI_ANY_ID }, |
12659 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, |
12660 | PCI_ANY_ID, PCI_ANY_ID }, |
12661 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, |
12662 | PCI_ANY_ID, PCI_ANY_ID }, |
12663 | /* Mercator ~ 3616*/ |
12664 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, |
12665 | PCI_ANY_ID, PCI_ANY_ID }, |
12666 | |
12667 | /* Aero SI 0x00E1 Configurable Secure |
12668 | * 0x00E2 Hard Secure |
12669 | */ |
12670 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916, |
12671 | PCI_ANY_ID, PCI_ANY_ID }, |
12672 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916, |
12673 | PCI_ANY_ID, PCI_ANY_ID }, |
12674 | |
12675 | /* |
12676 | * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered |
12677 | */ |
12678 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916, |
12679 | PCI_ANY_ID, PCI_ANY_ID }, |
12680 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916, |
12681 | PCI_ANY_ID, PCI_ANY_ID }, |
12682 | |
12683 | /* Atlas PCIe Switch Management Port */ |
12684 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID, |
12685 | PCI_ANY_ID, PCI_ANY_ID }, |
12686 | |
12687 | /* Sea SI 0x00E5 Configurable Secure |
12688 | * 0x00E6 Hard Secure |
12689 | */ |
12690 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816, |
12691 | PCI_ANY_ID, PCI_ANY_ID }, |
12692 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, |
12693 | PCI_ANY_ID, PCI_ANY_ID }, |
12694 | |
12695 | /* |
12696 | * ATTO Branded ExpressSAS H12xx GT |
12697 | */ |
12698 | { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, |
12699 | PCI_ANY_ID, PCI_ANY_ID }, |
12700 | |
12701 | /* |
12702 | * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered |
12703 | */ |
12704 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816, |
12705 | PCI_ANY_ID, PCI_ANY_ID }, |
12706 | { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816, |
12707 | PCI_ANY_ID, PCI_ANY_ID }, |
12708 | |
12709 | {0} /* Terminating entry */ |
12710 | }; |
12711 | MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); |
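| /* |
|  * MODULE_DEVICE_TABLE exports the PCI ID list above so udev/modprobe can |
|  * autoload this driver when a matching controller is present. |
|  */ |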
12712 | |
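| /* |
|  * PCI/AER recovery flow: error_detected -> (optionally mmio_enabled) -> |
|  * slot_reset -> resume, matching the handlers wired up below. |
|  */ |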
12713 | static const struct pci_error_handlers _mpt3sas_err_handler = { |
12714 | .error_detected = scsih_pci_error_detected, |
12715 | .mmio_enabled = scsih_pci_mmio_enabled, |
12716 | .slot_reset = scsih_pci_slot_reset, |
12717 | .resume = scsih_pci_resume, |
12718 | }; |
12719 | |
12720 | static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume); |
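| /* |
|  * SIMPLE_DEV_PM_OPS wires scsih_suspend/scsih_resume into the system sleep |
|  * callbacks; the __maybe_unused annotations keep the build quiet when |
|  * CONFIG_PM is disabled. |
|  */ |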
12721 | |
12722 | static struct pci_driver mpt3sas_driver = { |
12723 | .name = MPT3SAS_DRIVER_NAME, |
12724 | .id_table = mpt3sas_pci_table, |
12725 | .probe = _scsih_probe, |
12726 | .remove = scsih_remove, |
12727 | .shutdown = scsih_shutdown, |
12728 | .err_handler = &_mpt3sas_err_handler, |
12729 | .driver.pm = &scsih_pm_ops, |
12730 | }; |
12731 | |
12732 | /** |
12733 | * scsih_init - main entry point for this driver. |
12734 | * |
12735 | * Return: 0 success, anything else error. |
12736 | */ |
12737 | static int |
12738 | scsih_init(void) |
12739 | { |
12740 | mpt2_ids = 0; |
12741 | mpt3_ids = 0; |
12742 | |
12743 | mpt3sas_base_initialize_callback_handler(); |
12744 | |
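| /* |
|  * Each mpt3sas_base_register_callback_handler() call returns a small |
|  * callback index; requests are tagged with one of these indexes so the |
|  * base layer can dispatch each reply to the matching completion routine. |
|  */ |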
12745 | /* queuecommand callback handler */ |
12746 | scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); |
12747 |
12748 | /* task management callback handler */ |
12749 | tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); |
12750 |
12751 | /* base internal commands callback handler */ |
12752 | base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done); |
12753 | port_enable_cb_idx = mpt3sas_base_register_callback_handler( |
12754 | mpt3sas_port_enable_done); |
12755 |
12756 | /* transport internal commands callback handler */ |
12757 | transport_cb_idx = mpt3sas_base_register_callback_handler( |
12758 | mpt3sas_transport_done); |
12759 |
12760 | /* scsih internal commands callback handler */ |
12761 | scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done); |
12762 |
12763 | /* configuration page API internal commands callback handler */ |
12764 | config_cb_idx = mpt3sas_base_register_callback_handler( |
12765 | mpt3sas_config_done); |
12766 |
12767 | /* ctl module callback handler */ |
12768 | ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); |
12769 |
12770 | tm_tr_cb_idx = mpt3sas_base_register_callback_handler( |
12771 | _scsih_tm_tr_complete); |
12772 |
12773 | tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( |
12774 | _scsih_tm_volume_tr_complete); |
12775 |
12776 | tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( |
12777 | _scsih_sas_control_complete); |
12778 | |
12779 | mpt3sas_init_debugfs(); |
12780 | return 0; |
12781 | } |
12782 | |
12783 | /** |
12784 | * scsih_exit - exit point for this driver (when it is a module). |
12787 | */ |
12788 | static void |
12789 | scsih_exit(void) |
12790 | { |
12791 | |
12792 | mpt3sas_base_release_callback_handler(scsi_io_cb_idx); |
12793 | mpt3sas_base_release_callback_handler(tm_cb_idx); |
12794 | mpt3sas_base_release_callback_handler(base_cb_idx); |
12795 | mpt3sas_base_release_callback_handler(port_enable_cb_idx); |
12796 | mpt3sas_base_release_callback_handler(transport_cb_idx); |
12797 | mpt3sas_base_release_callback_handler(scsih_cb_idx); |
12798 | mpt3sas_base_release_callback_handler(config_cb_idx); |
12799 | mpt3sas_base_release_callback_handler(ctl_cb_idx); |
12800 |
12801 | mpt3sas_base_release_callback_handler(tm_tr_cb_idx); |
12802 | mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); |
12803 | mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); |
12804 | |
12805 | /* raid transport support */ |
12806 | if (hbas_to_enumerate != 1) |
12807 | raid_class_release(mpt3sas_raid_template); |
12808 | if (hbas_to_enumerate != 2) |
12809 | raid_class_release(mpt2sas_raid_template); |
12810 | sas_release_transport(mpt3sas_transport_template); |
12811 | mpt3sas_exit_debugfs(); |
12812 | } |
12813 | |
12814 | /** |
12815 | * _mpt3sas_init - main entry point for this driver. |
12816 | * |
12817 | * Return: 0 success, anything else error. |
12818 | */ |
12819 | static int __init |
12820 | _mpt3sas_init(void) |
12821 | { |
12822 | int error; |
12823 | |
12824 | pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, |
12825 | MPT3SAS_DRIVER_VERSION); |
12826 | |
12827 | mpt3sas_transport_template = |
12828 | sas_attach_transport(&mpt3sas_transport_functions); |
12829 | if (!mpt3sas_transport_template) |
12830 | return -ENODEV; |
12831 | |
12832 | /* No need to attach the mpt3sas raid functions template |
12833 | * if hbas_to_enumerate value is one. |
12834 | */ |
12835 | if (hbas_to_enumerate != 1) { |
12836 | mpt3sas_raid_template = |
12837 | raid_class_attach(&mpt3sas_raid_functions); |
12838 | if (!mpt3sas_raid_template) { |
12839 | sas_release_transport(mpt3sas_transport_template); |
12840 | return -ENODEV; |
12841 | } |
12842 | } |
12843 | |
12844 | /* No need to attach the mpt2sas raid functions template |
12845 | * if hbas_to_enumerate value is two. |
12846 | */ |
12847 | if (hbas_to_enumerate != 2) { |
12848 | mpt2sas_raid_template = |
12849 | raid_class_attach(&mpt2sas_raid_functions); |
12850 | if (!mpt2sas_raid_template) { |
12851 | sas_release_transport(mpt3sas_transport_template); |
12852 | return -ENODEV; |
12853 | } |
12854 | } |
12855 | |
12856 | error = scsih_init(); |
12857 | if (error) { |
12858 | scsih_exit(); |
12859 | return error; |
12860 | } |
12861 | |
12862 | mpt3sas_ctl_init(hbas_to_enumerate); |
12863 | |
12864 | error = pci_register_driver(&mpt3sas_driver); |
12865 | if (error) { |
12866 | mpt3sas_ctl_exit(hbas_to_enumerate); |
12867 | scsih_exit(); |
12868 | } |
12869 | |
12870 | return error; |
12871 | } |
12872 | |
12873 | /** |
12874 | * _mpt3sas_exit - exit point for this driver (when it is a module). |
12875 | * |
12876 | */ |
12877 | static void __exit |
12878 | _mpt3sas_exit(void) |
12879 | { |
12880 | pr_info("mpt3sas version %s unloading\n", |
12881 | MPT3SAS_DRIVER_VERSION); |
12882 | |
12883 | pci_unregister_driver(&mpt3sas_driver); |
12884 | |
12885 | mpt3sas_ctl_exit(hbas_to_enumerate); |
12886 | |
12887 | scsih_exit(); |
12888 | } |
12889 | |
12890 | module_init(_mpt3sas_init); |
12891 | module_exit(_mpt3sas_exit); |
12892 |