1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * driver for Microchip PQI-based storage controllers |
4 | * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries |
5 | * Copyright (c) 2016-2018 Microsemi Corporation |
6 | * Copyright (c) 2016 PMC-Sierra, Inc. |
7 | * |
8 | * Questions/Comments/Bugfixes to storagedev@microchip.com |
9 | * |
10 | */ |
11 | |
12 | #include <linux/module.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/pci.h> |
15 | #include <linux/delay.h> |
16 | #include <linux/interrupt.h> |
17 | #include <linux/sched.h> |
18 | #include <linux/rtc.h> |
19 | #include <linux/bcd.h> |
20 | #include <linux/reboot.h> |
21 | #include <linux/cciss_ioctl.h> |
22 | #include <linux/blk-mq-pci.h> |
23 | #include <scsi/scsi_host.h> |
24 | #include <scsi/scsi_cmnd.h> |
25 | #include <scsi/scsi_device.h> |
26 | #include <scsi/scsi_eh.h> |
27 | #include <scsi/scsi_transport_sas.h> |
28 | #include <asm/unaligned.h> |
29 | #include "smartpqi.h" |
30 | #include "smartpqi_sis.h" |
31 | |
32 | #if !defined(BUILD_TIMESTAMP) |
33 | #define BUILD_TIMESTAMP |
34 | #endif |
35 | |
36 | #define DRIVER_VERSION "2.1.26-030" |
37 | #define DRIVER_MAJOR 2 |
38 | #define DRIVER_MINOR 1 |
39 | #define DRIVER_RELEASE 26 |
40 | #define DRIVER_REVISION 30 |
41 | |
42 | #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ |
43 | DRIVER_VERSION BUILD_TIMESTAMP ")" |
44 | #define DRIVER_NAME_SHORT "smartpqi" |
45 | |
46 | #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) |
47 | |
48 | #define PQI_POST_RESET_DELAY_SECS 5 |
49 | #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 |
50 | |
51 | #define PQI_NO_COMPLETION ((void *)-1) |
52 | |
53 | MODULE_AUTHOR("Microchip"); |
54 | MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " |
55 | DRIVER_VERSION); |
56 | MODULE_VERSION(DRIVER_VERSION); |
57 | MODULE_LICENSE("GPL"); |
58 | |
59 | struct pqi_cmd_priv { |
60 | int this_residual; |
61 | }; |
62 | |
63 | static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) |
64 | { |
65 | return scsi_cmd_priv(cmd); |
66 | } |
67 | |
68 | static void pqi_verify_structures(void); |
69 | static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, |
70 | enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); |
71 | static void pqi_ctrl_offline_worker(struct work_struct *work); |
72 | static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); |
73 | static void pqi_scan_start(struct Scsi_Host *shost); |
74 | static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, |
75 | struct pqi_queue_group *queue_group, enum pqi_io_path path, |
76 | struct pqi_io_request *io_request); |
77 | static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, |
78 | struct pqi_iu_header *request, unsigned int flags, |
79 | struct pqi_raid_error_info *error_info); |
80 | static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, |
81 | struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, |
82 | unsigned int cdb_length, struct pqi_queue_group *queue_group, |
83 | struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio); |
84 | static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, |
85 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
86 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
87 | struct pqi_scsi_dev_raid_map_data *rmd); |
88 | static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, |
89 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
90 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
91 | struct pqi_scsi_dev_raid_map_data *rmd); |
92 | static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); |
93 | static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); |
94 | static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); |
95 | static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); |
96 | static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); |
97 | static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); |
98 | static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
99 | struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); |
100 | static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); |
101 | static void pqi_tmf_worker(struct work_struct *work); |
102 | |
103 | /* for flags argument to pqi_submit_raid_request_synchronous() */ |
104 | #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 |
105 | |
106 | static struct scsi_transport_template *pqi_sas_transport_template; |
107 | |
108 | static atomic_t pqi_controller_count = ATOMIC_INIT(0); |
109 | |
110 | enum pqi_lockup_action { |
111 | NONE, |
112 | REBOOT, |
113 | PANIC |
114 | }; |
115 | |
116 | static enum pqi_lockup_action pqi_lockup_action = NONE; |
117 | |
118 | static struct { |
119 | enum pqi_lockup_action action; |
120 | char *name; |
121 | } pqi_lockup_actions[] = { |
122 | { |
123 | .action = NONE, |
124 | .name = "none", |
125 | }, |
126 | { |
127 | .action = REBOOT, |
128 | .name = "reboot", |
129 | }, |
130 | { |
131 | .action = PANIC, |
132 | .name = "panic", |
133 | }, |
134 | }; |
135 | |
136 | static unsigned int pqi_supported_event_types[] = { |
137 | PQI_EVENT_TYPE_HOTPLUG, |
138 | PQI_EVENT_TYPE_HARDWARE, |
139 | PQI_EVENT_TYPE_PHYSICAL_DEVICE, |
140 | PQI_EVENT_TYPE_LOGICAL_DEVICE, |
141 | PQI_EVENT_TYPE_OFA, |
142 | PQI_EVENT_TYPE_AIO_STATE_CHANGE, |
143 | PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, |
144 | }; |
145 | |
146 | static int pqi_disable_device_id_wildcards; |
147 | module_param_named(disable_device_id_wildcards, |
148 | pqi_disable_device_id_wildcards, int, 0644); |
149 | MODULE_PARM_DESC(disable_device_id_wildcards, |
150 | "Disable device ID wildcards."); |
151 | |
152 | static int pqi_disable_heartbeat; |
153 | module_param_named(disable_heartbeat, |
154 | pqi_disable_heartbeat, int, 0644); |
155 | MODULE_PARM_DESC(disable_heartbeat, |
156 | "Disable heartbeat."); |
157 | |
158 | static int pqi_disable_ctrl_shutdown; |
159 | module_param_named(disable_ctrl_shutdown, |
160 | pqi_disable_ctrl_shutdown, int, 0644); |
161 | MODULE_PARM_DESC(disable_ctrl_shutdown, |
162 | "Disable controller shutdown when controller locked up."); |
163 | |
164 | static char *pqi_lockup_action_param; |
165 | module_param_named(lockup_action, |
166 | pqi_lockup_action_param, charp, 0644); |
167 | MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" |
168 | "\t\tSupported: none, reboot, panic\n" |
169 | "\t\tDefault: none"); |
170 | |
171 | static int pqi_expose_ld_first; |
172 | module_param_named(expose_ld_first, |
173 | pqi_expose_ld_first, int, 0644); |
174 | MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives."); |
175 | |
176 | static int pqi_hide_vsep; |
177 | module_param_named(hide_vsep, |
178 | pqi_hide_vsep, int, 0644); |
179 | MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives."); |
180 | |
181 | static int pqi_disable_managed_interrupts; |
182 | module_param_named(disable_managed_interrupts, |
183 | pqi_disable_managed_interrupts, int, 0644); |
184 | MODULE_PARM_DESC(disable_managed_interrupts, |
185 | "Disable the kernel automatically assigning SMP affinity to IRQs."); |
186 | |
187 | static unsigned int pqi_ctrl_ready_timeout_secs; |
188 | module_param_named(ctrl_ready_timeout, |
189 | pqi_ctrl_ready_timeout_secs, uint, 0644); |
190 | MODULE_PARM_DESC(ctrl_ready_timeout, |
191 | "Timeout in seconds for driver to wait for controller ready."); |
192 | |
193 | static char *raid_levels[] = { |
194 | "RAID-0", |
195 | "RAID-4", |
196 | "RAID-1(1+0)", |
197 | "RAID-5", |
198 | "RAID-5+1", |
199 | "RAID-6", |
200 | "RAID-1(Triple)", |
201 | }; |
202 | |
203 | static char *pqi_raid_level_to_string(u8 raid_level) |
204 | { |
205 | if (raid_level < ARRAY_SIZE(raid_levels)) |
206 | return raid_levels[raid_level]; |
207 | |
208 | return "RAID UNKNOWN"; |
209 | } |
210 | |
211 | #define SA_RAID_0 0 |
212 | #define SA_RAID_4 1 |
213 | #define SA_RAID_1 2 /* also used for RAID 10 */ |
214 | #define SA_RAID_5 3 /* also used for RAID 50 */ |
215 | #define SA_RAID_51 4 |
216 | #define SA_RAID_6 5 /* also used for RAID 60 */ |
217 | #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ |
218 | #define SA_RAID_MAX SA_RAID_TRIPLE |
219 | #define SA_RAID_UNKNOWN 0xff |
220 | |
221 | static inline void pqi_scsi_done(struct scsi_cmnd *scmd) |
222 | { |
223 | pqi_prep_for_scsi_done(scmd); |
224 | scsi_done(scmd); |
225 | } |
226 | |
227 | static inline void pqi_disable_write_same(struct scsi_device *sdev) |
228 | { |
229 | sdev->no_write_same = 1; |
230 | } |
231 | |
232 | static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) |
233 | { |
234 | return memcmp(scsi3addr1, scsi3addr2, 8) == 0; |
235 | } |
236 | |
237 | static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) |
238 | { |
239 | return !device->is_physical_device; |
240 | } |
241 | |
242 | static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) |
243 | { |
244 | return scsi3addr[2] != 0; |
245 | } |
246 | |
247 | static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) |
248 | { |
249 | return !ctrl_info->controller_online; |
250 | } |
251 | |
252 | static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) |
253 | { |
254 | if (ctrl_info->controller_online) |
255 | if (!sis_is_firmware_running(ctrl_info)) |
256 | pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); |
257 | } |
258 | |
259 | static inline bool pqi_is_hba_lunid(u8 *scsi3addr) |
260 | { |
261 | return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); |
262 | } |
263 | |
264 | #define PQI_DRIVER_SCRATCH_PQI_MODE 0x1 |
265 | #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2 |
266 | |
267 | static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) |
268 | { |
269 | return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE; |
270 | } |
271 | |
272 | static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, |
273 | enum pqi_ctrl_mode mode) |
274 | { |
275 | u32 driver_scratch; |
276 | |
277 | driver_scratch = sis_read_driver_scratch(ctrl_info); |
278 | |
279 | if (mode == PQI_MODE) |
280 | driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE; |
281 | else |
282 | driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE; |
283 | |
284 | sis_write_driver_scratch(ctrl_info, driver_scratch); |
285 | } |
286 | |
287 | static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) |
288 | { |
289 | return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; |
290 | } |
291 | |
292 | static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) |
293 | { |
294 | u32 driver_scratch; |
295 | |
296 | driver_scratch = sis_read_driver_scratch(ctrl_info); |
297 | |
298 | if (is_supported) |
299 | driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; |
300 | else |
301 | driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; |
302 | |
303 | sis_write_driver_scratch(ctrl_info, driver_scratch); |
304 | } |
305 | |
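| /* |
| * Scan blocking: pqi_ctrl_block_scan() sets scan_blocked so no new rescan |
| * is started, then takes scan_mutex to wait out any scan already in |
| * progress; pqi_ctrl_unblock_scan() reverses both steps. |
| */ |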
306 | static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) |
307 | { |
308 | ctrl_info->scan_blocked = true; |
309 | mutex_lock(&ctrl_info->scan_mutex); |
310 | } |
311 | |
312 | static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) |
313 | { |
314 | ctrl_info->scan_blocked = false; |
315 | mutex_unlock(&ctrl_info->scan_mutex); |
316 | } |
317 | |
318 | static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) |
319 | { |
320 | return ctrl_info->scan_blocked; |
321 | } |
322 | |
323 | static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) |
324 | { |
325 | mutex_lock(&ctrl_info->lun_reset_mutex); |
326 | } |
327 | |
328 | static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) |
329 | { |
330 | mutex_unlock(&ctrl_info->lun_reset_mutex); |
331 | } |
332 | |
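| /* |
| * Block the SCSI midlayer from issuing new commands, then poll until the |
| * host reports no outstanding commands; the polling interval starts at |
| * 20 ms and backs off to 500 ms after ten iterations. |
| */ |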
333 | static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) |
334 | { |
335 | struct Scsi_Host *shost; |
336 | unsigned int num_loops; |
337 | int msecs_sleep; |
338 | |
339 | shost = ctrl_info->scsi_host; |
340 | |
341 | scsi_block_requests(shost); |
342 | |
343 | num_loops = 0; |
344 | msecs_sleep = 20; |
345 | while (scsi_host_busy(shost)) { |
346 | num_loops++; |
347 | if (num_loops == 10) |
348 | msecs_sleep = 500; |
349 | msleep(msecs_sleep); |
350 | } |
351 | } |
352 | |
353 | static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) |
354 | { |
355 | scsi_unblock_requests(ctrl_info->scsi_host); |
356 | } |
357 | |
358 | static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) |
359 | { |
360 | atomic_inc(&ctrl_info->num_busy_threads); |
361 | } |
362 | |
363 | static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) |
364 | { |
365 | atomic_dec(&ctrl_info->num_busy_threads); |
366 | } |
367 | |
368 | static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) |
369 | { |
370 | return ctrl_info->block_requests; |
371 | } |
372 | |
373 | static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) |
374 | { |
375 | ctrl_info->block_requests = true; |
376 | } |
377 | |
378 | static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) |
379 | { |
380 | ctrl_info->block_requests = false; |
381 | wake_up_all(&ctrl_info->block_requests_wait); |
382 | } |
383 | |
384 | static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) |
385 | { |
386 | if (!pqi_ctrl_blocked(ctrl_info)) |
387 | return; |
388 | |
389 | atomic_inc(&ctrl_info->num_blocked_threads); |
390 | wait_event(ctrl_info->block_requests_wait, |
391 | !pqi_ctrl_blocked(ctrl_info)); |
392 | atomic_dec(&ctrl_info->num_blocked_threads); |
393 | } |
394 | |
395 | #define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 |
396 | |
397 | static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) |
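| /* |
| * Wait until every thread counted by pqi_ctrl_busy() has either finished |
| * (num_busy_threads drops) or is itself parked on block_requests_wait |
| * (counted in num_blocked_threads), warning every |
| * PQI_QUIESCE_WARNING_TIMEOUT_SECS while the wait continues. |
| */ |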
398 | { |
399 | unsigned long start_jiffies; |
400 | unsigned long warning_timeout; |
401 | bool displayed_warning; |
402 | |
403 | displayed_warning = false; |
404 | start_jiffies = jiffies; |
405 | warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; |
406 | |
407 | while (atomic_read(&ctrl_info->num_busy_threads) > |
408 | atomic_read(&ctrl_info->num_blocked_threads)) { |
409 | if (time_after(jiffies, warning_timeout)) { |
410 | dev_warn(&ctrl_info->pci_dev->dev, |
411 | "waiting %u seconds for driver activity to quiesce\n", |
412 | jiffies_to_msecs(jiffies - start_jiffies) / 1000); |
413 | displayed_warning = true; |
414 | warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies; |
415 | } |
416 | usleep_range(1000, 2000); |
417 | } |
418 | |
419 | if (displayed_warning) |
420 | dev_warn(&ctrl_info->pci_dev->dev, |
421 | "driver activity quiesced after waiting for %u seconds\n", |
422 | jiffies_to_msecs(jiffies - start_jiffies) / 1000); |
423 | } |
424 | |
425 | static inline bool pqi_device_offline(struct pqi_scsi_dev *device) |
426 | { |
427 | return device->device_offline; |
428 | } |
429 | |
430 | static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) |
431 | { |
432 | mutex_lock(&ctrl_info->ofa_mutex); |
433 | } |
434 | |
435 | static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) |
436 | { |
437 | mutex_unlock(&ctrl_info->ofa_mutex); |
438 | } |
439 | |
440 | static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) |
441 | { |
442 | mutex_lock(&ctrl_info->ofa_mutex); |
443 | mutex_unlock(&ctrl_info->ofa_mutex); |
444 | } |
445 | |
446 | static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) |
447 | { |
448 | return mutex_is_locked(&ctrl_info->ofa_mutex); |
449 | } |
450 | |
451 | static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) |
452 | { |
453 | device->in_remove = true; |
454 | } |
455 | |
456 | static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) |
457 | { |
458 | return device->in_remove; |
459 | } |
460 | |
461 | static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun) |
462 | { |
463 | device->in_reset[lun] = true; |
464 | } |
465 | |
466 | static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun) |
467 | { |
468 | device->in_reset[lun] = false; |
469 | } |
470 | |
471 | static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun) |
472 | { |
473 | return device->in_reset[lun]; |
474 | } |
475 | |
476 | static inline int pqi_event_type_to_event_index(unsigned int event_type) |
477 | { |
478 | int index; |
479 | |
480 | for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) |
481 | if (event_type == pqi_supported_event_types[index]) |
482 | return index; |
483 | |
484 | return -1; |
485 | } |
486 | |
487 | static inline bool pqi_is_supported_event(unsigned int event_type) |
488 | { |
489 | return pqi_event_type_to_event_index(event_type) != -1; |
490 | } |
491 | |
492 | static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, |
493 | unsigned long delay) |
494 | { |
495 | if (pqi_ctrl_offline(ctrl_info)) |
496 | return; |
497 | |
498 | schedule_delayed_work(&ctrl_info->rescan_work, delay); |
499 | } |
500 | |
501 | static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) |
502 | { |
503 | pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); |
504 | } |
505 | |
506 | #define PQI_RESCAN_WORK_DELAY (10 * HZ) |
507 | |
508 | static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) |
509 | { |
510 | pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); |
511 | } |
512 | |
513 | static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) |
514 | { |
515 | cancel_delayed_work_sync(&ctrl_info->rescan_work); |
516 | } |
517 | |
518 | static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) |
519 | { |
520 | if (!ctrl_info->heartbeat_counter) |
521 | return 0; |
522 | |
523 | return readl(ctrl_info->heartbeat_counter); |
524 | } |
525 | |
526 | static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) |
527 | { |
528 | return readb(ctrl_info->soft_reset_status); |
529 | } |
530 | |
531 | static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) |
532 | { |
533 | u8 status; |
534 | |
535 | status = pqi_read_soft_reset_status(ctrl_info); |
536 | status &= ~PQI_SOFT_RESET_ABORT; |
537 | writeb(status, ctrl_info->soft_reset_status); |
538 | } |
539 | |
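| /* |
| * A request is treated as high priority only when NCQ priority is enabled |
| * on the device, the request's I/O priority class is IOPRIO_CLASS_RT, and |
| * the CDB opcode is one of the READ/WRITE variants listed below. |
| */ |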
540 | static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) |
541 | { |
542 | bool io_high_prio; |
543 | int priority_class; |
544 | |
545 | io_high_prio = false; |
546 | |
547 | if (device->ncq_prio_enable) { |
548 | priority_class = |
549 | IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); |
550 | if (priority_class == IOPRIO_CLASS_RT) { |
551 | /* Set NCQ priority for read/write commands. */ |
552 | switch (scmd->cmnd[0]) { |
553 | case WRITE_16: |
554 | case READ_16: |
555 | case WRITE_12: |
556 | case READ_12: |
557 | case WRITE_10: |
558 | case READ_10: |
559 | case WRITE_6: |
560 | case READ_6: |
561 | io_high_prio = true; |
562 | break; |
563 | } |
564 | } |
565 | } |
566 | |
567 | return io_high_prio; |
568 | } |
569 | |
570 | static int pqi_map_single(struct pci_dev *pci_dev, |
571 | struct pqi_sg_descriptor *sg_descriptor, void *buffer, |
572 | size_t buffer_length, enum dma_data_direction data_direction) |
573 | { |
574 | dma_addr_t bus_address; |
575 | |
576 | if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) |
577 | return 0; |
578 | |
579 | bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, |
580 | data_direction); |
581 | if (dma_mapping_error(&pci_dev->dev, bus_address)) |
582 | return -ENOMEM; |
583 | |
584 | put_unaligned_le64((u64)bus_address, &sg_descriptor->address); |
585 | put_unaligned_le32(buffer_length, &sg_descriptor->length); |
586 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); |
587 | |
588 | return 0; |
589 | } |
590 | |
591 | static void pqi_pci_unmap(struct pci_dev *pci_dev, |
592 | struct pqi_sg_descriptor *descriptors, int num_descriptors, |
593 | enum dma_data_direction data_direction) |
594 | { |
595 | int i; |
596 | |
597 | if (data_direction == DMA_NONE) |
598 | return; |
599 | |
600 | for (i = 0; i < num_descriptors; i++) |
601 | dma_unmap_single(&pci_dev->dev, |
602 | (dma_addr_t)get_unaligned_le64(&descriptors[i].address), |
603 | get_unaligned_le32(&descriptors[i].length), |
604 | data_direction); |
605 | } |
606 | |
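| /* |
| * Build a RAID-path request IU: fill in the header and LUN, construct the |
| * CDB for the given CISS/BMIC command, derive the DMA direction from the |
| * data_direction field, and map the caller's buffer into the first SG |
| * descriptor via pqi_map_single(). |
| */ |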
607 | static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, |
608 | struct pqi_raid_path_request *request, u8 cmd, |
609 | u8 *scsi3addr, void *buffer, size_t buffer_length, |
610 | u16 vpd_page, enum dma_data_direction *dir) |
611 | { |
612 | u8 *cdb; |
613 | size_t cdb_length = buffer_length; |
614 | |
615 | memset(request, 0, sizeof(*request)); |
616 | |
617 | request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; |
618 | put_unaligned_le16(offsetof(struct pqi_raid_path_request, |
619 | sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, |
620 | &request->header.iu_length); |
621 | put_unaligned_le32(buffer_length, &request->buffer_length); |
622 | memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); |
623 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
624 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
625 | |
626 | cdb = request->cdb; |
627 | |
628 | switch (cmd) { |
629 | case INQUIRY: |
630 | request->data_direction = SOP_READ_FLAG; |
631 | cdb[0] = INQUIRY; |
632 | if (vpd_page & VPD_PAGE) { |
633 | cdb[1] = 0x1; |
634 | cdb[2] = (u8)vpd_page; |
635 | } |
636 | cdb[4] = (u8)cdb_length; |
637 | break; |
638 | case CISS_REPORT_LOG: |
639 | case CISS_REPORT_PHYS: |
640 | request->data_direction = SOP_READ_FLAG; |
641 | cdb[0] = cmd; |
642 | if (cmd == CISS_REPORT_PHYS) { |
643 | if (ctrl_info->rpl_extended_format_4_5_supported) |
644 | cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; |
645 | else |
646 | cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; |
647 | } else { |
648 | cdb[1] = ctrl_info->ciss_report_log_flags; |
649 | } |
650 | put_unaligned_be32(cdb_length, &cdb[6]); |
651 | break; |
652 | case CISS_GET_RAID_MAP: |
653 | request->data_direction = SOP_READ_FLAG; |
654 | cdb[0] = CISS_READ; |
655 | cdb[1] = CISS_GET_RAID_MAP; |
656 | put_unaligned_be32(cdb_length, &cdb[6]); |
657 | break; |
658 | case SA_FLUSH_CACHE: |
659 | request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; |
660 | request->data_direction = SOP_WRITE_FLAG; |
661 | cdb[0] = BMIC_WRITE; |
662 | cdb[6] = BMIC_FLUSH_CACHE; |
663 | put_unaligned_be16(cdb_length, &cdb[7]); |
664 | break; |
665 | case BMIC_SENSE_DIAG_OPTIONS: |
666 | cdb_length = 0; |
667 | fallthrough; |
668 | case BMIC_IDENTIFY_CONTROLLER: |
669 | case BMIC_IDENTIFY_PHYSICAL_DEVICE: |
670 | case BMIC_SENSE_SUBSYSTEM_INFORMATION: |
671 | case BMIC_SENSE_FEATURE: |
672 | request->data_direction = SOP_READ_FLAG; |
673 | cdb[0] = BMIC_READ; |
674 | cdb[6] = cmd; |
675 | put_unaligned_be16(cdb_length, &cdb[7]); |
676 | break; |
677 | case BMIC_SET_DIAG_OPTIONS: |
678 | cdb_length = 0; |
679 | fallthrough; |
680 | case BMIC_WRITE_HOST_WELLNESS: |
681 | request->data_direction = SOP_WRITE_FLAG; |
682 | cdb[0] = BMIC_WRITE; |
683 | cdb[6] = cmd; |
684 | put_unaligned_be16(cdb_length, &cdb[7]); |
685 | break; |
686 | case BMIC_CSMI_PASSTHRU: |
687 | request->data_direction = SOP_BIDIRECTIONAL; |
688 | cdb[0] = BMIC_WRITE; |
689 | cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; |
690 | cdb[6] = cmd; |
691 | put_unaligned_be16(cdb_length, &cdb[7]); |
692 | break; |
693 | default: |
694 | dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); |
695 | break; |
696 | } |
697 | |
698 | switch (request->data_direction) { |
699 | case SOP_READ_FLAG: |
700 | *dir = DMA_FROM_DEVICE; |
701 | break; |
702 | case SOP_WRITE_FLAG: |
703 | *dir = DMA_TO_DEVICE; |
704 | break; |
705 | case SOP_NO_DIRECTION_FLAG: |
706 | *dir = DMA_NONE; |
707 | break; |
708 | default: |
709 | *dir = DMA_BIDIRECTIONAL; |
710 | break; |
711 | } |
712 | |
713 | return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], |
714 | buffer, buffer_length, *dir); |
715 | } |
716 | |
717 | static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) |
718 | { |
719 | io_request->scmd = NULL; |
720 | io_request->status = 0; |
721 | io_request->error_info = NULL; |
722 | io_request->raid_bypass = false; |
723 | } |
724 | |
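| /* |
| * I/O request slots: requests that arrive from the SCSI midlayer are |
| * indexed directly by their block-layer tag, while IOCTL and internal |
| * requests spin over the PQI_RESERVED_IO_SLOTS entries that sit above |
| * scsi_ml_can_queue in io_request_pool. |
| */ |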
725 | static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) |
726 | { |
727 | struct pqi_io_request *io_request; |
728 | u16 i; |
729 | |
730 | if (scmd) { /* SML I/O request */ |
731 | u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); |
732 | |
733 | i = blk_mq_unique_tag_to_tag(blk_tag); |
734 | io_request = &ctrl_info->io_request_pool[i]; |
735 | if (atomic_inc_return(&io_request->refcount) > 1) { |
736 | atomic_dec(&io_request->refcount); |
737 | return NULL; |
738 | } |
739 | } else { /* IOCTL or driver internal request */ |
740 | /* |
741 | * benignly racy - may have to wait for an open slot. |
742 | * command slot range is scsi_ml_can_queue - |
743 | * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] |
744 | */ |
745 | i = 0; |
746 | while (1) { |
747 | io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; |
748 | if (atomic_inc_return(&io_request->refcount) == 1) |
749 | break; |
750 | atomic_dec(&io_request->refcount); |
751 | i = (i + 1) % PQI_RESERVED_IO_SLOTS; |
752 | } |
753 | } |
754 | |
755 | if (io_request) |
756 | pqi_reinit_io_request(io_request); |
757 | |
758 | return io_request; |
759 | } |
760 | |
761 | static void pqi_free_io_request(struct pqi_io_request *io_request) |
762 | { |
763 | atomic_dec(&io_request->refcount); |
764 | } |
765 | |
766 | static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, |
767 | u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, |
768 | struct pqi_raid_error_info *error_info) |
769 | { |
770 | int rc; |
771 | struct pqi_raid_path_request request; |
772 | enum dma_data_direction dir; |
773 | |
774 | rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, |
775 | buffer, buffer_length, vpd_page, &dir); |
776 | if (rc) |
777 | return rc; |
778 | |
779 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); |
780 | |
781 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); |
782 | |
783 | return rc; |
784 | } |
785 | |
786 | /* helper functions for pqi_send_scsi_raid_request */ |
787 | |
788 | static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, |
789 | u8 cmd, void *buffer, size_t buffer_length) |
790 | { |
791 | return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, |
792 | buffer, buffer_length, 0, NULL); |
793 | } |
794 | |
795 | static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, |
796 | u8 cmd, void *buffer, size_t buffer_length, |
797 | struct pqi_raid_error_info *error_info) |
798 | { |
799 | return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, |
800 | buffer, buffer_length, 0, error_info); |
801 | } |
802 | |
803 | static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, |
804 | struct bmic_identify_controller *buffer) |
805 | { |
806 | return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, |
807 | buffer, sizeof(*buffer)); |
808 | } |
809 | |
810 | static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, |
811 | struct bmic_sense_subsystem_info *sense_info) |
812 | { |
813 | return pqi_send_ctrl_raid_request(ctrl_info, |
814 | BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, |
815 | sizeof(*sense_info)); |
816 | } |
817 | |
818 | static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, |
819 | u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) |
820 | { |
821 | return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, |
822 | buffer, buffer_length, vpd_page, NULL); |
823 | } |
824 | |
825 | static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, |
826 | struct pqi_scsi_dev *device, |
827 | struct bmic_identify_physical_device *buffer, size_t buffer_length) |
828 | { |
829 | int rc; |
830 | enum dma_data_direction dir; |
831 | u16 bmic_device_index; |
832 | struct pqi_raid_path_request request; |
833 | |
834 | rc = pqi_build_raid_path_request(ctrl_info, &request, |
835 | BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, |
836 | buffer_length, 0, &dir); |
837 | if (rc) |
838 | return rc; |
839 | |
840 | bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); |
841 | request.cdb[2] = (u8)bmic_device_index; |
842 | request.cdb[9] = (u8)(bmic_device_index >> 8); |
843 | |
844 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
845 | |
846 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); |
847 | |
848 | return rc; |
849 | } |
850 | |
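| /* |
| * BMIC reports AIO transfer limits in units of 1 KiB; a value of zero |
| * means "no limit", which is converted to ~0 bytes here. |
| */ |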
851 | static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) |
852 | { |
853 | u32 bytes; |
854 | |
855 | bytes = get_unaligned_le16(limit); |
856 | if (bytes == 0) |
857 | bytes = ~0; |
858 | else |
859 | bytes *= 1024; |
860 | |
861 | return bytes; |
862 | } |
863 | |
864 | #pragma pack(1) |
865 | |
866 | struct bmic_sense_feature_buffer { |
867 | struct bmic_sense_feature_buffer_header header; |
868 | struct bmic_sense_feature_io_page_aio_subpage aio_subpage; |
869 | }; |
870 | |
871 | #pragma pack() |
872 | |
873 | #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ |
874 | offsetofend(struct bmic_sense_feature_buffer, \ |
875 | aio_subpage.max_write_raid_1_10_3drive) |
876 | |
877 | #define MINIMUM_AIO_SUBPAGE_LENGTH \ |
878 | (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ |
879 | max_write_raid_1_10_3drive) - \ |
880 | sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) |
881 | |
882 | static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) |
883 | { |
884 | int rc; |
885 | enum dma_data_direction dir; |
886 | struct pqi_raid_path_request request; |
887 | struct bmic_sense_feature_buffer *buffer; |
888 | |
889 | buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); |
890 | if (!buffer) |
891 | return -ENOMEM; |
892 | |
893 | rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, |
894 | buffer, sizeof(*buffer), 0, &dir); |
895 | if (rc) |
896 | goto error; |
897 | |
898 | request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; |
899 | request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; |
900 | |
901 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
902 | |
903 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); |
904 | |
905 | if (rc) |
906 | goto error; |
907 | |
908 | if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || |
909 | buffer->header.subpage_code != |
910 | BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || |
911 | get_unaligned_le16(&buffer->header.buffer_length) < |
912 | MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || |
913 | buffer->aio_subpage.header.page_code != |
914 | BMIC_SENSE_FEATURE_IO_PAGE || |
915 | buffer->aio_subpage.header.subpage_code != |
916 | BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || |
917 | get_unaligned_le16(&buffer->aio_subpage.header.page_length) < |
918 | MINIMUM_AIO_SUBPAGE_LENGTH) { |
919 | goto error; |
920 | } |
921 | |
922 | ctrl_info->max_transfer_encrypted_sas_sata = |
923 | pqi_aio_limit_to_bytes( |
924 | &buffer->aio_subpage.max_transfer_encrypted_sas_sata); |
925 | |
926 | ctrl_info->max_transfer_encrypted_nvme = |
927 | pqi_aio_limit_to_bytes( |
928 | &buffer->aio_subpage.max_transfer_encrypted_nvme); |
929 | |
930 | ctrl_info->max_write_raid_5_6 = |
931 | pqi_aio_limit_to_bytes( |
932 | &buffer->aio_subpage.max_write_raid_5_6); |
933 | |
934 | ctrl_info->max_write_raid_1_10_2drive = |
935 | pqi_aio_limit_to_bytes( |
936 | &buffer->aio_subpage.max_write_raid_1_10_2drive); |
937 | |
938 | ctrl_info->max_write_raid_1_10_3drive = |
939 | pqi_aio_limit_to_bytes( |
940 | &buffer->aio_subpage.max_write_raid_1_10_3drive); |
941 | |
942 | error: |
943 | kfree(buffer); |
944 | |
945 | return rc; |
946 | } |
947 | |
948 | static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, |
949 | enum bmic_flush_cache_shutdown_event shutdown_event) |
950 | { |
951 | int rc; |
952 | struct bmic_flush_cache *flush_cache; |
953 | |
954 | flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); |
955 | if (!flush_cache) |
956 | return -ENOMEM; |
957 | |
958 | flush_cache->shutdown_event = shutdown_event; |
959 | |
960 | rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, |
961 | sizeof(*flush_cache)); |
962 | |
963 | kfree(flush_cache); |
964 | |
965 | return rc; |
966 | } |
967 | |
968 | int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, |
969 | struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, |
970 | struct pqi_raid_error_info *error_info) |
971 | { |
972 | return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, |
973 | buffer, buffer_length, error_info); |
974 | } |
975 | |
976 | #define PQI_FETCH_PTRAID_DATA (1 << 31) |
977 | |
978 | static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) |
979 | { |
980 | int rc; |
981 | struct bmic_diag_options *diag; |
982 | |
983 | diag = kzalloc(sizeof(*diag), GFP_KERNEL); |
984 | if (!diag) |
985 | return -ENOMEM; |
986 | |
987 | rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, |
988 | diag, sizeof(*diag)); |
989 | if (rc) |
990 | goto out; |
991 | |
992 | diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); |
993 | |
994 | rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, |
995 | sizeof(*diag)); |
996 | |
997 | out: |
998 | kfree(diag); |
999 | |
1000 | return rc; |
1001 | } |
1002 | |
1003 | static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, |
1004 | void *buffer, size_t buffer_length) |
1005 | { |
1006 | return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, |
1007 | buffer, buffer_length); |
1008 | } |
1009 | |
1010 | #pragma pack(1) |
1011 | |
1012 | struct bmic_host_wellness_driver_version { |
1013 | u8 start_tag[4]; |
1014 | u8 driver_version_tag[2]; |
1015 | __le16 driver_version_length; |
1016 | char driver_version[32]; |
1017 | u8 dont_write_tag[2]; |
1018 | u8 end_tag[2]; |
1019 | }; |
1020 | |
1021 | #pragma pack() |
1022 | |
1023 | static int pqi_write_driver_version_to_host_wellness( |
1024 | struct pqi_ctrl_info *ctrl_info) |
1025 | { |
1026 | int rc; |
1027 | struct bmic_host_wellness_driver_version *buffer; |
1028 | size_t buffer_length; |
1029 | |
1030 | buffer_length = sizeof(*buffer); |
1031 | |
1032 | buffer = kmalloc(buffer_length, GFP_KERNEL); |
1033 | if (!buffer) |
1034 | return -ENOMEM; |
1035 | |
1036 | buffer->start_tag[0] = '<'; |
1037 | buffer->start_tag[1] = 'H'; |
1038 | buffer->start_tag[2] = 'W'; |
1039 | buffer->start_tag[3] = '>'; |
1040 | buffer->driver_version_tag[0] = 'D'; |
1041 | buffer->driver_version_tag[1] = 'V'; |
1042 | put_unaligned_le16(sizeof(buffer->driver_version), |
1043 | &buffer->driver_version_length); |
1044 | strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, |
1045 | sizeof(buffer->driver_version) - 1); |
1046 | buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; |
1047 | buffer->dont_write_tag[0] = 'D'; |
1048 | buffer->dont_write_tag[1] = 'W'; |
1049 | buffer->end_tag[0] = 'Z'; |
1050 | buffer->end_tag[1] = 'Z'; |
1051 | |
1052 | rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); |
1053 | |
1054 | kfree(buffer); |
1055 | |
1056 | return rc; |
1057 | } |
1058 | |
1059 | #pragma pack(1) |
1060 | |
1061 | struct bmic_host_wellness_time { |
1062 | u8 start_tag[4]; |
1063 | u8 time_tag[2]; |
1064 | __le16 time_length; |
1065 | u8 time[8]; |
1066 | u8 dont_write_tag[2]; |
1067 | u8 end_tag[2]; |
1068 | }; |
1069 | |
1070 | #pragma pack() |
1071 | |
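| /* |
| * Report the current host time to the controller as BCD fields: |
| * hour, minute, second, reserved, month, day, century, year. |
| */ |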
1072 | static int pqi_write_current_time_to_host_wellness( |
1073 | struct pqi_ctrl_info *ctrl_info) |
1074 | { |
1075 | int rc; |
1076 | struct bmic_host_wellness_time *buffer; |
1077 | size_t buffer_length; |
1078 | time64_t local_time; |
1079 | unsigned int year; |
1080 | struct tm tm; |
1081 | |
1082 | buffer_length = sizeof(*buffer); |
1083 | |
1084 | buffer = kmalloc(buffer_length, GFP_KERNEL); |
1085 | if (!buffer) |
1086 | return -ENOMEM; |
1087 | |
1088 | buffer->start_tag[0] = '<'; |
1089 | buffer->start_tag[1] = 'H'; |
1090 | buffer->start_tag[2] = 'W'; |
1091 | buffer->start_tag[3] = '>'; |
1092 | buffer->time_tag[0] = 'T'; |
1093 | buffer->time_tag[1] = 'D'; |
1094 | put_unaligned_le16(sizeof(buffer->time), |
1095 | &buffer->time_length); |
1096 | |
1097 | local_time = ktime_get_real_seconds(); |
1098 | time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); |
1099 | year = tm.tm_year + 1900; |
1100 | |
1101 | buffer->time[0] = bin2bcd(tm.tm_hour); |
1102 | buffer->time[1] = bin2bcd(tm.tm_min); |
1103 | buffer->time[2] = bin2bcd(tm.tm_sec); |
1104 | buffer->time[3] = 0; |
1105 | buffer->time[4] = bin2bcd(tm.tm_mon + 1); |
1106 | buffer->time[5] = bin2bcd(tm.tm_mday); |
1107 | buffer->time[6] = bin2bcd(year / 100); |
1108 | buffer->time[7] = bin2bcd(year % 100); |
1109 | |
1110 | buffer->dont_write_tag[0] = 'D'; |
1111 | buffer->dont_write_tag[1] = 'W'; |
1112 | buffer->end_tag[0] = 'Z'; |
1113 | buffer->end_tag[1] = 'Z'; |
1114 | |
1115 | rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); |
1116 | |
1117 | kfree(buffer); |
1118 | |
1119 | return rc; |
1120 | } |
1121 | |
1122 | #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) |
1123 | |
1124 | static void pqi_update_time_worker(struct work_struct *work) |
1125 | { |
1126 | int rc; |
1127 | struct pqi_ctrl_info *ctrl_info; |
1128 | |
1129 | ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, |
1130 | update_time_work); |
1131 | |
1132 | rc = pqi_write_current_time_to_host_wellness(ctrl_info); |
1133 | if (rc) |
1134 | dev_warn(&ctrl_info->pci_dev->dev, |
1135 | "error updating time on controller\n"); |
1136 | |
1137 | schedule_delayed_work(&ctrl_info->update_time_work, |
1138 | PQI_UPDATE_TIME_WORK_INTERVAL); |
1139 | } |
1140 | |
1141 | static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) |
1142 | { |
1143 | schedule_delayed_work(&ctrl_info->update_time_work, 0); |
1144 | } |
1145 | |
1146 | static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) |
1147 | { |
1148 | cancel_delayed_work_sync(&ctrl_info->update_time_work); |
1149 | } |
1150 | |
1151 | static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, |
1152 | size_t buffer_length) |
1153 | { |
1154 | return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); |
1155 | } |
1156 | |
1157 | static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) |
1158 | { |
1159 | int rc; |
1160 | size_t lun_list_length; |
1161 | size_t lun_data_length; |
1162 | size_t new_lun_list_length; |
1163 | void *lun_data = NULL; |
1164 | struct report_lun_header *report_lun_header; |
1165 | |
1166 | report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); |
1167 | if (!report_lun_header) { |
1168 | rc = -ENOMEM; |
1169 | goto out; |
1170 | } |
1171 | |
1172 | rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); |
1173 | if (rc) |
1174 | goto out; |
1175 | |
1176 | lun_list_length = get_unaligned_be32(&report_lun_header->list_length); |
1177 | |
1178 | again: |
1179 | lun_data_length = sizeof(struct report_lun_header) + lun_list_length; |
1180 | |
1181 | lun_data = kmalloc(lun_data_length, GFP_KERNEL); |
1182 | if (!lun_data) { |
1183 | rc = -ENOMEM; |
1184 | goto out; |
1185 | } |
1186 | |
1187 | if (lun_list_length == 0) { |
1188 | memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); |
1189 | goto out; |
1190 | } |
1191 | |
1192 | rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); |
1193 | if (rc) |
1194 | goto out; |
1195 | |
1196 | new_lun_list_length = |
1197 | get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); |
1198 | |
1199 | if (new_lun_list_length > lun_list_length) { |
1200 | lun_list_length = new_lun_list_length; |
1201 | kfree(lun_data); |
1202 | goto again; |
1203 | } |
1204 | |
1205 | out: |
1206 | kfree(report_lun_header); |
1207 | |
1208 | if (rc) { |
1209 | kfree(lun_data); |
1210 | lun_data = NULL; |
1211 | } |
1212 | |
1213 | *buffer = lun_data; |
1214 | |
1215 | return rc; |
1216 | } |
1217 | |
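| /* |
| * Fetch the physical LUN list. Controllers that support extended format 4 |
| * return 16-byte WWID entries directly; otherwise the 8-byte WWID |
| * (format 2) entries are copied into a newly allocated format 4 style |
| * list so the rest of the driver only has to handle one layout. |
| */ |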
1218 | static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) |
1219 | { |
1220 | int rc; |
1221 | unsigned int i; |
1222 | u8 rpl_response_format; |
1223 | u32 num_physicals; |
1224 | void *rpl_list; |
1225 | struct report_lun_header *rpl_header; |
1226 | struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; |
1227 | struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; |
1228 | |
1229 | rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); |
1230 | if (rc) |
1231 | return rc; |
1232 | |
1233 | if (ctrl_info->rpl_extended_format_4_5_supported) { |
1234 | rpl_header = rpl_list; |
1235 | rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; |
1236 | if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { |
1237 | *buffer = rpl_list; |
1238 | return 0; |
1239 | } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { |
1240 | dev_err(&ctrl_info->pci_dev->dev, |
1241 | "RPL returned unsupported data format %u\n", |
1242 | rpl_response_format); |
1243 | return -EINVAL; |
1244 | } else { |
1245 | dev_warn(&ctrl_info->pci_dev->dev, |
1246 | "RPL returned extended format 2 instead of 4\n"); |
1247 | } |
1248 | } |
1249 | |
1250 | rpl_8byte_wwid_list = rpl_list; |
1251 | num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); |
1252 | |
1253 | rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, |
1254 | num_physicals), GFP_KERNEL); |
1255 | if (!rpl_16byte_wwid_list) |
1256 | return -ENOMEM; |
1257 | |
1258 | put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), |
1259 | &rpl_16byte_wwid_list->header.list_length); |
1260 | rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; |
1261 | |
1262 | for (i = 0; i < num_physicals; i++) { |
1263 | memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); |
1264 | memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); |
1265 | memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); |
1266 | rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; |
1267 | rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; |
1268 | rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; |
1269 | rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; |
1270 | rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; |
1271 | } |
1272 | |
1273 | kfree(rpl_8byte_wwid_list); |
1274 | *buffer = rpl_16byte_wwid_list; |
1275 | |
1276 | return 0; |
1277 | } |
1278 | |
1279 | static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) |
1280 | { |
1281 | return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); |
1282 | } |
1283 | |
1284 | static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, |
1285 | struct report_phys_lun_16byte_wwid_list **physdev_list, |
1286 | struct report_log_lun_list **logdev_list) |
1287 | { |
1288 | int rc; |
1289 | size_t logdev_list_length; |
1290 | size_t logdev_data_length; |
1291 | struct report_log_lun_list *internal_logdev_list; |
1292 | struct report_log_lun_list *logdev_data; |
1293 | struct report_lun_header report_lun_header; |
1294 | |
1295 | rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); |
1296 | if (rc) |
1297 | dev_err(&ctrl_info->pci_dev->dev, |
1298 | "report physical LUNs failed\n"); |
1299 | |
1300 | rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); |
1301 | if (rc) |
1302 | dev_err(&ctrl_info->pci_dev->dev, |
1303 | "report logical LUNs failed\n"); |
1304 | |
1305 | /* |
1306 | * Tack the controller itself onto the end of the logical device list |
1307 | * by adding a list entry that is all zeros. |
1308 | */ |
1309 | |
1310 | logdev_data = *logdev_list; |
1311 | |
1312 | if (logdev_data) { |
1313 | logdev_list_length = |
1314 | get_unaligned_be32(&logdev_data->header.list_length); |
1315 | } else { |
1316 | memset(&report_lun_header, 0, sizeof(report_lun_header)); |
1317 | logdev_data = |
1318 | (struct report_log_lun_list *)&report_lun_header; |
1319 | logdev_list_length = 0; |
1320 | } |
1321 | |
1322 | logdev_data_length = sizeof(struct report_lun_header) + |
1323 | logdev_list_length; |
1324 | |
1325 | internal_logdev_list = kmalloc(logdev_data_length + |
1326 | sizeof(struct report_log_lun), GFP_KERNEL); |
1327 | if (!internal_logdev_list) { |
1328 | kfree(*logdev_list); |
1329 | *logdev_list = NULL; |
1330 | return -ENOMEM; |
1331 | } |
1332 | |
1333 | memcpy(internal_logdev_list, logdev_data, logdev_data_length); |
1334 | memset((u8 *)internal_logdev_list + logdev_data_length, 0, |
1335 | sizeof(struct report_log_lun)); |
1336 | put_unaligned_be32(logdev_list_length + |
1337 | sizeof(struct report_log_lun), |
1338 | &internal_logdev_list->header.list_length); |
1339 | |
1340 | kfree(*logdev_list); |
1341 | *logdev_list = internal_logdev_list; |
1342 | |
1343 | return 0; |
1344 | } |
1345 | |
1346 | static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, |
1347 | int bus, int target, int lun) |
1348 | { |
1349 | device->bus = bus; |
1350 | device->target = target; |
1351 | device->lun = lun; |
1352 | } |
1353 | |
1354 | static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) |
1355 | { |
1356 | u8 *scsi3addr; |
1357 | u32 lunid; |
1358 | int bus; |
1359 | int target; |
1360 | int lun; |
1361 | |
1362 | scsi3addr = device->scsi3addr; |
1363 | lunid = get_unaligned_le32(scsi3addr); |
1364 | |
1365 | if (pqi_is_hba_lunid(scsi3addr)) { |
1366 | /* The specified device is the controller. */ |
1367 | pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); |
1368 | device->target_lun_valid = true; |
1369 | return; |
1370 | } |
1371 | |
1372 | if (pqi_is_logical_device(device)) { |
1373 | if (device->is_external_raid_device) { |
1374 | bus = PQI_EXTERNAL_RAID_VOLUME_BUS; |
1375 | target = (lunid >> 16) & 0x3fff; |
1376 | lun = lunid & 0xff; |
1377 | } else { |
1378 | bus = PQI_RAID_VOLUME_BUS; |
1379 | target = 0; |
1380 | lun = lunid & 0x3fff; |
1381 | } |
1382 | pqi_set_bus_target_lun(device, bus, target, lun); |
1383 | device->target_lun_valid = true; |
1384 | return; |
1385 | } |
1386 | |
1387 | /* |
1388 | * Defer target and LUN assignment for non-controller physical devices |
1389 | * because the SAS transport layer will make these assignments later. |
1390 | */ |
1391 | pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); |
1392 | } |
1393 | |
1394 | static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, |
1395 | struct pqi_scsi_dev *device) |
1396 | { |
1397 | int rc; |
1398 | u8 raid_level; |
1399 | u8 *buffer; |
1400 | |
1401 | raid_level = SA_RAID_UNKNOWN; |
1402 | |
1403 | buffer = kmalloc(64, GFP_KERNEL); |
1404 | if (buffer) { |
1405 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, |
1406 | VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); |
1407 | if (rc == 0) { |
1408 | raid_level = buffer[8]; |
1409 | if (raid_level > SA_RAID_MAX) |
1410 | raid_level = SA_RAID_UNKNOWN; |
1411 | } |
1412 | kfree(buffer); |
1413 | } |
1414 | |
1415 | device->raid_level = raid_level; |
1416 | } |
1417 | |
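| /* |
| * Sanity-check a RAID map before it is used for bypass: the structure must |
| * be at least large enough to reach disk_data, layout_map_count must match |
| * the RAID level (2 for RAID-1, 3 for RAID-1 Triple), and RAID-50/60 maps |
| * must describe a non-zero number of blocks per row. |
| */ |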
1418 | static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, |
1419 | struct pqi_scsi_dev *device, struct raid_map *raid_map) |
1420 | { |
1421 | char *err_msg; |
1422 | u32 raid_map_size; |
1423 | u32 r5or6_blocks_per_row; |
1424 | |
1425 | raid_map_size = get_unaligned_le32(&raid_map->structure_size); |
1426 | |
1427 | if (raid_map_size < offsetof(struct raid_map, disk_data)) { |
1428 | err_msg = "RAID map too small"; |
1429 | goto bad_raid_map; |
1430 | } |
1431 | |
1432 | if (device->raid_level == SA_RAID_1) { |
1433 | if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { |
1434 | err_msg = "invalid RAID-1 map"; |
1435 | goto bad_raid_map; |
1436 | } |
1437 | } else if (device->raid_level == SA_RAID_TRIPLE) { |
1438 | if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { |
1439 | err_msg = "invalid RAID-1(Triple) map"; |
1440 | goto bad_raid_map; |
1441 | } |
1442 | } else if ((device->raid_level == SA_RAID_5 || |
1443 | device->raid_level == SA_RAID_6) && |
1444 | get_unaligned_le16(&raid_map->layout_map_count) > 1) { |
1445 | /* RAID 50/60 */ |
1446 | r5or6_blocks_per_row = |
1447 | get_unaligned_le16(&raid_map->strip_size) * |
1448 | get_unaligned_le16(&raid_map->data_disks_per_row); |
1449 | if (r5or6_blocks_per_row == 0) { |
1450 | err_msg = "invalid RAID-5 or RAID-6 map"; |
1451 | goto bad_raid_map; |
1452 | } |
1453 | } |
1454 | |
1455 | return 0; |
1456 | |
1457 | bad_raid_map: |
1458 | dev_warn(&ctrl_info->pci_dev->dev, |
1459 | "logical device %08x%08x %s\n", |
1460 | *((u32 *)&device->scsi3addr), |
1461 | *((u32 *)&device->scsi3addr[4]), err_msg); |
1462 | |
1463 | return -EINVAL; |
1464 | } |
1465 | |
1466 | static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, |
1467 | struct pqi_scsi_dev *device) |
1468 | { |
1469 | int rc; |
1470 | u32 raid_map_size; |
1471 | struct raid_map *raid_map; |
1472 | |
1473 | raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); |
1474 | if (!raid_map) |
1475 | return -ENOMEM; |
1476 | |
1477 | rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, |
1478 | device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); |
1479 | if (rc) |
1480 | goto error; |
1481 | |
1482 | raid_map_size = get_unaligned_le32(&raid_map->structure_size); |
1483 | |
1484 | if (raid_map_size > sizeof(*raid_map)) { |
1485 | |
1486 | kfree(raid_map); |
1487 | |
1488 | raid_map = kmalloc(raid_map_size, GFP_KERNEL); |
1489 | if (!raid_map) |
1490 | return -ENOMEM; |
1491 | |
1492 | rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, |
1493 | device->scsi3addr, raid_map, raid_map_size, 0, NULL); |
1494 | if (rc) |
1495 | goto error; |
1496 | |
1497 | if (get_unaligned_le32(&raid_map->structure_size) |
1498 | != raid_map_size) { |
1499 | dev_warn(&ctrl_info->pci_dev->dev, |
1500 | "requested %u bytes, received %u bytes\n", |
1501 | raid_map_size, |
1502 | get_unaligned_le32(&raid_map->structure_size)); |
1503 | rc = -EINVAL; |
1504 | goto error; |
1505 | } |
1506 | } |
1507 | |
1508 | rc = pqi_validate_raid_map(ctrl_info, device, raid_map); |
1509 | if (rc) |
1510 | goto error; |
1511 | |
1512 | device->raid_map = raid_map; |
1513 | |
1514 | return 0; |
1515 | |
1516 | error: |
1517 | kfree(raid_map); |
1518 | |
1519 | return rc; |
1520 | } |
1521 | |
1522 | static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, |
1523 | struct pqi_scsi_dev *device) |
1524 | { |
1525 | if (!ctrl_info->lv_drive_type_mix_valid) { |
1526 | device->max_transfer_encrypted = ~0; |
1527 | return; |
1528 | } |
1529 | |
1530 | switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { |
1531 | case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: |
1532 | case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: |
1533 | case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: |
1534 | case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: |
1535 | case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: |
1536 | case LV_DRIVE_TYPE_MIX_SAS_ONLY: |
1537 | case LV_DRIVE_TYPE_MIX_SATA_ONLY: |
1538 | device->max_transfer_encrypted = |
1539 | ctrl_info->max_transfer_encrypted_sas_sata; |
1540 | break; |
1541 | case LV_DRIVE_TYPE_MIX_NVME_ONLY: |
1542 | device->max_transfer_encrypted = |
1543 | ctrl_info->max_transfer_encrypted_nvme; |
1544 | break; |
1545 | case LV_DRIVE_TYPE_MIX_UNKNOWN: |
1546 | case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: |
1547 | default: |
1548 | device->max_transfer_encrypted = |
1549 | min(ctrl_info->max_transfer_encrypted_sas_sata, |
1550 | ctrl_info->max_transfer_encrypted_nvme); |
1551 | break; |
1552 | } |
1553 | } |
1554 | |
1555 | static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, |
1556 | struct pqi_scsi_dev *device) |
1557 | { |
1558 | int rc; |
1559 | u8 *buffer; |
1560 | u8 bypass_status; |
1561 | |
1562 | buffer = kmalloc(64, GFP_KERNEL); |
1563 | if (!buffer) |
1564 | return; |
1565 | |
1566 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, |
1567 | VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); |
1568 | if (rc) |
1569 | goto out; |
1570 | |
1571 | #define RAID_BYPASS_STATUS 4 |
1572 | #define RAID_BYPASS_CONFIGURED 0x1 |
1573 | #define RAID_BYPASS_ENABLED 0x2 |
1574 | |
1575 | bypass_status = buffer[RAID_BYPASS_STATUS]; |
1576 | device->raid_bypass_configured = |
1577 | (bypass_status & RAID_BYPASS_CONFIGURED) != 0; |
1578 | if (device->raid_bypass_configured && |
1579 | (bypass_status & RAID_BYPASS_ENABLED) && |
1580 | pqi_get_raid_map(ctrl_info, device) == 0) { |
1581 | device->raid_bypass_enabled = true; |
1582 | if (get_unaligned_le16(&device->raid_map->flags) & |
1583 | RAID_MAP_ENCRYPTION_ENABLED) |
1584 | pqi_set_max_transfer_encrypted(ctrl_info, device); |
1585 | } |
1586 | |
1587 | out: |
1588 | kfree(buffer); |
1589 | } |
1590 | |
1591 | /* |
1592 | * Use vendor-specific VPD to determine online/offline status of a volume. |
1593 | */ |
1594 | |
1595 | static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, |
1596 | struct pqi_scsi_dev *device) |
1597 | { |
1598 | int rc; |
1599 | size_t page_length; |
1600 | u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; |
1601 | bool volume_offline = true; |
1602 | u32 volume_flags; |
1603 | struct ciss_vpd_logical_volume_status *vpd; |
1604 | |
1605 | vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); |
1606 | if (!vpd) |
1607 | goto no_buffer; |
1608 | |
1609 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, |
1610 | VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); |
1611 | if (rc) |
1612 | goto out; |
1613 | |
1614 | if (vpd->page_code != CISS_VPD_LV_STATUS) |
1615 | goto out; |
1616 | |
1617 | page_length = offsetof(struct ciss_vpd_logical_volume_status, |
1618 | volume_status) + vpd->page_length; |
1619 | if (page_length < sizeof(*vpd)) |
1620 | goto out; |
1621 | |
1622 | volume_status = vpd->volume_status; |
1623 | volume_flags = get_unaligned_be32(&vpd->flags); |
1624 | volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; |
1625 | |
1626 | out: |
1627 | kfree(objp: vpd); |
1628 | no_buffer: |
1629 | device->volume_status = volume_status; |
1630 | device->volume_offline = volume_offline; |
1631 | } |
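
/*
 * The defaults above (CISS_LV_STATUS_UNAVAILABLE, offline) are kept
 * whenever the inquiry fails, the page code does not match, or the
 * reported page length is too small to cover the whole VPD structure.
 * Otherwise the volume is marked offline only when the
 * CISS_LV_FLAGS_NO_HOST_IO flag is set in the VPD flags word.
 */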
1632 | |
1633 | #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 |
1634 | #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 |
1635 | #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 |
1636 | |
1637 | static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, |
1638 | struct pqi_scsi_dev *device, |
1639 | struct bmic_identify_physical_device *id_phys) |
1640 | { |
1641 | int rc; |
1642 | |
1643 | memset(id_phys, 0, sizeof(*id_phys)); |
1644 | |
1645 | rc = pqi_identify_physical_device(ctrl_info, device, |
1646 | buffer: id_phys, buffer_length: sizeof(*id_phys)); |
1647 | if (rc) { |
1648 | device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; |
1649 | return rc; |
1650 | } |
1651 | |
1652 | scsi_sanitize_inquiry_string(s: &id_phys->model[0], len: 8); |
1653 | scsi_sanitize_inquiry_string(s: &id_phys->model[8], len: 16); |
1654 | |
1655 | memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); |
1656 | memcpy(device->model, &id_phys->model[8], sizeof(device->model)); |
1657 | |
1658 | device->box_index = id_phys->box_index; |
1659 | device->phys_box_on_bus = id_phys->phys_box_on_bus; |
1660 | device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; |
1661 | device->queue_depth = |
1662 | get_unaligned_le16(p: &id_phys->current_queue_depth_limit); |
1663 | device->active_path_index = id_phys->active_path_number; |
1664 | device->path_map = id_phys->redundant_path_present_map; |
1665 | memcpy(&device->box, |
1666 | &id_phys->alternate_paths_phys_box_on_port, |
1667 | sizeof(device->box)); |
1668 | memcpy(&device->phys_connector, |
1669 | &id_phys->alternate_paths_phys_connector, |
1670 | sizeof(device->phys_connector)); |
1671 | device->bay = id_phys->phys_bay_in_box; |
1672 | device->lun_count = id_phys->multi_lun_device_lun_count; |
1673 | if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && |
1674 | id_phys->phy_count) |
1675 | device->phy_id = |
1676 | id_phys->phy_to_phy_map[device->active_path_index]; |
1677 | else |
1678 | device->phy_id = 0xFF; |
1679 | |
1680 | device->ncq_prio_support = |
1681 | ((get_unaligned_le32(p: &id_phys->misc_drive_flags) >> 16) & |
1682 | PQI_DEVICE_NCQ_PRIO_SUPPORTED); |
1683 | |
1684 | device->erase_in_progress = !!(get_unaligned_le16(p: &id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); |
1685 | |
1686 | return 0; |
1687 | } |
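
/*
 * Flag decoding notes for the identify data above: the NCQ priority
 * capability is carried in the upper 16 bits of misc_drive_flags, which is
 * why the value is shifted right by 16 before testing
 * PQI_DEVICE_NCQ_PRIO_SUPPORTED. PQI_DEVICE_PHY_MAP_SUPPORTED and
 * PQI_DEVICE_ERASE_IN_PROGRESS happen to share the value 0x10 but are
 * tested against different fields (even_more_flags vs.
 * extra_physical_drive_flags). If the identify command fails, only a
 * default queue depth is set and the error is returned to the caller.
 */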
1688 | |
1689 | static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, |
1690 | struct pqi_scsi_dev *device) |
1691 | { |
1692 | int rc; |
1693 | u8 *buffer; |
1694 | |
1695 | buffer = kmalloc(size: 64, GFP_KERNEL); |
1696 | if (!buffer) |
1697 | return -ENOMEM; |
1698 | |
1699 | /* Send an inquiry to the device to see what it is. */ |
1700 | rc = pqi_scsi_inquiry(ctrl_info, scsi3addr: device->scsi3addr, vpd_page: 0, buffer, buffer_length: 64); |
1701 | if (rc) |
1702 | goto out; |
1703 | |
1704 | scsi_sanitize_inquiry_string(s: &buffer[8], len: 8); |
1705 | scsi_sanitize_inquiry_string(s: &buffer[16], len: 16); |
1706 | |
1707 | device->devtype = buffer[0] & 0x1f; |
1708 | memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); |
1709 | memcpy(device->model, &buffer[16], sizeof(device->model)); |
1710 | |
1711 | if (device->devtype == TYPE_DISK) { |
1712 | if (device->is_external_raid_device) { |
1713 | device->raid_level = SA_RAID_UNKNOWN; |
1714 | device->volume_status = CISS_LV_OK; |
1715 | device->volume_offline = false; |
1716 | } else { |
1717 | pqi_get_raid_level(ctrl_info, device); |
1718 | pqi_get_raid_bypass_status(ctrl_info, device); |
1719 | pqi_get_volume_status(ctrl_info, device); |
1720 | } |
1721 | } |
1722 | |
1723 | out: |
1724 | kfree(objp: buffer); |
1725 | |
1726 | return rc; |
1727 | } |
1728 | |
1729 | /* |
1730 | * Prevent adding a drive to the OS in some corner cases, such as a drive |
1731 | * undergoing a sanitize (erase) operation. Some OSes will continue to poll |
1732 | * the drive until the sanitize completes, which can take hours, resulting |
1733 | * in long boot delays. Commands such as TUR and READ_CAP are allowed, but |
1734 | * READ/WRITE commands fail with a check condition, so the OS cannot read |
1735 | * the partition table. |
1736 | * Note: devices that have completed a sanitize must be re-enabled using |
1737 | * the management utility. |
1738 | */ |
1739 | static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) |
1740 | { |
1741 | return device->erase_in_progress; |
1742 | } |
1743 | |
1744 | static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, |
1745 | struct pqi_scsi_dev *device, |
1746 | struct bmic_identify_physical_device *id_phys) |
1747 | { |
1748 | int rc; |
1749 | |
1750 | if (device->is_expander_smp_device) |
1751 | return 0; |
1752 | |
1753 | if (pqi_is_logical_device(device)) |
1754 | rc = pqi_get_logical_device_info(ctrl_info, device); |
1755 | else |
1756 | rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); |
1757 | |
1758 | return rc; |
1759 | } |
1760 | |
1761 | static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, |
1762 | struct pqi_scsi_dev *device, |
1763 | struct bmic_identify_physical_device *id_phys) |
1764 | { |
1765 | int rc; |
1766 | |
1767 | rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); |
1768 | |
1769 | if (rc == 0 && device->lun_count == 0) |
1770 | device->lun_count = 1; |
1771 | |
1772 | return rc; |
1773 | } |
1774 | |
1775 | static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, |
1776 | struct pqi_scsi_dev *device) |
1777 | { |
1778 | char *status; |
1779 | static const char unknown_state_str[] = |
1780 | "Volume is in an unknown state (%u)"; |
1781 | char unknown_state_buffer[sizeof(unknown_state_str) + 10]; |
1782 | |
1783 | switch (device->volume_status) { |
1784 | case CISS_LV_OK: |
1785 | status = "Volume online"; |
1786 | break; |
1787 | case CISS_LV_FAILED: |
1788 | status = "Volume failed"; |
1789 | break; |
1790 | case CISS_LV_NOT_CONFIGURED: |
1791 | status = "Volume not configured"; |
1792 | break; |
1793 | case CISS_LV_DEGRADED: |
1794 | status = "Volume degraded"; |
1795 | break; |
1796 | case CISS_LV_READY_FOR_RECOVERY: |
1797 | status = "Volume ready for recovery operation"; |
1798 | break; |
1799 | case CISS_LV_UNDERGOING_RECOVERY: |
1800 | status = "Volume undergoing recovery"; |
1801 | break; |
1802 | case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: |
1803 | status = "Wrong physical drive was replaced"; |
1804 | break; |
1805 | case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: |
1806 | status = "A physical drive not properly connected"; |
1807 | break; |
1808 | case CISS_LV_HARDWARE_OVERHEATING: |
1809 | status = "Hardware is overheating"; |
1810 | break; |
1811 | case CISS_LV_HARDWARE_HAS_OVERHEATED: |
1812 | status = "Hardware has overheated"; |
1813 | break; |
1814 | case CISS_LV_UNDERGOING_EXPANSION: |
1815 | status = "Volume undergoing expansion"; |
1816 | break; |
1817 | case CISS_LV_NOT_AVAILABLE: |
1818 | status = "Volume waiting for transforming volume"; |
1819 | break; |
1820 | case CISS_LV_QUEUED_FOR_EXPANSION: |
1821 | status = "Volume queued for expansion"; |
1822 | break; |
1823 | case CISS_LV_DISABLED_SCSI_ID_CONFLICT: |
1824 | status = "Volume disabled due to SCSI ID conflict"; |
1825 | break; |
1826 | case CISS_LV_EJECTED: |
1827 | status = "Volume has been ejected"; |
1828 | break; |
1829 | case CISS_LV_UNDERGOING_ERASE: |
1830 | status = "Volume undergoing background erase"; |
1831 | break; |
1832 | case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: |
1833 | status = "Volume ready for predictive spare rebuild"; |
1834 | break; |
1835 | case CISS_LV_UNDERGOING_RPI: |
1836 | status = "Volume undergoing rapid parity initialization"; |
1837 | break; |
1838 | case CISS_LV_PENDING_RPI: |
1839 | status = "Volume queued for rapid parity initialization"; |
1840 | break; |
1841 | case CISS_LV_ENCRYPTED_NO_KEY: |
1842 | status = "Encrypted volume inaccessible - key not present"; |
1843 | break; |
1844 | case CISS_LV_UNDERGOING_ENCRYPTION: |
1845 | status = "Volume undergoing encryption process"; |
1846 | break; |
1847 | case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: |
1848 | status = "Volume undergoing encryption re-keying process"; |
1849 | break; |
1850 | case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: |
1851 | status = "Volume encrypted but encryption is disabled"; |
1852 | break; |
1853 | case CISS_LV_PENDING_ENCRYPTION: |
1854 | status = "Volume pending migration to encrypted state"; |
1855 | break; |
1856 | case CISS_LV_PENDING_ENCRYPTION_REKEYING: |
1857 | status = "Volume pending encryption rekeying"; |
1858 | break; |
1859 | case CISS_LV_NOT_SUPPORTED: |
1860 | status = "Volume not supported on this controller"; |
1861 | break; |
1862 | case CISS_LV_STATUS_UNAVAILABLE: |
1863 | status = "Volume status not available"; |
1864 | break; |
1865 | default: |
1866 | snprintf(buf: unknown_state_buffer, size: sizeof(unknown_state_buffer), |
1867 | fmt: unknown_state_str, device->volume_status); |
1868 | status = unknown_state_buffer; |
1869 | break; |
1870 | } |
1871 | |
1872 | dev_info(&ctrl_info->pci_dev->dev, |
1873 | "scsi %d:%d:%d:%d %s\n", |
1874 | ctrl_info->scsi_host->host_no, |
1875 | device->bus, device->target, device->lun, status); |
1876 | } |
1877 | |
1878 | static void pqi_rescan_worker(struct work_struct *work) |
1879 | { |
1880 | struct pqi_ctrl_info *ctrl_info; |
1881 | |
1882 | ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, |
1883 | rescan_work); |
1884 | |
1885 | pqi_scan_scsi_devices(ctrl_info); |
1886 | } |
1887 | |
1888 | static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, |
1889 | struct pqi_scsi_dev *device) |
1890 | { |
1891 | int rc; |
1892 | |
1893 | if (pqi_is_logical_device(device)) |
1894 | rc = scsi_add_device(host: ctrl_info->scsi_host, channel: device->bus, |
1895 | target: device->target, lun: device->lun); |
1896 | else |
1897 | rc = pqi_add_sas_device(pqi_sas_node: ctrl_info->sas_host, device); |
1898 | |
1899 | return rc; |
1900 | } |
1901 | |
1902 | #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) |
1903 | |
1904 | static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) |
1905 | { |
1906 | int rc; |
1907 | int lun; |
1908 | |
1909 | for (lun = 0; lun < device->lun_count; lun++) { |
1910 | rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, |
1911 | PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); |
1912 | if (rc) |
1913 | dev_err(&ctrl_info->pci_dev->dev, |
1914 | "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", |
1915 | ctrl_info->scsi_host->host_no, device->bus, |
1916 | device->target, lun, |
1917 | atomic_read(&device->scsi_cmds_outstanding[lun])); |
1918 | } |
1919 | |
1920 | if (pqi_is_logical_device(device)) |
1921 | scsi_remove_device(device->sdev); |
1922 | else |
1923 | pqi_remove_sas_device(device); |
1924 | |
1925 | pqi_device_remove_start(device); |
1926 | } |
1927 | |
1928 | /* Assumes the SCSI device list lock is held. */ |
1929 | |
1930 | static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, |
1931 | int bus, int target, int lun) |
1932 | { |
1933 | struct pqi_scsi_dev *device; |
1934 | |
1935 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
1936 | if (device->bus == bus && device->target == target && device->lun == lun) |
1937 | return device; |
1938 | |
1939 | return NULL; |
1940 | } |
1941 | |
1942 | static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) |
1943 | { |
1944 | if (dev1->is_physical_device != dev2->is_physical_device) |
1945 | return false; |
1946 | |
1947 | if (dev1->is_physical_device) |
1948 | return memcmp(p: dev1->wwid, q: dev2->wwid, size: sizeof(dev1->wwid)) == 0; |
1949 | |
1950 | return memcmp(p: dev1->volume_id, q: dev2->volume_id, size: sizeof(dev1->volume_id)) == 0; |
1951 | } |
1952 | |
1953 | enum pqi_find_result { |
1954 | DEVICE_NOT_FOUND, |
1955 | DEVICE_CHANGED, |
1956 | DEVICE_SAME, |
1957 | }; |
1958 | |
1959 | static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, |
1960 | struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) |
1961 | { |
1962 | struct pqi_scsi_dev *device; |
1963 | |
1964 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
1965 | if (pqi_scsi3addr_equal(scsi3addr1: device_to_find->scsi3addr, scsi3addr2: device->scsi3addr)) { |
1966 | *matching_device = device; |
1967 | if (pqi_device_equal(dev1: device_to_find, dev2: device)) { |
1968 | if (device_to_find->volume_offline) |
1969 | return DEVICE_CHANGED; |
1970 | return DEVICE_SAME; |
1971 | } |
1972 | return DEVICE_CHANGED; |
1973 | } |
1974 | } |
1975 | |
1976 | return DEVICE_NOT_FOUND; |
1977 | } |
1978 | |
1979 | static inline const char *pqi_device_type(struct pqi_scsi_dev *device) |
1980 | { |
1981 | if (device->is_expander_smp_device) |
1982 | return "Enclosure SMP "; |
1983 | |
1984 | return scsi_device_type(type: device->devtype); |
1985 | } |
1986 | |
1987 | #define PQI_DEV_INFO_BUFFER_LENGTH 128 |
1988 | |
1989 | static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, |
1990 | char *action, struct pqi_scsi_dev *device) |
1991 | { |
1992 | ssize_t count; |
1993 | char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; |
1994 | |
1995 | count = scnprintf(buf: buffer, PQI_DEV_INFO_BUFFER_LENGTH, |
1996 | fmt: "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); |
1997 | |
1998 | if (device->target_lun_valid) |
1999 | count += scnprintf(buf: buffer + count, |
2000 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2001 | fmt: "%d:%d", |
2002 | device->target, |
2003 | device->lun); |
2004 | else |
2005 | count += scnprintf(buf: buffer + count, |
2006 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2007 | fmt: "-:-"); |
2008 | |
2009 | if (pqi_is_logical_device(device)) |
2010 | count += scnprintf(buf: buffer + count, |
2011 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2012 | fmt: " %08x%08x", |
2013 | *((u32 *)&device->scsi3addr), |
2014 | *((u32 *)&device->scsi3addr[4])); |
2015 | else |
2016 | count += scnprintf(buf: buffer + count, |
2017 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2018 | fmt: " %016llx%016llx", |
2019 | get_unaligned_be64(p: &device->wwid[0]), |
2020 | get_unaligned_be64(p: &device->wwid[8])); |
2021 | |
2022 | count += scnprintf(buf: buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, |
2023 | fmt: " %s %.8s %.16s ", |
2024 | pqi_device_type(device), |
2025 | device->vendor, |
2026 | device->model); |
2027 | |
2028 | if (pqi_is_logical_device(device)) { |
2029 | if (device->devtype == TYPE_DISK) |
2030 | count += scnprintf(buf: buffer + count, |
2031 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2032 | fmt: "SSDSmartPathCap%c En%c %-12s", |
2033 | device->raid_bypass_configured ? '+' : '-', |
2034 | device->raid_bypass_enabled ? '+' : '-', |
2035 | pqi_raid_level_to_string(raid_level: device->raid_level)); |
2036 | } else { |
2037 | count += scnprintf(buf: buffer + count, |
2038 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2039 | fmt: "AIO%c", device->aio_enabled ? '+' : '-'); |
2040 | if (device->devtype == TYPE_DISK || |
2041 | device->devtype == TYPE_ZBC) |
2042 | count += scnprintf(buf: buffer + count, |
2043 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
2044 | fmt: " qd=%-6d", device->queue_depth); |
2045 | } |
2046 | |
2047 | dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); |
2048 | } |
2049 | |
2050 | static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) |
2051 | { |
2052 | u32 raid_map1_size; |
2053 | u32 raid_map2_size; |
2054 | |
2055 | if (raid_map1 == NULL || raid_map2 == NULL) |
2056 | return raid_map1 == raid_map2; |
2057 | |
2058 | raid_map1_size = get_unaligned_le32(p: &raid_map1->structure_size); |
2059 | raid_map2_size = get_unaligned_le32(p: &raid_map2->structure_size); |
2060 | |
2061 | if (raid_map1_size != raid_map2_size) |
2062 | return false; |
2063 | |
2064 | return memcmp(p: raid_map1, q: raid_map2, size: raid_map1_size) == 0; |
2065 | } |
2066 | |
2067 | /* Assumes the SCSI device list lock is held. */ |
2068 | |
2069 | static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, |
2070 | struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) |
2071 | { |
2072 | existing_device->device_type = new_device->device_type; |
2073 | existing_device->bus = new_device->bus; |
2074 | if (new_device->target_lun_valid) { |
2075 | existing_device->target = new_device->target; |
2076 | existing_device->lun = new_device->lun; |
2077 | existing_device->target_lun_valid = true; |
2078 | } |
2079 | |
2080 | /* By definition, the scsi3addr and wwid fields are already the same. */ |
2081 | |
2082 | existing_device->is_physical_device = new_device->is_physical_device; |
2083 | memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); |
2084 | memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); |
2085 | existing_device->sas_address = new_device->sas_address; |
2086 | existing_device->queue_depth = new_device->queue_depth; |
2087 | existing_device->device_offline = false; |
2088 | existing_device->lun_count = new_device->lun_count; |
2089 | |
2090 | if (pqi_is_logical_device(device: existing_device)) { |
2091 | existing_device->is_external_raid_device = new_device->is_external_raid_device; |
2092 | |
2093 | if (existing_device->devtype == TYPE_DISK) { |
2094 | existing_device->raid_level = new_device->raid_level; |
2095 | existing_device->volume_status = new_device->volume_status; |
2096 | memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); |
2097 | if (!pqi_raid_maps_equal(raid_map1: existing_device->raid_map, raid_map2: new_device->raid_map)) { |
2098 | kfree(objp: existing_device->raid_map); |
2099 | existing_device->raid_map = new_device->raid_map; |
2100 | /* Prevent the raid_map from being freed along with new_device later. */ |
2101 | new_device->raid_map = NULL; |
2102 | } |
2103 | existing_device->raid_bypass_configured = new_device->raid_bypass_configured; |
2104 | existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; |
2105 | } |
2106 | } else { |
2107 | existing_device->aio_enabled = new_device->aio_enabled; |
2108 | existing_device->aio_handle = new_device->aio_handle; |
2109 | existing_device->is_expander_smp_device = new_device->is_expander_smp_device; |
2110 | existing_device->active_path_index = new_device->active_path_index; |
2111 | existing_device->phy_id = new_device->phy_id; |
2112 | existing_device->path_map = new_device->path_map; |
2113 | existing_device->bay = new_device->bay; |
2114 | existing_device->box_index = new_device->box_index; |
2115 | existing_device->phys_box_on_bus = new_device->phys_box_on_bus; |
2116 | existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; |
2117 | memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); |
2118 | memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); |
2119 | } |
2120 | } |
2121 | |
2122 | static inline void pqi_free_device(struct pqi_scsi_dev *device) |
2123 | { |
2124 | if (device) { |
2125 | kfree(objp: device->raid_map); |
2126 | kfree(objp: device); |
2127 | } |
2128 | } |
2129 | |
2130 | /* |
2131 | * Called when exposing a new device to the OS fails in order to re-adjust |
2132 | * our internal SCSI device list to match the SCSI ML's view. |
2133 | */ |
2134 | |
2135 | static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, |
2136 | struct pqi_scsi_dev *device) |
2137 | { |
2138 | unsigned long flags; |
2139 | |
2140 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
2141 | list_del(entry: &device->scsi_device_list_entry); |
2142 | spin_unlock_irqrestore(lock: &ctrl_info->scsi_device_list_lock, flags); |
2143 | |
2144 | /* Allow the device structure to be freed later. */ |
2145 | device->keep_device = false; |
2146 | } |
2147 | |
2148 | static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) |
2149 | { |
2150 | if (device->is_expander_smp_device) |
2151 | return device->sas_port != NULL; |
2152 | |
2153 | return device->sdev != NULL; |
2154 | } |
2155 | |
2156 | static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) |
2157 | { |
2158 | unsigned int lun; |
2159 | struct pqi_tmf_work *tmf_work; |
2160 | |
2161 | for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) |
2162 | INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); |
2163 | } |
2164 | |
2165 | static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) |
2166 | { |
2167 | if (pqi_device_in_remove(device)) |
2168 | return false; |
2169 | |
2170 | if (device->sdev == NULL) |
2171 | return false; |
2172 | |
2173 | if (!scsi_device_online(sdev: device->sdev)) |
2174 | return false; |
2175 | |
2176 | return device->rescan; |
2177 | } |
2178 | |
2179 | static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, |
2180 | struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) |
2181 | { |
2182 | int rc; |
2183 | unsigned int i; |
2184 | unsigned long flags; |
2185 | enum pqi_find_result find_result; |
2186 | struct pqi_scsi_dev *device; |
2187 | struct pqi_scsi_dev *next; |
2188 | struct pqi_scsi_dev *matching_device; |
2189 | LIST_HEAD(add_list); |
2190 | LIST_HEAD(delete_list); |
2191 | |
2192 | /* |
2193 | * The idea here is to do as little work as possible while holding the |
2194 | * spinlock. That's why we go to great pains to defer anything other |
2195 | * than updating the internal device list until after we release the |
2196 | * spinlock. |
2197 | */ |
2198 | |
2199 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
2200 | |
2201 | /* Assume that all devices in the existing list have gone away. */ |
2202 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
2203 | device->device_gone = true; |
2204 | |
2205 | for (i = 0; i < num_new_devices; i++) { |
2206 | device = new_device_list[i]; |
2207 | |
2208 | find_result = pqi_scsi_find_entry(ctrl_info, device_to_find: device, |
2209 | matching_device: &matching_device); |
2210 | |
2211 | switch (find_result) { |
2212 | case DEVICE_SAME: |
2213 | /* |
2214 | * The newly found device is already in the existing |
2215 | * device list. |
2216 | */ |
2217 | device->new_device = false; |
2218 | matching_device->device_gone = false; |
2219 | pqi_scsi_update_device(ctrl_info, existing_device: matching_device, new_device: device); |
2220 | break; |
2221 | case DEVICE_NOT_FOUND: |
2222 | /* |
2223 | * The newly found device is NOT in the existing device |
2224 | * list. |
2225 | */ |
2226 | device->new_device = true; |
2227 | break; |
2228 | case DEVICE_CHANGED: |
2229 | /* |
2230 | * The original device has gone away and we need to add |
2231 | * the new device. |
2232 | */ |
2233 | device->new_device = true; |
2234 | break; |
2235 | } |
2236 | } |
2237 | |
2238 | /* Process all devices that have gone away. */ |
2239 | list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, |
2240 | scsi_device_list_entry) { |
2241 | if (device->device_gone) { |
2242 | list_del(entry: &device->scsi_device_list_entry); |
2243 | list_add_tail(new: &device->delete_list_entry, head: &delete_list); |
2244 | } |
2245 | } |
2246 | |
2247 | /* Process all new devices. */ |
2248 | for (i = 0; i < num_new_devices; i++) { |
2249 | device = new_device_list[i]; |
2250 | if (!device->new_device) |
2251 | continue; |
2252 | if (device->volume_offline) |
2253 | continue; |
2254 | list_add_tail(new: &device->scsi_device_list_entry, |
2255 | head: &ctrl_info->scsi_device_list); |
2256 | list_add_tail(new: &device->add_list_entry, head: &add_list); |
2257 | /* To prevent this device structure from being freed later. */ |
2258 | device->keep_device = true; |
2259 | pqi_init_device_tmf_work(device); |
2260 | } |
2261 | |
2262 | spin_unlock_irqrestore(lock: &ctrl_info->scsi_device_list_lock, flags); |
2263 | |
2264 | /* |
2265 | * If OFA is in progress and there are devices that need to be deleted, |
2266 | * allow any pending reset operations to continue and unblock any SCSI |
2267 | * requests before removal. |
2268 | */ |
2269 | if (pqi_ofa_in_progress(ctrl_info)) { |
2270 | list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) |
2271 | if (pqi_is_device_added(device)) |
2272 | pqi_device_remove_start(device); |
2273 | pqi_ctrl_unblock_device_reset(ctrl_info); |
2274 | pqi_scsi_unblock_requests(ctrl_info); |
2275 | } |
2276 | |
2277 | /* Remove all devices that have gone away. */ |
2278 | list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { |
2279 | if (device->volume_offline) { |
2280 | pqi_dev_info(ctrl_info, action: "offline", device); |
2281 | pqi_show_volume_status(ctrl_info, device); |
2282 | } else { |
2283 | pqi_dev_info(ctrl_info, action: "removed", device); |
2284 | } |
2285 | if (pqi_is_device_added(device)) |
2286 | pqi_remove_device(ctrl_info, device); |
2287 | list_del(entry: &device->delete_list_entry); |
2288 | pqi_free_device(device); |
2289 | } |
2290 | |
2291 | /* |
2292 | * Notify the SML of any existing device changes, such as |
2293 | * queue depth or device size. |
2294 | */ |
2295 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
2296 | if (device->sdev && device->queue_depth != device->advertised_queue_depth) { |
2297 | device->advertised_queue_depth = device->queue_depth; |
2298 | scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); |
2299 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
2300 | if (pqi_volume_rescan_needed(device)) { |
2301 | device->rescan = false; |
2302 | spin_unlock_irqrestore(lock: &ctrl_info->scsi_device_list_lock, flags); |
2303 | scsi_rescan_device(sdev: device->sdev); |
2304 | } else { |
2305 | spin_unlock_irqrestore(lock: &ctrl_info->scsi_device_list_lock, flags); |
2306 | } |
2307 | } |
2308 | } |
2309 | |
2310 | /* Expose any new devices. */ |
2311 | list_for_each_entry_safe(device, next, &add_list, add_list_entry) { |
2312 | if (!pqi_is_device_added(device)) { |
2313 | rc = pqi_add_device(ctrl_info, device); |
2314 | if (rc == 0) { |
2315 | pqi_dev_info(ctrl_info, action: "added", device); |
2316 | } else { |
2317 | dev_warn(&ctrl_info->pci_dev->dev, |
2318 | "scsi %d:%d:%d:%d addition failed, device not added\n", |
2319 | ctrl_info->scsi_host->host_no, |
2320 | device->bus, device->target, |
2321 | device->lun); |
2322 | pqi_fixup_botched_add(ctrl_info, device); |
2323 | } |
2324 | } |
2325 | } |
2326 | |
2327 | } |
2328 | |
2329 | static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) |
2330 | { |
2331 | /* |
2332 | * Only support the HBA controller itself as a RAID |
2333 | * controller. If it's a RAID controller other than |
2334 | * the HBA itself (an external RAID controller, for |
2335 | * example), we don't support it. |
2336 | */ |
2337 | if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && |
2338 | !pqi_is_hba_lunid(scsi3addr: device->scsi3addr)) |
2339 | return false; |
2340 | |
2341 | return true; |
2342 | } |
2343 | |
2344 | static inline bool pqi_skip_device(u8 *scsi3addr) |
2345 | { |
2346 | /* Ignore all masked devices. */ |
2347 | if (MASKED_DEVICE(scsi3addr)) |
2348 | return true; |
2349 | |
2350 | return false; |
2351 | } |
2352 | |
2353 | static inline void pqi_mask_device(u8 *scsi3addr) |
2354 | { |
2355 | scsi3addr[3] |= 0xc0; |
2356 | } |
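
/*
 * Masking a device sets the top two bits of byte 3 of its CISS LUN
 * address; pqi_skip_device() then skips the address and the device is
 * never added to the new-device list. This is how the VSEP entry is
 * hidden in pqi_update_scsi_devices() when pqi_hide_vsep is enabled.
 */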
2357 | |
2358 | static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) |
2359 | { |
2360 | if (pqi_is_logical_device(device)) |
2361 | return false; |
2362 | |
2363 | return (device->path_map & (device->path_map - 1)) != 0; |
2364 | } |
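
/*
 * path_map is a bitmap of the paths reported for a physical device
 * (logical devices always report single-path here). The expression
 * (path_map & (path_map - 1)) clears the lowest set bit, so it is non-zero
 * only when more than one bit is set. For example, 0x6 & 0x5 = 0x4 (two
 * paths -> multipath), while 0x4 & 0x3 = 0 (a single path).
 */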
2365 | |
2366 | static inline bool pqi_expose_device(struct pqi_scsi_dev *device) |
2367 | { |
2368 | return !device->is_physical_device || !pqi_skip_device(scsi3addr: device->scsi3addr); |
2369 | } |
2370 | |
2371 | static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
2372 | { |
2373 | int i; |
2374 | int rc; |
2375 | LIST_HEAD(new_device_list_head); |
2376 | struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; |
2377 | struct report_log_lun_list *logdev_list = NULL; |
2378 | struct report_phys_lun_16byte_wwid *phys_lun; |
2379 | struct report_log_lun *log_lun; |
2380 | struct bmic_identify_physical_device *id_phys = NULL; |
2381 | u32 num_physicals; |
2382 | u32 num_logicals; |
2383 | struct pqi_scsi_dev **new_device_list = NULL; |
2384 | struct pqi_scsi_dev *device; |
2385 | struct pqi_scsi_dev *next; |
2386 | unsigned int num_new_devices; |
2387 | unsigned int num_valid_devices; |
2388 | bool is_physical_device; |
2389 | u8 *scsi3addr; |
2390 | unsigned int physical_index; |
2391 | unsigned int logical_index; |
2392 | static char *out_of_memory_msg = |
2393 | "failed to allocate memory, device discovery stopped"; |
2394 | |
2395 | rc = pqi_get_device_lists(ctrl_info, physdev_list: &physdev_list, logdev_list: &logdev_list); |
2396 | if (rc) |
2397 | goto out; |
2398 | |
2399 | if (physdev_list) |
2400 | num_physicals = |
2401 | get_unaligned_be32(p: &physdev_list->header.list_length) |
2402 | / sizeof(physdev_list->lun_entries[0]); |
2403 | else |
2404 | num_physicals = 0; |
2405 | |
2406 | if (logdev_list) |
2407 | num_logicals = |
2408 | get_unaligned_be32(p: &logdev_list->header.list_length) |
2409 | / sizeof(logdev_list->lun_entries[0]); |
2410 | else |
2411 | num_logicals = 0; |
2412 | |
2413 | if (num_physicals) { |
2414 | /* |
2415 | * We need this buffer for calls to pqi_get_physical_device_info() |
2416 | * below. We allocate it here instead of inside |
2417 | * pqi_get_physical_device_info() because it's a fairly large |
2418 | * buffer. |
2419 | */ |
2420 | id_phys = kmalloc(size: sizeof(*id_phys), GFP_KERNEL); |
2421 | if (!id_phys) { |
2422 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", |
2423 | out_of_memory_msg); |
2424 | rc = -ENOMEM; |
2425 | goto out; |
2426 | } |
2427 | |
2428 | if (pqi_hide_vsep) { |
2429 | for (i = num_physicals - 1; i >= 0; i--) { |
2430 | phys_lun = &physdev_list->lun_entries[i]; |
2431 | if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { |
2432 | pqi_mask_device(scsi3addr: phys_lun->lunid); |
2433 | break; |
2434 | } |
2435 | } |
2436 | } |
2437 | } |
2438 | |
2439 | if (num_logicals && |
2440 | (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) |
2441 | ctrl_info->lv_drive_type_mix_valid = true; |
2442 | |
2443 | num_new_devices = num_physicals + num_logicals; |
2444 | |
2445 | new_device_list = kmalloc_array(n: num_new_devices, |
2446 | size: sizeof(*new_device_list), |
2447 | GFP_KERNEL); |
2448 | if (!new_device_list) { |
2449 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); |
2450 | rc = -ENOMEM; |
2451 | goto out; |
2452 | } |
2453 | |
2454 | for (i = 0; i < num_new_devices; i++) { |
2455 | device = kzalloc(size: sizeof(*device), GFP_KERNEL); |
2456 | if (!device) { |
2457 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", |
2458 | out_of_memory_msg); |
2459 | rc = -ENOMEM; |
2460 | goto out; |
2461 | } |
2462 | list_add_tail(new: &device->new_device_list_entry, |
2463 | head: &new_device_list_head); |
2464 | } |
2465 | |
2466 | device = NULL; |
2467 | num_valid_devices = 0; |
2468 | physical_index = 0; |
2469 | logical_index = 0; |
2470 | |
2471 | for (i = 0; i < num_new_devices; i++) { |
2472 | |
2473 | if ((!pqi_expose_ld_first && i < num_physicals) || |
2474 | (pqi_expose_ld_first && i >= num_logicals)) { |
2475 | is_physical_device = true; |
2476 | phys_lun = &physdev_list->lun_entries[physical_index++]; |
2477 | log_lun = NULL; |
2478 | scsi3addr = phys_lun->lunid; |
2479 | } else { |
2480 | is_physical_device = false; |
2481 | phys_lun = NULL; |
2482 | log_lun = &logdev_list->lun_entries[logical_index++]; |
2483 | scsi3addr = log_lun->lunid; |
2484 | } |
2485 | |
2486 | if (is_physical_device && pqi_skip_device(scsi3addr)) |
2487 | continue; |
2488 | |
2489 | if (device) |
2490 | device = list_next_entry(device, new_device_list_entry); |
2491 | else |
2492 | device = list_first_entry(&new_device_list_head, |
2493 | struct pqi_scsi_dev, new_device_list_entry); |
2494 | |
2495 | memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); |
2496 | device->is_physical_device = is_physical_device; |
2497 | if (is_physical_device) { |
2498 | device->device_type = phys_lun->device_type; |
2499 | if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) |
2500 | device->is_expander_smp_device = true; |
2501 | } else { |
2502 | device->is_external_raid_device = |
2503 | pqi_is_external_raid_addr(scsi3addr); |
2504 | } |
2505 | |
2506 | if (!pqi_is_supported_device(device)) |
2507 | continue; |
2508 | |
2509 | /* Gather information about the device. */ |
2510 | rc = pqi_get_device_info(ctrl_info, device, id_phys); |
2511 | if (rc == -ENOMEM) { |
2512 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", |
2513 | out_of_memory_msg); |
2514 | goto out; |
2515 | } |
2516 | if (rc) { |
2517 | if (device->is_physical_device) |
2518 | dev_warn(&ctrl_info->pci_dev->dev, |
2519 | "obtaining device info failed, skipping physical device %016llx%016llx\n", |
2520 | get_unaligned_be64(&phys_lun->wwid[0]), |
2521 | get_unaligned_be64(&phys_lun->wwid[8])); |
2522 | else |
2523 | dev_warn(&ctrl_info->pci_dev->dev, |
2524 | "obtaining device info failed, skipping logical device %08x%08x\n", |
2525 | *((u32 *)&device->scsi3addr), |
2526 | *((u32 *)&device->scsi3addr[4])); |
2527 | rc = 0; |
2528 | continue; |
2529 | } |
2530 | |
2531 | /* Do not present disks that the OS cannot fully probe. */ |
2532 | if (pqi_keep_device_offline(device)) |
2533 | continue; |
2534 | |
2535 | pqi_assign_bus_target_lun(device); |
2536 | |
2537 | if (device->is_physical_device) { |
2538 | memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); |
2539 | if ((phys_lun->device_flags & |
2540 | CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && |
2541 | phys_lun->aio_handle) { |
2542 | device->aio_enabled = true; |
2543 | device->aio_handle = |
2544 | phys_lun->aio_handle; |
2545 | } |
2546 | } else { |
2547 | memcpy(device->volume_id, log_lun->volume_id, |
2548 | sizeof(device->volume_id)); |
2549 | } |
2550 | |
2551 | device->sas_address = get_unaligned_be64(p: &device->wwid[0]); |
2552 | |
2553 | new_device_list[num_valid_devices++] = device; |
2554 | } |
2555 | |
2556 | pqi_update_device_list(ctrl_info, new_device_list, num_new_devices: num_valid_devices); |
2557 | |
2558 | out: |
2559 | list_for_each_entry_safe(device, next, &new_device_list_head, |
2560 | new_device_list_entry) { |
2561 | if (device->keep_device) |
2562 | continue; |
2563 | list_del(entry: &device->new_device_list_entry); |
2564 | pqi_free_device(device); |
2565 | } |
2566 | |
2567 | kfree(objp: new_device_list); |
2568 | kfree(objp: physdev_list); |
2569 | kfree(objp: logdev_list); |
2570 | kfree(objp: id_phys); |
2571 | |
2572 | return rc; |
2573 | } |
2574 | |
2575 | static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
2576 | { |
2577 | int rc; |
2578 | int mutex_acquired; |
2579 | |
2580 | if (pqi_ctrl_offline(ctrl_info)) |
2581 | return -ENXIO; |
2582 | |
2583 | mutex_acquired = mutex_trylock(lock: &ctrl_info->scan_mutex); |
2584 | |
2585 | if (!mutex_acquired) { |
2586 | if (pqi_ctrl_scan_blocked(ctrl_info)) |
2587 | return -EBUSY; |
2588 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
2589 | return -EINPROGRESS; |
2590 | } |
2591 | |
2592 | rc = pqi_update_scsi_devices(ctrl_info); |
2593 | if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) |
2594 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
2595 | |
2596 | mutex_unlock(lock: &ctrl_info->scan_mutex); |
2597 | |
2598 | return rc; |
2599 | } |
2600 | |
2601 | static void pqi_scan_start(struct Scsi_Host *shost) |
2602 | { |
2603 | struct pqi_ctrl_info *ctrl_info; |
2604 | |
2605 | ctrl_info = shost_to_hba(shost); |
2606 | |
2607 | pqi_scan_scsi_devices(ctrl_info); |
2608 | } |
2609 | |
2610 | /* Returns TRUE if scan is finished. */ |
2611 | |
2612 | static int pqi_scan_finished(struct Scsi_Host *shost, |
2613 | unsigned long elapsed_time) |
2614 | { |
2615 | struct pqi_ctrl_info *ctrl_info; |
2616 | |
2617 | ctrl_info = shost_priv(shost); |
2618 | |
2619 | return !mutex_is_locked(lock: &ctrl_info->scan_mutex); |
2620 | } |
2621 | |
2622 | static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, |
2623 | struct raid_map *raid_map, u64 first_block) |
2624 | { |
2625 | u32 volume_blk_size; |
2626 | |
2627 | /* |
2628 | * Set the encryption tweak values based on logical block address. |
2629 | * If the block size is 512, the tweak value is equal to the LBA. |
2630 | * For other block sizes, tweak value is (LBA * block size) / 512. |
2631 | */ |
2632 | volume_blk_size = get_unaligned_le32(p: &raid_map->volume_blk_size); |
2633 | if (volume_blk_size != 512) |
2634 | first_block = (first_block * volume_blk_size) / 512; |
2635 | |
2636 | encryption_info->data_encryption_key_index = |
2637 | get_unaligned_le16(p: &raid_map->data_encryption_key_index); |
2638 | encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); |
2639 | encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); |
2640 | } |
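
/*
 * Worked example for the tweak calculation above: with a 512-byte volume
 * block size the tweak is the LBA itself; with a 4096-byte block size an
 * LBA of 1000 becomes (1000 * 4096) / 512 = 8000, which is then split into
 * encrypt_tweak_lower = 8000 and encrypt_tweak_upper = 0.
 */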
2641 | |
2642 | /* |
2643 | * Attempt to perform RAID bypass mapping for a logical volume I/O. |
2644 | */ |
2645 | |
2646 | static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, |
2647 | struct pqi_scsi_dev_raid_map_data *rmd) |
2648 | { |
2649 | bool is_supported = true; |
2650 | |
2651 | switch (rmd->raid_level) { |
2652 | case SA_RAID_0: |
2653 | break; |
2654 | case SA_RAID_1: |
2655 | if (rmd->is_write && (!ctrl_info->enable_r1_writes || |
2656 | rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) |
2657 | is_supported = false; |
2658 | break; |
2659 | case SA_RAID_TRIPLE: |
2660 | if (rmd->is_write && (!ctrl_info->enable_r1_writes || |
2661 | rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) |
2662 | is_supported = false; |
2663 | break; |
2664 | case SA_RAID_5: |
2665 | if (rmd->is_write && (!ctrl_info->enable_r5_writes || |
2666 | rmd->data_length > ctrl_info->max_write_raid_5_6)) |
2667 | is_supported = false; |
2668 | break; |
2669 | case SA_RAID_6: |
2670 | if (rmd->is_write && (!ctrl_info->enable_r6_writes || |
2671 | rmd->data_length > ctrl_info->max_write_raid_5_6)) |
2672 | is_supported = false; |
2673 | break; |
2674 | default: |
2675 | is_supported = false; |
2676 | break; |
2677 | } |
2678 | |
2679 | return is_supported; |
2680 | } |
2681 | |
2682 | #define PQI_RAID_BYPASS_INELIGIBLE 1 |
2683 | |
2684 | static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, |
2685 | struct pqi_scsi_dev_raid_map_data *rmd) |
2686 | { |
2687 | /* Check for valid opcode, get LBA and block count. */ |
2688 | switch (scmd->cmnd[0]) { |
2689 | case WRITE_6: |
2690 | rmd->is_write = true; |
2691 | fallthrough; |
2692 | case READ_6: |
2693 | rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | |
2694 | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); |
2695 | rmd->block_cnt = (u32)scmd->cmnd[4]; |
2696 | if (rmd->block_cnt == 0) |
2697 | rmd->block_cnt = 256; |
2698 | break; |
2699 | case WRITE_10: |
2700 | rmd->is_write = true; |
2701 | fallthrough; |
2702 | case READ_10: |
2703 | rmd->first_block = (u64)get_unaligned_be32(p: &scmd->cmnd[2]); |
2704 | rmd->block_cnt = (u32)get_unaligned_be16(p: &scmd->cmnd[7]); |
2705 | break; |
2706 | case WRITE_12: |
2707 | rmd->is_write = true; |
2708 | fallthrough; |
2709 | case READ_12: |
2710 | rmd->first_block = (u64)get_unaligned_be32(p: &scmd->cmnd[2]); |
2711 | rmd->block_cnt = get_unaligned_be32(p: &scmd->cmnd[6]); |
2712 | break; |
2713 | case WRITE_16: |
2714 | rmd->is_write = true; |
2715 | fallthrough; |
2716 | case READ_16: |
2717 | rmd->first_block = get_unaligned_be64(p: &scmd->cmnd[2]); |
2718 | rmd->block_cnt = get_unaligned_be32(p: &scmd->cmnd[10]); |
2719 | break; |
2720 | default: |
2721 | /* Process via normal I/O path. */ |
2722 | return PQI_RAID_BYPASS_INELIGIBLE; |
2723 | } |
2724 | |
2725 | put_unaligned_le32(val: scsi_bufflen(cmd: scmd), p: &rmd->data_length); |
2726 | |
2727 | return 0; |
2728 | } |
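
/*
 * Example CDB decodes handled above: a READ(10) with bytes 2-5 =
 * 00 00 03 e8 and bytes 7-8 = 00 08 yields first_block = 1000 and
 * block_cnt = 8. For the 6-byte forms a transfer length byte of zero
 * means 256 blocks, which is why block_cnt is fixed up for that case.
 */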
2729 | |
2730 | static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, |
2731 | struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) |
2732 | { |
2733 | #if BITS_PER_LONG == 32 |
2734 | u64 tmpdiv; |
2735 | #endif |
2736 | |
2737 | rmd->last_block = rmd->first_block + rmd->block_cnt - 1; |
2738 | |
2739 | /* Check for invalid block or wraparound. */ |
2740 | if (rmd->last_block >= |
2741 | get_unaligned_le64(p: &raid_map->volume_blk_cnt) || |
2742 | rmd->last_block < rmd->first_block) |
2743 | return PQI_RAID_BYPASS_INELIGIBLE; |
2744 | |
2745 | rmd->data_disks_per_row = |
2746 | get_unaligned_le16(p: &raid_map->data_disks_per_row); |
2747 | rmd->strip_size = get_unaligned_le16(p: &raid_map->strip_size); |
2748 | rmd->layout_map_count = get_unaligned_le16(p: &raid_map->layout_map_count); |
2749 | |
2750 | /* Calculate stripe information for the request. */ |
2751 | rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; |
2752 | if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ |
2753 | return PQI_RAID_BYPASS_INELIGIBLE; |
2754 | #if BITS_PER_LONG == 32 |
2755 | tmpdiv = rmd->first_block; |
2756 | do_div(tmpdiv, rmd->blocks_per_row); |
2757 | rmd->first_row = tmpdiv; |
2758 | tmpdiv = rmd->last_block; |
2759 | do_div(tmpdiv, rmd->blocks_per_row); |
2760 | rmd->last_row = tmpdiv; |
2761 | rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); |
2762 | rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); |
2763 | tmpdiv = rmd->first_row_offset; |
2764 | do_div(tmpdiv, rmd->strip_size); |
2765 | rmd->first_column = tmpdiv; |
2766 | tmpdiv = rmd->last_row_offset; |
2767 | do_div(tmpdiv, rmd->strip_size); |
2768 | rmd->last_column = tmpdiv; |
2769 | #else |
2770 | rmd->first_row = rmd->first_block / rmd->blocks_per_row; |
2771 | rmd->last_row = rmd->last_block / rmd->blocks_per_row; |
2772 | rmd->first_row_offset = (u32)(rmd->first_block - |
2773 | (rmd->first_row * rmd->blocks_per_row)); |
2774 | rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * |
2775 | rmd->blocks_per_row)); |
2776 | rmd->first_column = rmd->first_row_offset / rmd->strip_size; |
2777 | rmd->last_column = rmd->last_row_offset / rmd->strip_size; |
2778 | #endif |
2779 | |
2780 | /* If this isn't a single row/column then give to the controller. */ |
2781 | if (rmd->first_row != rmd->last_row || |
2782 | rmd->first_column != rmd->last_column) |
2783 | return PQI_RAID_BYPASS_INELIGIBLE; |
2784 | |
2785 | /* Proceeding with driver mapping. */ |
2786 | rmd->total_disks_per_row = rmd->data_disks_per_row + |
2787 | get_unaligned_le16(p: &raid_map->metadata_disks_per_row); |
2788 | rmd->map_row = ((u32)(rmd->first_row >> |
2789 | raid_map->parity_rotation_shift)) % |
2790 | get_unaligned_le16(p: &raid_map->row_cnt); |
2791 | rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + |
2792 | rmd->first_column; |
2793 | |
2794 | return 0; |
2795 | } |
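
/*
 * Geometry example for the mapping above (illustrative values): with
 * data_disks_per_row = 3 and strip_size = 128 blocks, blocks_per_row is
 * 384. A request starting at block 1000 lands in row 1000 / 384 = 2 with a
 * row offset of 232 and therefore column 232 / 128 = 1. The request is
 * only bypass-eligible if it ends in that same row and column; otherwise
 * it is handed to the controller via the normal RAID path. The map index
 * is then map_row * total_disks_per_row + first_column, with map_row
 * adjusted for parity rotation.
 */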
2796 | |
2797 | static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, |
2798 | struct raid_map *raid_map) |
2799 | { |
2800 | #if BITS_PER_LONG == 32 |
2801 | u64 tmpdiv; |
2802 | #endif |
2803 | |
2804 | if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ |
2805 | return PQI_RAID_BYPASS_INELIGIBLE; |
2806 | |
2807 | /* RAID 50/60 */ |
2808 | /* Verify first and last block are in same RAID group. */ |
2809 | rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; |
2810 | #if BITS_PER_LONG == 32 |
2811 | tmpdiv = rmd->first_block; |
2812 | rmd->first_group = do_div(tmpdiv, rmd->stripesize); |
2813 | tmpdiv = rmd->first_group; |
2814 | do_div(tmpdiv, rmd->blocks_per_row); |
2815 | rmd->first_group = tmpdiv; |
2816 | tmpdiv = rmd->last_block; |
2817 | rmd->last_group = do_div(tmpdiv, rmd->stripesize); |
2818 | tmpdiv = rmd->last_group; |
2819 | do_div(tmpdiv, rmd->blocks_per_row); |
2820 | rmd->last_group = tmpdiv; |
2821 | #else |
2822 | rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; |
2823 | rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; |
2824 | #endif |
2825 | if (rmd->first_group != rmd->last_group) |
2826 | return PQI_RAID_BYPASS_INELIGIBLE; |
2827 | |
2828 | /* Verify request is in a single row of RAID 5/6. */ |
2829 | #if BITS_PER_LONG == 32 |
2830 | tmpdiv = rmd->first_block; |
2831 | do_div(tmpdiv, rmd->stripesize); |
2832 | rmd->first_row = tmpdiv; |
2833 | rmd->r5or6_first_row = tmpdiv; |
2834 | tmpdiv = rmd->last_block; |
2835 | do_div(tmpdiv, rmd->stripesize); |
2836 | rmd->r5or6_last_row = tmpdiv; |
2837 | #else |
2838 | rmd->first_row = rmd->r5or6_first_row = |
2839 | rmd->first_block / rmd->stripesize; |
2840 | rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; |
2841 | #endif |
2842 | if (rmd->r5or6_first_row != rmd->r5or6_last_row) |
2843 | return PQI_RAID_BYPASS_INELIGIBLE; |
2844 | |
2845 | /* Verify request is in a single column. */ |
2846 | #if BITS_PER_LONG == 32 |
2847 | tmpdiv = rmd->first_block; |
2848 | rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); |
2849 | tmpdiv = rmd->first_row_offset; |
2850 | rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); |
2851 | rmd->r5or6_first_row_offset = rmd->first_row_offset; |
2852 | tmpdiv = rmd->last_block; |
2853 | rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); |
2854 | tmpdiv = rmd->r5or6_last_row_offset; |
2855 | rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); |
2856 | tmpdiv = rmd->r5or6_first_row_offset; |
2857 | do_div(tmpdiv, rmd->strip_size); |
2858 | rmd->first_column = rmd->r5or6_first_column = tmpdiv; |
2859 | tmpdiv = rmd->r5or6_last_row_offset; |
2860 | do_div(tmpdiv, rmd->strip_size); |
2861 | rmd->r5or6_last_column = tmpdiv; |
2862 | #else |
2863 | rmd->first_row_offset = rmd->r5or6_first_row_offset = |
2864 | (u32)((rmd->first_block % rmd->stripesize) % |
2865 | rmd->blocks_per_row); |
2866 | |
2867 | rmd->r5or6_last_row_offset = |
2868 | (u32)((rmd->last_block % rmd->stripesize) % |
2869 | rmd->blocks_per_row); |
2870 | |
2871 | rmd->first_column = |
2872 | rmd->r5or6_first_row_offset / rmd->strip_size; |
2873 | rmd->r5or6_first_column = rmd->first_column; |
2874 | rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; |
2875 | #endif |
2876 | if (rmd->r5or6_first_column != rmd->r5or6_last_column) |
2877 | return PQI_RAID_BYPASS_INELIGIBLE; |
2878 | |
2879 | /* Request is eligible. */ |
2880 | rmd->map_row = |
2881 | ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % |
2882 | get_unaligned_le16(p: &raid_map->row_cnt); |
2883 | |
2884 | rmd->map_index = (rmd->first_group * |
2885 | (get_unaligned_le16(p: &raid_map->row_cnt) * |
2886 | rmd->total_disks_per_row)) + |
2887 | (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; |
2888 | |
2889 | if (rmd->is_write) { |
2890 | u32 index; |
2891 | |
2892 | /* |
2893 | * p_parity_it_nexus and q_parity_it_nexus are the IT nexus handles of |
2894 | * the parity entries inside the device's raid_map. |
2895 | * |
2896 | * A device's RAID map is bounded by the number of RAID disks squared. |
2897 | * |
2898 | * The device's RAID map size is checked during device |
2899 | * initialization. |
2900 | */ |
2901 | index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); |
2902 | index *= rmd->total_disks_per_row; |
2903 | index -= get_unaligned_le16(p: &raid_map->metadata_disks_per_row); |
2904 | |
2905 | rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; |
2906 | if (rmd->raid_level == SA_RAID_6) { |
2907 | rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; |
2908 | rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; |
2909 | } |
2910 | #if BITS_PER_LONG == 32 |
2911 | tmpdiv = rmd->first_block; |
2912 | do_div(tmpdiv, rmd->blocks_per_row); |
2913 | rmd->row = tmpdiv; |
2914 | #else |
2915 | rmd->row = rmd->first_block / rmd->blocks_per_row; |
2916 | #endif |
2917 | } |
2918 | |
2919 | return 0; |
2920 | } |
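
/*
 * RAID 50/60 example for the checks above (illustrative values): with
 * blocks_per_row = 384 and layout_map_count = 2, stripesize is 768.
 * Block 1000 maps to group (1000 % 768) / 384 = 0; the request is only
 * bypass-eligible if the first and last blocks fall in the same group,
 * the same RAID 5/6 row, and the same column. For writes, the parity
 * handles are taken from the end of the request's map row, with RAID 6
 * also picking up the Q-parity handle and XOR multiplier.
 */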
2921 | |
2922 | static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) |
2923 | { |
2924 | /* Build the new CDB for the physical disk I/O. */ |
2925 | if (rmd->disk_block > 0xffffffff) { |
2926 | rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; |
2927 | rmd->cdb[1] = 0; |
2928 | put_unaligned_be64(val: rmd->disk_block, p: &rmd->cdb[2]); |
2929 | put_unaligned_be32(val: rmd->disk_block_cnt, p: &rmd->cdb[10]); |
2930 | rmd->cdb[14] = 0; |
2931 | rmd->cdb[15] = 0; |
2932 | rmd->cdb_length = 16; |
2933 | } else { |
2934 | rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; |
2935 | rmd->cdb[1] = 0; |
2936 | put_unaligned_be32(val: (u32)rmd->disk_block, p: &rmd->cdb[2]); |
2937 | rmd->cdb[6] = 0; |
2938 | put_unaligned_be16(val: (u16)rmd->disk_block_cnt, p: &rmd->cdb[7]); |
2939 | rmd->cdb[9] = 0; |
2940 | rmd->cdb_length = 10; |
2941 | } |
2942 | } |
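
/*
 * The 10-byte CDB form above is used whenever the physical disk LBA fits
 * in 32 bits; larger LBAs get a 16-byte CDB. The 10-byte form can only
 * carry a 16-bit transfer length, which is why the bypass path rejects
 * requests with disk_block_cnt > 0xffff before building the CDB.
 */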
2943 | |
2944 | static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, |
2945 | struct pqi_scsi_dev_raid_map_data *rmd) |
2946 | { |
2947 | u32 index; |
2948 | u32 group; |
2949 | |
2950 | group = rmd->map_index / rmd->data_disks_per_row; |
2951 | |
2952 | index = rmd->map_index - (group * rmd->data_disks_per_row); |
2953 | rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; |
2954 | index += rmd->data_disks_per_row; |
2955 | rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; |
2956 | if (rmd->layout_map_count > 2) { |
2957 | index += rmd->data_disks_per_row; |
2958 | rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; |
2959 | } |
2960 | |
2961 | rmd->num_it_nexus_entries = rmd->layout_map_count; |
2962 | } |
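
/*
 * For RAID 1 and RAID Triple writes the data must reach every mirror leg,
 * so one it_nexus handle is collected per copy. The copies of a given data
 * disk sit data_disks_per_row entries apart in the RAID map; e.g. with a
 * hypothetical data_disks_per_row = 2, a map_index of 3 selects group 1,
 * index 1, and the legs are disk_data[1], disk_data[3] and, for a
 * three-way mirror, disk_data[5].
 */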
2963 | |
2964 | static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
2965 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
2966 | struct pqi_queue_group *queue_group) |
2967 | { |
2968 | int rc; |
2969 | struct raid_map *raid_map; |
2970 | u32 group; |
2971 | u32 next_bypass_group; |
2972 | struct pqi_encryption_info *encryption_info_ptr; |
2973 | struct pqi_encryption_info encryption_info; |
2974 | struct pqi_scsi_dev_raid_map_data rmd = { 0 }; |
2975 | |
2976 | rc = pqi_get_aio_lba_and_block_count(scmd, rmd: &rmd); |
2977 | if (rc) |
2978 | return PQI_RAID_BYPASS_INELIGIBLE; |
2979 | |
2980 | rmd.raid_level = device->raid_level; |
2981 | |
2982 | if (!pqi_aio_raid_level_supported(ctrl_info, rmd: &rmd)) |
2983 | return PQI_RAID_BYPASS_INELIGIBLE; |
2984 | |
2985 | if (unlikely(rmd.block_cnt == 0)) |
2986 | return PQI_RAID_BYPASS_INELIGIBLE; |
2987 | |
2988 | raid_map = device->raid_map; |
2989 | |
2990 | rc = pci_get_aio_common_raid_map_values(ctrl_info, rmd: &rmd, raid_map); |
2991 | if (rc) |
2992 | return PQI_RAID_BYPASS_INELIGIBLE; |
2993 | |
2994 | if (device->raid_level == SA_RAID_1 || |
2995 | device->raid_level == SA_RAID_TRIPLE) { |
2996 | if (rmd.is_write) { |
2997 | pqi_calc_aio_r1_nexus(raid_map, rmd: &rmd); |
2998 | } else { |
2999 | group = device->next_bypass_group[rmd.map_index]; |
3000 | next_bypass_group = group + 1; |
3001 | if (next_bypass_group >= rmd.layout_map_count) |
3002 | next_bypass_group = 0; |
3003 | device->next_bypass_group[rmd.map_index] = next_bypass_group; |
3004 | rmd.map_index += group * rmd.data_disks_per_row; |
3005 | } |
3006 | } else if ((device->raid_level == SA_RAID_5 || |
3007 | device->raid_level == SA_RAID_6) && |
3008 | (rmd.layout_map_count > 1 || rmd.is_write)) { |
3009 | rc = pqi_calc_aio_r5_or_r6(rmd: &rmd, raid_map); |
3010 | if (rc) |
3011 | return PQI_RAID_BYPASS_INELIGIBLE; |
3012 | } |
3013 | |
3014 | if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) |
3015 | return PQI_RAID_BYPASS_INELIGIBLE; |
3016 | |
3017 | rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; |
3018 | rmd.disk_block = get_unaligned_le64(p: &raid_map->disk_starting_blk) + |
3019 | rmd.first_row * rmd.strip_size + |
3020 | (rmd.first_row_offset - rmd.first_column * rmd.strip_size); |
3021 | rmd.disk_block_cnt = rmd.block_cnt; |
3022 | |
3023 | /* Handle differing logical/physical block sizes. */ |
3024 | if (raid_map->phys_blk_shift) { |
3025 | rmd.disk_block <<= raid_map->phys_blk_shift; |
3026 | rmd.disk_block_cnt <<= raid_map->phys_blk_shift; |
3027 | } |
3028 | |
3029 | if (unlikely(rmd.disk_block_cnt > 0xffff)) |
3030 | return PQI_RAID_BYPASS_INELIGIBLE; |
3031 | |
3032 | pqi_set_aio_cdb(rmd: &rmd); |
3033 | |
3034 | if (get_unaligned_le16(p: &raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { |
3035 | if (rmd.data_length > device->max_transfer_encrypted) |
3036 | return PQI_RAID_BYPASS_INELIGIBLE; |
3037 | pqi_set_encryption_info(encryption_info: &encryption_info, raid_map, first_block: rmd.first_block); |
3038 | encryption_info_ptr = &encryption_info; |
3039 | } else { |
3040 | encryption_info_ptr = NULL; |
3041 | } |
3042 | |
3043 | if (rmd.is_write) { |
3044 | switch (device->raid_level) { |
3045 | case SA_RAID_1: |
3046 | case SA_RAID_TRIPLE: |
3047 | return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, |
3048 | encryption_info: encryption_info_ptr, device, rmd: &rmd); |
3049 | case SA_RAID_5: |
3050 | case SA_RAID_6: |
3051 | return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, |
3052 | encryption_info: encryption_info_ptr, device, rmd: &rmd); |
3053 | } |
3054 | } |
3055 | |
3056 | return pqi_aio_submit_io(ctrl_info, scmd, aio_handle: rmd.aio_handle, |
3057 | cdb: rmd.cdb, cdb_length: rmd.cdb_length, queue_group, |
3058 | encryption_info: encryption_info_ptr, raid_bypass: true, io_high_prio: false); |
3059 | } |
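
/*
 * Summary of the bypass eligibility checks above: the request must use a
 * supported read/write opcode and RAID level, fit within a single row and
 * column of the RAID map (and a single group for RAID 50/60), resolve to a
 * map index inside the map, and fit in a 16-bit block count after any
 * physical block size scaling; encrypted volumes additionally enforce
 * max_transfer_encrypted. Reads on RAID 1/Triple volumes are balanced
 * round-robin across the mirror legs via next_bypass_group.
 */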
3060 | |
3061 | #define PQI_STATUS_IDLE 0x0 |
3062 | |
3063 | #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 |
3064 | #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 |
3065 | |
3066 | #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 |
3067 | #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 |
3068 | #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 |
3069 | #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 |
3070 | #define PQI_DEVICE_STATE_ERROR 0x4 |
3071 | |
3072 | #define PQI_MODE_READY_TIMEOUT_SECS 30 |
3073 | #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 |
3074 | |
3075 | static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) |
3076 | { |
3077 | struct pqi_device_registers __iomem *pqi_registers; |
3078 | unsigned long timeout; |
3079 | u64 signature; |
3080 | u8 status; |
3081 | |
3082 | pqi_registers = ctrl_info->pqi_registers; |
3083 | timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; |
3084 | |
3085 | while (1) { |
3086 | signature = readq(addr: &pqi_registers->signature); |
3087 | if (memcmp(p: &signature, PQI_DEVICE_SIGNATURE, |
3088 | size: sizeof(signature)) == 0) |
3089 | break; |
3090 | if (time_after(jiffies, timeout)) { |
3091 | dev_err(&ctrl_info->pci_dev->dev, |
3092 | "timed out waiting for PQI signature\n"); |
3093 | return -ETIMEDOUT; |
3094 | } |
3095 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); |
3096 | } |
3097 | |
3098 | while (1) { |
3099 | status = readb(addr: &pqi_registers->function_and_status_code); |
3100 | if (status == PQI_STATUS_IDLE) |
3101 | break; |
3102 | if (time_after(jiffies, timeout)) { |
3103 | dev_err(&ctrl_info->pci_dev->dev, |
3104 | "timed out waiting for PQI IDLE\n"); |
3105 | return -ETIMEDOUT; |
3106 | } |
3107 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); |
3108 | } |
3109 | |
3110 | while (1) { |
3111 | if (readl(addr: &pqi_registers->device_status) == |
3112 | PQI_DEVICE_STATE_ALL_REGISTERS_READY) |
3113 | break; |
3114 | if (time_after(jiffies, timeout)) { |
3115 | dev_err(&ctrl_info->pci_dev->dev, |
3116 | "timed out waiting for PQI all registers ready\n"); |
3117 | return -ETIMEDOUT; |
3118 | } |
3119 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); |
3120 | } |
3121 | |
3122 | return 0; |
3123 | } |
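
/*
 * The wait above is a three-stage poll: first for the PQI signature, then
 * for the function-and-status code to report IDLE, and finally for the
 * device state to reach "all registers ready". All three stages share a
 * single PQI_MODE_READY_TIMEOUT_SECS (30 s) deadline and poll every
 * PQI_MODE_READY_POLL_INTERVAL_MSECS (1 ms).
 */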
3124 | |
3125 | static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) |
3126 | { |
3127 | struct pqi_scsi_dev *device; |
3128 | |
3129 | device = io_request->scmd->device->hostdata; |
3130 | device->raid_bypass_enabled = false; |
3131 | device->aio_enabled = false; |
3132 | } |
3133 | |
3134 | static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) |
3135 | { |
3136 | struct pqi_ctrl_info *ctrl_info; |
3137 | struct pqi_scsi_dev *device; |
3138 | |
3139 | device = sdev->hostdata; |
3140 | if (device->device_offline) |
3141 | return; |
3142 | |
3143 | device->device_offline = true; |
3144 | ctrl_info = shost_to_hba(sdev->host); |
3145 | pqi_schedule_rescan_worker(ctrl_info); |
3146 | dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", |
3147 | path, ctrl_info->scsi_host->host_no, device->bus, |
3148 | device->target, device->lun); |
3149 | } |
3150 | |
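| /* |
| * Translate a RAID path error IU into a SCSI midlayer result: map |
| * data_out_result to a host byte, copy back any sense (or response) |
| * data, and take the device offline when the sense data reports a |
| * hardware error with ASC 0x3e/ASCQ 0x01 (logical unit failure). |
| */ |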
3151 | static void pqi_process_raid_io_error(struct pqi_io_request *io_request) |
3152 | { |
3153 | u8 scsi_status; |
3154 | u8 host_byte; |
3155 | struct scsi_cmnd *scmd; |
3156 | struct pqi_raid_error_info *error_info; |
3157 | size_t sense_data_length; |
3158 | int residual_count; |
3159 | int xfer_count; |
3160 | struct scsi_sense_hdr sshdr; |
3161 | |
3162 | scmd = io_request->scmd; |
3163 | if (!scmd) |
3164 | return; |
3165 | |
3166 | error_info = io_request->error_info; |
3167 | scsi_status = error_info->status; |
3168 | host_byte = DID_OK; |
3169 | |
3170 | switch (error_info->data_out_result) { |
3171 | case PQI_DATA_IN_OUT_GOOD: |
3172 | break; |
3173 | case PQI_DATA_IN_OUT_UNDERFLOW: |
3174 | xfer_count = |
3175 | get_unaligned_le32(&error_info->data_out_transferred); |
3176 | residual_count = scsi_bufflen(scmd) - xfer_count; |
3177 | scsi_set_resid(scmd, residual_count); |
3178 | if (xfer_count < scmd->underflow) |
3179 | host_byte = DID_SOFT_ERROR; |
3180 | break; |
3181 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: |
3182 | case PQI_DATA_IN_OUT_ABORTED: |
3183 | host_byte = DID_ABORT; |
3184 | break; |
3185 | case PQI_DATA_IN_OUT_TIMEOUT: |
3186 | host_byte = DID_TIME_OUT; |
3187 | break; |
3188 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: |
3189 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: |
3190 | case PQI_DATA_IN_OUT_BUFFER_ERROR: |
3191 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: |
3192 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: |
3193 | case PQI_DATA_IN_OUT_ERROR: |
3194 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: |
3195 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: |
3196 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: |
3197 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: |
3198 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: |
3199 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: |
3200 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: |
3201 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: |
3202 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: |
3203 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: |
3204 | default: |
3205 | host_byte = DID_ERROR; |
3206 | break; |
3207 | } |
3208 | |
3209 | sense_data_length = get_unaligned_le16(&error_info->sense_data_length); |
3210 | if (sense_data_length == 0) |
3211 | sense_data_length = |
3212 | get_unaligned_le16(&error_info->response_data_length); |
3213 | if (sense_data_length) { |
3214 | if (sense_data_length > sizeof(error_info->data)) |
3215 | sense_data_length = sizeof(error_info->data); |
3216 | |
3217 | if (scsi_status == SAM_STAT_CHECK_CONDITION && |
3218 | scsi_normalize_sense(error_info->data, |
3219 | sense_data_length, &sshdr) && |
3220 | sshdr.sense_key == HARDWARE_ERROR && |
3221 | sshdr.asc == 0x3e) { |
3222 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); |
3223 | struct pqi_scsi_dev *device = scmd->device->hostdata; |
3224 | |
3225 | switch (sshdr.ascq) { |
3226 | case 0x1: /* LOGICAL UNIT FAILURE */ |
3227 | if (printk_ratelimit()) |
3228 | scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", |
3229 | ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); |
3230 | pqi_take_device_offline(scmd->device, "RAID"); |
3231 | host_byte = DID_NO_CONNECT; |
3232 | break; |
3233 | |
3234 | default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ |
3235 | if (printk_ratelimit()) |
3236 | scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", |
3237 | sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); |
3238 | break; |
3239 | } |
3240 | } |
3241 | |
3242 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) |
3243 | sense_data_length = SCSI_SENSE_BUFFERSIZE; |
3244 | memcpy(scmd->sense_buffer, error_info->data, |
3245 | sense_data_length); |
3246 | } |
3247 | |
3248 | scmd->result = scsi_status; |
3249 | set_host_byte(scmd, host_byte); |
3250 | } |
3251 | |
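| /* |
| * Translate an AIO path error IU into a SCSI midlayer result. An |
| * AIO-path-disabled status turns off RAID bypass for the device and, |
| * for non-multipath devices, requeues the request via -EAGAIN; a |
| * missing or invalid device is taken offline unless the request was a |
| * RAID bypass attempt. |
| */ |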
3252 | static void pqi_process_aio_io_error(struct pqi_io_request *io_request) |
3253 | { |
3254 | u8 scsi_status; |
3255 | u8 host_byte; |
3256 | struct scsi_cmnd *scmd; |
3257 | struct pqi_aio_error_info *error_info; |
3258 | size_t sense_data_length; |
3259 | int residual_count; |
3260 | int xfer_count; |
3261 | bool device_offline; |
3262 | struct pqi_scsi_dev *device; |
3263 | |
3264 | scmd = io_request->scmd; |
3265 | error_info = io_request->error_info; |
3266 | host_byte = DID_OK; |
3267 | sense_data_length = 0; |
3268 | device_offline = false; |
3269 | device = scmd->device->hostdata; |
3270 | |
3271 | switch (error_info->service_response) { |
3272 | case PQI_AIO_SERV_RESPONSE_COMPLETE: |
3273 | scsi_status = error_info->status; |
3274 | break; |
3275 | case PQI_AIO_SERV_RESPONSE_FAILURE: |
3276 | switch (error_info->status) { |
3277 | case PQI_AIO_STATUS_IO_ABORTED: |
3278 | scsi_status = SAM_STAT_TASK_ABORTED; |
3279 | break; |
3280 | case PQI_AIO_STATUS_UNDERRUN: |
3281 | scsi_status = SAM_STAT_GOOD; |
3282 | residual_count = get_unaligned_le32( |
3283 | &error_info->residual_count); |
3284 | scsi_set_resid(scmd, residual_count); |
3285 | xfer_count = scsi_bufflen(scmd) - residual_count; |
3286 | if (xfer_count < scmd->underflow) |
3287 | host_byte = DID_SOFT_ERROR; |
3288 | break; |
3289 | case PQI_AIO_STATUS_OVERRUN: |
3290 | scsi_status = SAM_STAT_GOOD; |
3291 | break; |
3292 | case PQI_AIO_STATUS_AIO_PATH_DISABLED: |
3293 | pqi_aio_path_disabled(io_request); |
3294 | if (pqi_is_multipath_device(device)) { |
3295 | pqi_device_remove_start(device); |
3296 | host_byte = DID_NO_CONNECT; |
3297 | scsi_status = SAM_STAT_CHECK_CONDITION; |
3298 | } else { |
3299 | scsi_status = SAM_STAT_GOOD; |
3300 | io_request->status = -EAGAIN; |
3301 | } |
3302 | break; |
3303 | case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: |
3304 | case PQI_AIO_STATUS_INVALID_DEVICE: |
3305 | if (!io_request->raid_bypass) { |
3306 | device_offline = true; |
3307 | pqi_take_device_offline(scmd->device, "AIO"); |
3308 | host_byte = DID_NO_CONNECT; |
3309 | } |
3310 | scsi_status = SAM_STAT_CHECK_CONDITION; |
3311 | break; |
3312 | case PQI_AIO_STATUS_IO_ERROR: |
3313 | default: |
3314 | scsi_status = SAM_STAT_CHECK_CONDITION; |
3315 | break; |
3316 | } |
3317 | break; |
3318 | case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: |
3319 | case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: |
3320 | scsi_status = SAM_STAT_GOOD; |
3321 | break; |
3322 | case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: |
3323 | case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: |
3324 | default: |
3325 | scsi_status = SAM_STAT_CHECK_CONDITION; |
3326 | break; |
3327 | } |
3328 | |
3329 | if (error_info->data_present) { |
3330 | sense_data_length = |
3331 | get_unaligned_le16(&error_info->data_length); |
3332 | if (sense_data_length) { |
3333 | if (sense_data_length > sizeof(error_info->data)) |
3334 | sense_data_length = sizeof(error_info->data); |
3335 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) |
3336 | sense_data_length = SCSI_SENSE_BUFFERSIZE; |
3337 | memcpy(scmd->sense_buffer, error_info->data, |
3338 | sense_data_length); |
3339 | } |
3340 | } |
3341 | |
3342 | if (device_offline && sense_data_length == 0) |
3343 | scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1); |
3344 | |
3345 | scmd->result = scsi_status; |
3346 | set_host_byte(scmd, host_byte); |
3347 | } |
3348 | |
3349 | static void pqi_process_io_error(unsigned int iu_type, |
3350 | struct pqi_io_request *io_request) |
3351 | { |
3352 | switch (iu_type) { |
3353 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: |
3354 | pqi_process_raid_io_error(io_request); |
3355 | break; |
3356 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: |
3357 | pqi_process_aio_io_error(io_request); |
3358 | break; |
3359 | } |
3360 | } |
3361 | |
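| /* |
| * Convert a SOP task management response code into an errno: 0 for |
| * complete/succeeded, -EAGAIN for rejected, -ENODEV for an incorrect |
| * logical unit, and -EIO otherwise. Failures are logged. |
| */ |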
3362 | static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, |
3363 | struct pqi_task_management_response *response) |
3364 | { |
3365 | int rc; |
3366 | |
3367 | switch (response->response_code) { |
3368 | case SOP_TMF_COMPLETE: |
3369 | case SOP_TMF_FUNCTION_SUCCEEDED: |
3370 | rc = 0; |
3371 | break; |
3372 | case SOP_TMF_REJECTED: |
3373 | rc = -EAGAIN; |
3374 | break; |
3375 | case SOP_TMF_INCORRECT_LOGICAL_UNIT: |
3376 | rc = -ENODEV; |
3377 | break; |
3378 | default: |
3379 | rc = -EIO; |
3380 | break; |
3381 | } |
3382 | |
3383 | if (rc) |
3384 | dev_err(&ctrl_info->pci_dev->dev, |
3385 | "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); |
3386 | |
3387 | return rc; |
3388 | } |
3389 | |
3390 | static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, |
3391 | enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) |
3392 | { |
3393 | pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); |
3394 | } |
3395 | |
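| /* |
| * Drain this queue group's outbound (response) queue: validate the |
| * producer index and request ID, match each response to its |
| * outstanding I/O request, dispatch by IU type, invoke the completion |
| * callback, then advance the consumer index. Returns the number of |
| * responses handled, or -1 after taking the controller offline when a |
| * response is malformed. |
| */ |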
3396 | static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) |
3397 | { |
3398 | int num_responses; |
3399 | pqi_index_t oq_pi; |
3400 | pqi_index_t oq_ci; |
3401 | struct pqi_io_request *io_request; |
3402 | struct pqi_io_response *response; |
3403 | u16 request_id; |
3404 | |
3405 | num_responses = 0; |
3406 | oq_ci = queue_group->oq_ci_copy; |
3407 | |
3408 | while (1) { |
3409 | oq_pi = readl(queue_group->oq_pi); |
3410 | if (oq_pi >= ctrl_info->num_elements_per_oq) { |
3411 | pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); |
3412 | dev_err(&ctrl_info->pci_dev->dev, |
3413 | "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", |
3414 | oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); |
3415 | return -1; |
3416 | } |
3417 | if (oq_pi == oq_ci) |
3418 | break; |
3419 | |
3420 | num_responses++; |
3421 | response = queue_group->oq_element_array + |
3422 | (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); |
3423 | |
3424 | request_id = get_unaligned_le16(&response->request_id); |
3425 | if (request_id >= ctrl_info->max_io_slots) { |
3426 | pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); |
3427 | dev_err(&ctrl_info->pci_dev->dev, |
3428 | "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", |
3429 | request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); |
3430 | return -1; |
3431 | } |
3432 | |
3433 | io_request = &ctrl_info->io_request_pool[request_id]; |
3434 | if (atomic_read(&io_request->refcount) == 0) { |
3435 | pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); |
3436 | dev_err(&ctrl_info->pci_dev->dev, |
3437 | "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", |
3438 | request_id, oq_pi, oq_ci); |
3439 | return -1; |
3440 | } |
3441 | |
3442 | switch (response->header.iu_type) { |
3443 | case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: |
3444 | case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: |
3445 | if (io_request->scmd) |
3446 | io_request->scmd->result = 0; |
3447 | fallthrough; |
3448 | case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: |
3449 | break; |
3450 | case PQI_RESPONSE_IU_VENDOR_GENERAL: |
3451 | io_request->status = |
3452 | get_unaligned_le16( |
3453 | &((struct pqi_vendor_general_response *)response)->status); |
3454 | break; |
3455 | case PQI_RESPONSE_IU_TASK_MANAGEMENT: |
3456 | io_request->status = pqi_interpret_task_management_response(ctrl_info, |
3457 | (void *)response); |
3458 | break; |
3459 | case PQI_RESPONSE_IU_AIO_PATH_DISABLED: |
3460 | pqi_aio_path_disabled(io_request); |
3461 | io_request->status = -EAGAIN; |
3462 | break; |
3463 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: |
3464 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: |
3465 | io_request->error_info = ctrl_info->error_buffer + |
3466 | (get_unaligned_le16(&response->error_index) * |
3467 | PQI_ERROR_BUFFER_ELEMENT_LENGTH); |
3468 | pqi_process_io_error(response->header.iu_type, io_request); |
3469 | break; |
3470 | default: |
3471 | pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); |
3472 | dev_err(&ctrl_info->pci_dev->dev, |
3473 | "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", |
3474 | response->header.iu_type, oq_pi, oq_ci); |
3475 | return -1; |
3476 | } |
3477 | |
3478 | io_request->io_complete_callback(io_request, io_request->context); |
3479 | |
3480 | /* |
3481 | * Note that the I/O request structure CANNOT BE TOUCHED after |
3482 | * returning from the I/O completion callback! |
3483 | */ |
3484 | oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; |
3485 | } |
3486 | |
3487 | if (num_responses) { |
3488 | queue_group->oq_ci_copy = oq_ci; |
3489 | writel(oq_ci, queue_group->oq_ci); |
3490 | } |
3491 | |
3492 | return num_responses; |
3493 | } |
3494 | |
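| /* |
| * Free elements in a circular queue, reserving one element so that a |
| * full queue can be distinguished from an empty one. For example, with |
| * elements_in_queue = 8, pi = 2, ci = 5: used = 8 - 5 + 2 = 5, so |
| * free = 8 - 5 - 1 = 2. |
| */ |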
3495 | static inline unsigned int pqi_num_elements_free(unsigned int pi, |
3496 | unsigned int ci, unsigned int elements_in_queue) |
3497 | { |
3498 | unsigned int num_elements_used; |
3499 | |
3500 | if (pi >= ci) |
3501 | num_elements_used = pi - ci; |
3502 | else |
3503 | num_elements_used = elements_in_queue - ci + pi; |
3504 | |
3505 | return elements_in_queue - num_elements_used - 1; |
3506 | } |
3507 | |
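| /* |
| * Copy an event acknowledgement IU into the RAID path inbound queue of |
| * the default queue group, spinning until a queue element is free (or |
| * the controller goes offline), then ring the doorbell by writing the |
| * new producer index. |
| */ |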
3508 | static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, |
3509 | struct pqi_event_acknowledge_request *iu, size_t iu_length) |
3510 | { |
3511 | pqi_index_t iq_pi; |
3512 | pqi_index_t iq_ci; |
3513 | unsigned long flags; |
3514 | void *next_element; |
3515 | struct pqi_queue_group *queue_group; |
3516 | |
3517 | queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; |
3518 | put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); |
3519 | |
3520 | while (1) { |
3521 | spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); |
3522 | |
3523 | iq_pi = queue_group->iq_pi_copy[RAID_PATH]; |
3524 | iq_ci = readl(queue_group->iq_ci[RAID_PATH]); |
3525 | |
3526 | if (pqi_num_elements_free(iq_pi, iq_ci, |
3527 | ctrl_info->num_elements_per_iq)) |
3528 | break; |
3529 | |
3530 | spin_unlock_irqrestore( |
3531 | &queue_group->submit_lock[RAID_PATH], flags); |
3532 | |
3533 | if (pqi_ctrl_offline(ctrl_info)) |
3534 | return; |
3535 | } |
3536 | |
3537 | next_element = queue_group->iq_element_array[RAID_PATH] + |
3538 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
3539 | |
3540 | memcpy(next_element, iu, iu_length); |
3541 | |
3542 | iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; |
3543 | queue_group->iq_pi_copy[RAID_PATH] = iq_pi; |
3544 | |
3545 | /* |
3546 | * This write notifies the controller that an IU is available to be |
3547 | * processed. |
3548 | */ |
3549 | writel(iq_pi, queue_group->iq_pi[RAID_PATH]); |
3550 | |
3551 | spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); |
3552 | } |
3553 | |
3554 | static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, |
3555 | struct pqi_event *event) |
3556 | { |
3557 | struct pqi_event_acknowledge_request request; |
3558 | |
3559 | memset(&request, 0, sizeof(request)); |
3560 | |
3561 | request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; |
3562 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, |
3563 | &request.header.iu_length); |
3564 | request.event_type = event->event_type; |
3565 | put_unaligned_le16(event->event_id, &request.event_id); |
3566 | put_unaligned_le32(event->additional_event_id, &request.additional_event_id); |
3567 | |
3568 | pqi_send_event_ack(ctrl_info, &request, sizeof(request)); |
3569 | } |
3570 | |
3571 | #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 |
3572 | #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 |
3573 | |
3574 | static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( |
3575 | struct pqi_ctrl_info *ctrl_info) |
3576 | { |
3577 | u8 status; |
3578 | unsigned long timeout; |
3579 | |
3580 | timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; |
3581 | |
3582 | while (1) { |
3583 | status = pqi_read_soft_reset_status(ctrl_info); |
3584 | if (status & PQI_SOFT_RESET_INITIATE) |
3585 | return RESET_INITIATE_DRIVER; |
3586 | |
3587 | if (status & PQI_SOFT_RESET_ABORT) |
3588 | return RESET_ABORT; |
3589 | |
3590 | if (!sis_is_firmware_running(ctrl_info)) |
3591 | return RESET_NORESPONSE; |
3592 | |
3593 | if (time_after(jiffies, timeout)) { |
3594 | dev_warn(&ctrl_info->pci_dev->dev, |
3595 | "timed out waiting for soft reset status\n"); |
3596 | return RESET_TIMEDOUT; |
3597 | } |
3598 | |
3599 | ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); |
3600 | } |
3601 | } |
3602 | |
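| /* |
| * Finish an Online Firmware Activation: decide whether the driver or |
| * the firmware performs the soft reset, restart the controller and |
| * release the OFA host buffer on success, or unwind and, if the |
| * firmware never responded, take the controller offline. |
| */ |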
3603 | static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) |
3604 | { |
3605 | int rc; |
3606 | unsigned int delay_secs; |
3607 | enum pqi_soft_reset_status reset_status; |
3608 | |
3609 | if (ctrl_info->soft_reset_handshake_supported) |
3610 | reset_status = pqi_poll_for_soft_reset_status(ctrl_info); |
3611 | else |
3612 | reset_status = RESET_INITIATE_FIRMWARE; |
3613 | |
3614 | delay_secs = PQI_POST_RESET_DELAY_SECS; |
3615 | |
3616 | switch (reset_status) { |
3617 | case RESET_TIMEDOUT: |
3618 | delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; |
3619 | fallthrough; |
3620 | case RESET_INITIATE_DRIVER: |
3621 | dev_info(&ctrl_info->pci_dev->dev, |
3622 | "Online Firmware Activation: resetting controller\n"); |
3623 | sis_soft_reset(ctrl_info); |
3624 | fallthrough; |
3625 | case RESET_INITIATE_FIRMWARE: |
3626 | ctrl_info->pqi_mode_enabled = false; |
3627 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); |
3628 | rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); |
3629 | pqi_ofa_free_host_buffer(ctrl_info); |
3630 | pqi_ctrl_ofa_done(ctrl_info); |
3631 | dev_info(&ctrl_info->pci_dev->dev, |
3632 | "Online Firmware Activation: %s\n", |
3633 | rc == 0 ? "SUCCESS": "FAILED"); |
3634 | break; |
3635 | case RESET_ABORT: |
3636 | dev_info(&ctrl_info->pci_dev->dev, |
3637 | "Online Firmware Activation ABORTED\n"); |
3638 | if (ctrl_info->soft_reset_handshake_supported) |
3639 | pqi_clear_soft_reset_status(ctrl_info); |
3640 | pqi_ofa_free_host_buffer(ctrl_info); |
3641 | pqi_ctrl_ofa_done(ctrl_info); |
3642 | pqi_ofa_ctrl_unquiesce(ctrl_info); |
3643 | break; |
3644 | case RESET_NORESPONSE: |
3645 | fallthrough; |
3646 | default: |
3647 | dev_err(&ctrl_info->pci_dev->dev, |
3648 | "unexpected Online Firmware Activation reset status: 0x%x\n", |
3649 | reset_status); |
3650 | pqi_ofa_free_host_buffer(ctrl_info); |
3651 | pqi_ctrl_ofa_done(ctrl_info); |
3652 | pqi_ofa_ctrl_unquiesce(ctrl_info); |
3653 | pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); |
3654 | break; |
3655 | } |
3656 | } |
3657 | |
3658 | static void pqi_ofa_memory_alloc_worker(struct work_struct *work) |
3659 | { |
3660 | struct pqi_ctrl_info *ctrl_info; |
3661 | |
3662 | ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); |
3663 | |
3664 | pqi_ctrl_ofa_start(ctrl_info); |
3665 | pqi_ofa_setup_host_buffer(ctrl_info); |
3666 | pqi_ofa_host_memory_update(ctrl_info); |
3667 | } |
3668 | |
3669 | static void pqi_ofa_quiesce_worker(struct work_struct *work) |
3670 | { |
3671 | struct pqi_ctrl_info *ctrl_info; |
3672 | struct pqi_event *event; |
3673 | |
3674 | ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); |
3675 | |
3676 | event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; |
3677 | |
3678 | pqi_ofa_ctrl_quiesce(ctrl_info); |
3679 | pqi_acknowledge_event(ctrl_info, event); |
3680 | pqi_process_soft_reset(ctrl_info); |
3681 | } |
3682 | |
3683 | static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, |
3684 | struct pqi_event *event) |
3685 | { |
3686 | bool ack_event; |
3687 | |
3688 | ack_event = true; |
3689 | |
3690 | switch (event->event_id) { |
3691 | case PQI_EVENT_OFA_MEMORY_ALLOCATION: |
3692 | dev_info(&ctrl_info->pci_dev->dev, |
3693 | "received Online Firmware Activation memory allocation request\n"); |
3694 | schedule_work(&ctrl_info->ofa_memory_alloc_work); |
3695 | break; |
3696 | case PQI_EVENT_OFA_QUIESCE: |
3697 | dev_info(&ctrl_info->pci_dev->dev, |
3698 | "received Online Firmware Activation quiesce request\n"); |
3699 | schedule_work(&ctrl_info->ofa_quiesce_work); |
3700 | ack_event = false; |
3701 | break; |
3702 | case PQI_EVENT_OFA_CANCELED: |
3703 | dev_info(&ctrl_info->pci_dev->dev, |
3704 | "received Online Firmware Activation cancel request: reason: %u\n", |
3705 | ctrl_info->ofa_cancel_reason); |
3706 | pqi_ofa_free_host_buffer(ctrl_info); |
3707 | pqi_ctrl_ofa_done(ctrl_info); |
3708 | break; |
3709 | default: |
3710 | dev_err(&ctrl_info->pci_dev->dev, |
3711 | "received unknown Online Firmware Activation request: event ID: %u\n", |
3712 | event->event_id); |
3713 | break; |
3714 | } |
3715 | |
3716 | return ack_event; |
3717 | } |
3718 | |
3719 | static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) |
3720 | { |
3721 | unsigned long flags; |
3722 | struct pqi_scsi_dev *device; |
3723 | |
3724 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
3725 | |
3726 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
3727 | if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) |
3728 | device->rescan = true; |
3729 | } |
3730 | |
3731 | spin_unlock_irqrestore(lock: &ctrl_info->scsi_device_list_lock, flags); |
3732 | } |
3733 | |
3734 | static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) |
3735 | { |
3736 | unsigned long flags; |
3737 | struct pqi_scsi_dev *device; |
3738 | |
3739 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
3740 | |
3741 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
3742 | if (device->raid_bypass_enabled) |
3743 | device->raid_bypass_enabled = false; |
3744 | |
3745 | spin_unlock_irqrestore(lock: &ctrl_info->scsi_device_list_lock, flags); |
3746 | } |
3747 | |
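| /* |
| * Work handler for controller events: OFA events are routed to the OFA |
| * handler, logical-device events mark volumes for rescan, AIO state |
| * changes disable RAID bypass, and each event is acknowledged unless |
| * the OFA handler defers the acknowledgement. A rescan is scheduled if |
| * any non-OFA event arrived. |
| */ |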
3748 | static void pqi_event_worker(struct work_struct *work) |
3749 | { |
3750 | unsigned int i; |
3751 | bool rescan_needed; |
3752 | struct pqi_ctrl_info *ctrl_info; |
3753 | struct pqi_event *event; |
3754 | bool ack_event; |
3755 | |
3756 | ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); |
3757 | |
3758 | pqi_ctrl_busy(ctrl_info); |
3759 | pqi_wait_if_ctrl_blocked(ctrl_info); |
3760 | if (pqi_ctrl_offline(ctrl_info)) |
3761 | goto out; |
3762 | |
3763 | rescan_needed = false; |
3764 | event = ctrl_info->events; |
3765 | for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { |
3766 | if (event->pending) { |
3767 | event->pending = false; |
3768 | if (event->event_type == PQI_EVENT_TYPE_OFA) { |
3769 | ack_event = pqi_ofa_process_event(ctrl_info, event); |
3770 | } else { |
3771 | ack_event = true; |
3772 | rescan_needed = true; |
3773 | if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) |
3774 | pqi_mark_volumes_for_rescan(ctrl_info); |
3775 | else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) |
3776 | pqi_disable_raid_bypass(ctrl_info); |
3777 | } |
3778 | if (ack_event) |
3779 | pqi_acknowledge_event(ctrl_info, event); |
3780 | } |
3781 | event++; |
3782 | } |
3783 | |
3784 | #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) |
3785 | |
3786 | if (rescan_needed) |
3787 | pqi_schedule_rescan_worker_with_delay(ctrl_info, |
3788 | PQI_RESCAN_WORK_FOR_EVENT_DELAY); |
3789 | |
3790 | out: |
3791 | pqi_ctrl_unbusy(ctrl_info); |
3792 | } |
3793 | |
3794 | #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) |
3795 | |
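| /* |
| * Periodic health check: if neither the interrupt count nor the |
| * firmware heartbeat counter has advanced since the previous run, the |
| * controller is presumed hung and is taken offline; otherwise the |
| * timer is re-armed for another PQI_HEARTBEAT_TIMER_INTERVAL. |
| */ |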
3796 | static void pqi_heartbeat_timer_handler(struct timer_list *t) |
3797 | { |
3798 | int num_interrupts; |
3799 | u32 heartbeat_count; |
3800 | struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); |
3801 | |
3802 | pqi_check_ctrl_health(ctrl_info); |
3803 | if (pqi_ctrl_offline(ctrl_info)) |
3804 | return; |
3805 | |
3806 | num_interrupts = atomic_read(&ctrl_info->num_interrupts); |
3807 | heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); |
3808 | |
3809 | if (num_interrupts == ctrl_info->previous_num_interrupts) { |
3810 | if (heartbeat_count == ctrl_info->previous_heartbeat_count) { |
3811 | dev_err(&ctrl_info->pci_dev->dev, |
3812 | "no heartbeat detected - last heartbeat count: %u\n", |
3813 | heartbeat_count); |
3814 | pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); |
3815 | return; |
3816 | } |
3817 | } else { |
3818 | ctrl_info->previous_num_interrupts = num_interrupts; |
3819 | } |
3820 | |
3821 | ctrl_info->previous_heartbeat_count = heartbeat_count; |
3822 | mod_timer(&ctrl_info->heartbeat_timer, |
3823 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); |
3824 | } |
3825 | |
3826 | static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) |
3827 | { |
3828 | if (!ctrl_info->heartbeat_counter) |
3829 | return; |
3830 | |
3831 | ctrl_info->previous_num_interrupts = |
3832 | atomic_read(v: &ctrl_info->num_interrupts); |
3833 | ctrl_info->previous_heartbeat_count = |
3834 | pqi_read_heartbeat_counter(ctrl_info); |
3835 | |
3836 | ctrl_info->heartbeat_timer.expires = |
3837 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; |
3838 | add_timer(timer: &ctrl_info->heartbeat_timer); |
3839 | } |
3840 | |
3841 | static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) |
3842 | { |
3843 | del_timer_sync(timer: &ctrl_info->heartbeat_timer); |
3844 | } |
3845 | |
3846 | static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, |
3847 | struct pqi_event *event, struct pqi_event_response *response) |
3848 | { |
3849 | switch (event->event_id) { |
3850 | case PQI_EVENT_OFA_MEMORY_ALLOCATION: |
3851 | ctrl_info->ofa_bytes_requested = |
3852 | get_unaligned_le32(p: &response->data.ofa_memory_allocation.bytes_requested); |
3853 | break; |
3854 | case PQI_EVENT_OFA_CANCELED: |
3855 | ctrl_info->ofa_cancel_reason = |
3856 | get_unaligned_le16(p: &response->data.ofa_cancelled.reason); |
3857 | break; |
3858 | } |
3859 | } |
3860 | |
3861 | static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) |
3862 | { |
3863 | int num_events; |
3864 | pqi_index_t oq_pi; |
3865 | pqi_index_t oq_ci; |
3866 | struct pqi_event_queue *event_queue; |
3867 | struct pqi_event_response *response; |
3868 | struct pqi_event *event; |
3869 | int event_index; |
3870 | |
3871 | event_queue = &ctrl_info->event_queue; |
3872 | num_events = 0; |
3873 | oq_ci = event_queue->oq_ci_copy; |
3874 | |
3875 | while (1) { |
3876 | oq_pi = readl(event_queue->oq_pi); |
3877 | if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { |
3878 | pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); |
3879 | dev_err(&ctrl_info->pci_dev->dev, |
3880 | "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", |
3881 | oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); |
3882 | return -1; |
3883 | } |
3884 | |
3885 | if (oq_pi == oq_ci) |
3886 | break; |
3887 | |
3888 | num_events++; |
3889 | response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); |
3890 | |
3891 | event_index = pqi_event_type_to_event_index(response->event_type); |
3892 | |
3893 | if (event_index >= 0 && response->request_acknowledge) { |
3894 | event = &ctrl_info->events[event_index]; |
3895 | event->pending = true; |
3896 | event->event_type = response->event_type; |
3897 | event->event_id = get_unaligned_le16(&response->event_id); |
3898 | event->additional_event_id = |
3899 | get_unaligned_le32(&response->additional_event_id); |
3900 | if (event->event_type == PQI_EVENT_TYPE_OFA) |
3901 | pqi_ofa_capture_event_payload(ctrl_info, event, response); |
3902 | } |
3903 | |
3904 | oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; |
3905 | } |
3906 | |
3907 | if (num_events) { |
3908 | event_queue->oq_ci_copy = oq_ci; |
3909 | writel(oq_ci, event_queue->oq_ci); |
3910 | schedule_work(&ctrl_info->event_work); |
3911 | } |
3912 | |
3913 | return num_events; |
3914 | } |
3915 | |
3916 | #define PQI_LEGACY_INTX_MASK 0x1 |
3917 | |
3918 | static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) |
3919 | { |
3920 | u32 intx_mask; |
3921 | struct pqi_device_registers __iomem *pqi_registers; |
3922 | volatile void __iomem *register_addr; |
3923 | |
3924 | pqi_registers = ctrl_info->pqi_registers; |
3925 | |
3926 | if (enable_intx) |
3927 | register_addr = &pqi_registers->legacy_intx_mask_clear; |
3928 | else |
3929 | register_addr = &pqi_registers->legacy_intx_mask_set; |
3930 | |
3931 | intx_mask = readl(register_addr); |
3932 | intx_mask |= PQI_LEGACY_INTX_MASK; |
3933 | writel(intx_mask, register_addr); |
3934 | } |
3935 | |
3936 | static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, |
3937 | enum pqi_irq_mode new_mode) |
3938 | { |
3939 | switch (ctrl_info->irq_mode) { |
3940 | case IRQ_MODE_MSIX: |
3941 | switch (new_mode) { |
3942 | case IRQ_MODE_MSIX: |
3943 | break; |
3944 | case IRQ_MODE_INTX: |
3945 | pqi_configure_legacy_intx(ctrl_info, enable_intx: true); |
3946 | sis_enable_intx(ctrl_info); |
3947 | break; |
3948 | case IRQ_MODE_NONE: |
3949 | break; |
3950 | } |
3951 | break; |
3952 | case IRQ_MODE_INTX: |
3953 | switch (new_mode) { |
3954 | case IRQ_MODE_MSIX: |
3955 | pqi_configure_legacy_intx(ctrl_info, enable_intx: false); |
3956 | sis_enable_msix(ctrl_info); |
3957 | break; |
3958 | case IRQ_MODE_INTX: |
3959 | break; |
3960 | case IRQ_MODE_NONE: |
3961 | pqi_configure_legacy_intx(ctrl_info, enable_intx: false); |
3962 | break; |
3963 | } |
3964 | break; |
3965 | case IRQ_MODE_NONE: |
3966 | switch (new_mode) { |
3967 | case IRQ_MODE_MSIX: |
3968 | sis_enable_msix(ctrl_info); |
3969 | break; |
3970 | case IRQ_MODE_INTX: |
3971 | pqi_configure_legacy_intx(ctrl_info, enable_intx: true); |
3972 | sis_enable_intx(ctrl_info); |
3973 | break; |
3974 | case IRQ_MODE_NONE: |
3975 | break; |
3976 | } |
3977 | break; |
3978 | } |
3979 | |
3980 | ctrl_info->irq_mode = new_mode; |
3981 | } |
3982 | |
3983 | #define PQI_LEGACY_INTX_PENDING 0x1 |
3984 | |
3985 | static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) |
3986 | { |
3987 | bool valid_irq; |
3988 | u32 intx_status; |
3989 | |
3990 | switch (ctrl_info->irq_mode) { |
3991 | case IRQ_MODE_MSIX: |
3992 | valid_irq = true; |
3993 | break; |
3994 | case IRQ_MODE_INTX: |
3995 | intx_status = readl(addr: &ctrl_info->pqi_registers->legacy_intx_status); |
3996 | if (intx_status & PQI_LEGACY_INTX_PENDING) |
3997 | valid_irq = true; |
3998 | else |
3999 | valid_irq = false; |
4000 | break; |
4001 | case IRQ_MODE_NONE: |
4002 | default: |
4003 | valid_irq = false; |
4004 | break; |
4005 | } |
4006 | |
4007 | return valid_irq; |
4008 | } |
4009 | |
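| /* |
| * Per-queue-group interrupt handler: drain I/O responses for the queue |
| * group, drain event responses when this vector is the event IRQ, then |
| * kick off any requests still queued on the RAID and AIO paths. |
| */ |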
4010 | static irqreturn_t pqi_irq_handler(int irq, void *data) |
4011 | { |
4012 | struct pqi_ctrl_info *ctrl_info; |
4013 | struct pqi_queue_group *queue_group; |
4014 | int num_io_responses_handled; |
4015 | int num_events_handled; |
4016 | |
4017 | queue_group = data; |
4018 | ctrl_info = queue_group->ctrl_info; |
4019 | |
4020 | if (!pqi_is_valid_irq(ctrl_info)) |
4021 | return IRQ_NONE; |
4022 | |
4023 | num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); |
4024 | if (num_io_responses_handled < 0) |
4025 | goto out; |
4026 | |
4027 | if (irq == ctrl_info->event_irq) { |
4028 | num_events_handled = pqi_process_event_intr(ctrl_info); |
4029 | if (num_events_handled < 0) |
4030 | goto out; |
4031 | } else { |
4032 | num_events_handled = 0; |
4033 | } |
4034 | |
4035 | if (num_io_responses_handled + num_events_handled > 0) |
4036 | atomic_inc(&ctrl_info->num_interrupts); |
4037 | |
4038 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); |
4039 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); |
4040 | |
4041 | out: |
4042 | return IRQ_HANDLED; |
4043 | } |
4044 | |
4045 | static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) |
4046 | { |
4047 | struct pci_dev *pci_dev = ctrl_info->pci_dev; |
4048 | int i; |
4049 | int rc; |
4050 | |
4051 | ctrl_info->event_irq = pci_irq_vector(dev: pci_dev, nr: 0); |
4052 | |
4053 | for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { |
4054 | rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, |
4055 | DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); |
4056 | if (rc) { |
4057 | dev_err(&pci_dev->dev, |
4058 | "irq %u init failed with error %d\n", |
4059 | pci_irq_vector(pci_dev, i), rc); |
4060 | return rc; |
4061 | } |
4062 | ctrl_info->num_msix_vectors_initialized++; |
4063 | } |
4064 | |
4065 | return 0; |
4066 | } |
4067 | |
4068 | static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) |
4069 | { |
4070 | int i; |
4071 | |
4072 | for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) |
4073 | free_irq(pci_irq_vector(dev: ctrl_info->pci_dev, nr: i), |
4074 | &ctrl_info->queue_groups[i]); |
4075 | |
4076 | ctrl_info->num_msix_vectors_initialized = 0; |
4077 | } |
4078 | |
4079 | static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
4080 | { |
4081 | int num_vectors_enabled; |
4082 | unsigned int flags = PCI_IRQ_MSIX; |
4083 | |
4084 | if (!pqi_disable_managed_interrupts) |
4085 | flags |= PCI_IRQ_AFFINITY; |
4086 | |
4087 | num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, |
4088 | PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, |
4089 | flags); |
4090 | if (num_vectors_enabled < 0) { |
4091 | dev_err(&ctrl_info->pci_dev->dev, |
4092 | "MSI-X init failed with error %d\n", |
4093 | num_vectors_enabled); |
4094 | return num_vectors_enabled; |
4095 | } |
4096 | |
4097 | ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; |
4098 | ctrl_info->irq_mode = IRQ_MODE_MSIX; |
4099 | return 0; |
4100 | } |
4101 | |
4102 | static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
4103 | { |
4104 | if (ctrl_info->num_msix_vectors_enabled) { |
4105 | pci_free_irq_vectors(dev: ctrl_info->pci_dev); |
4106 | ctrl_info->num_msix_vectors_enabled = 0; |
4107 | } |
4108 | } |
4109 | |
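| /* |
| * Compute the worst-case, alignment-padded size of all operational |
| * queue element arrays, the event queue, and the queue indexes, |
| * allocate it as one coherent DMA buffer, and carve that buffer into |
| * the per-queue-group element arrays and index pointers. |
| */ |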
4110 | static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) |
4111 | { |
4112 | unsigned int i; |
4113 | size_t alloc_length; |
4114 | size_t element_array_length_per_iq; |
4115 | size_t element_array_length_per_oq; |
4116 | void *element_array; |
4117 | void __iomem *next_queue_index; |
4118 | void *aligned_pointer; |
4119 | unsigned int num_inbound_queues; |
4120 | unsigned int num_outbound_queues; |
4121 | unsigned int num_queue_indexes; |
4122 | struct pqi_queue_group *queue_group; |
4123 | |
4124 | element_array_length_per_iq = |
4125 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * |
4126 | ctrl_info->num_elements_per_iq; |
4127 | element_array_length_per_oq = |
4128 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * |
4129 | ctrl_info->num_elements_per_oq; |
4130 | num_inbound_queues = ctrl_info->num_queue_groups * 2; |
4131 | num_outbound_queues = ctrl_info->num_queue_groups; |
4132 | num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; |
4133 | |
4134 | aligned_pointer = NULL; |
4135 | |
4136 | for (i = 0; i < num_inbound_queues; i++) { |
4137 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
4138 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4139 | aligned_pointer += element_array_length_per_iq; |
4140 | } |
4141 | |
4142 | for (i = 0; i < num_outbound_queues; i++) { |
4143 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
4144 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4145 | aligned_pointer += element_array_length_per_oq; |
4146 | } |
4147 | |
4148 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
4149 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4150 | aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * |
4151 | PQI_EVENT_OQ_ELEMENT_LENGTH; |
4152 | |
4153 | for (i = 0; i < num_queue_indexes; i++) { |
4154 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
4155 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
4156 | aligned_pointer += sizeof(pqi_index_t); |
4157 | } |
4158 | |
4159 | alloc_length = (size_t)aligned_pointer + |
4160 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; |
4161 | |
4162 | alloc_length += PQI_EXTRA_SGL_MEMORY; |
4163 | |
4164 | ctrl_info->queue_memory_base = |
4165 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
4166 | &ctrl_info->queue_memory_base_dma_handle, |
4167 | GFP_KERNEL); |
4168 | |
4169 | if (!ctrl_info->queue_memory_base) |
4170 | return -ENOMEM; |
4171 | |
4172 | ctrl_info->queue_memory_length = alloc_length; |
4173 | |
4174 | element_array = PTR_ALIGN(ctrl_info->queue_memory_base, |
4175 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4176 | |
4177 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
4178 | queue_group = &ctrl_info->queue_groups[i]; |
4179 | queue_group->iq_element_array[RAID_PATH] = element_array; |
4180 | queue_group->iq_element_array_bus_addr[RAID_PATH] = |
4181 | ctrl_info->queue_memory_base_dma_handle + |
4182 | (element_array - ctrl_info->queue_memory_base); |
4183 | element_array += element_array_length_per_iq; |
4184 | element_array = PTR_ALIGN(element_array, |
4185 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4186 | queue_group->iq_element_array[AIO_PATH] = element_array; |
4187 | queue_group->iq_element_array_bus_addr[AIO_PATH] = |
4188 | ctrl_info->queue_memory_base_dma_handle + |
4189 | (element_array - ctrl_info->queue_memory_base); |
4190 | element_array += element_array_length_per_iq; |
4191 | element_array = PTR_ALIGN(element_array, |
4192 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4193 | } |
4194 | |
4195 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
4196 | queue_group = &ctrl_info->queue_groups[i]; |
4197 | queue_group->oq_element_array = element_array; |
4198 | queue_group->oq_element_array_bus_addr = |
4199 | ctrl_info->queue_memory_base_dma_handle + |
4200 | (element_array - ctrl_info->queue_memory_base); |
4201 | element_array += element_array_length_per_oq; |
4202 | element_array = PTR_ALIGN(element_array, |
4203 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4204 | } |
4205 | |
4206 | ctrl_info->event_queue.oq_element_array = element_array; |
4207 | ctrl_info->event_queue.oq_element_array_bus_addr = |
4208 | ctrl_info->queue_memory_base_dma_handle + |
4209 | (element_array - ctrl_info->queue_memory_base); |
4210 | element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * |
4211 | PQI_EVENT_OQ_ELEMENT_LENGTH; |
4212 | |
4213 | next_queue_index = (void __iomem *)PTR_ALIGN(element_array, |
4214 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
4215 | |
4216 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
4217 | queue_group = &ctrl_info->queue_groups[i]; |
4218 | queue_group->iq_ci[RAID_PATH] = next_queue_index; |
4219 | queue_group->iq_ci_bus_addr[RAID_PATH] = |
4220 | ctrl_info->queue_memory_base_dma_handle + |
4221 | (next_queue_index - |
4222 | (void __iomem *)ctrl_info->queue_memory_base); |
4223 | next_queue_index += sizeof(pqi_index_t); |
4224 | next_queue_index = PTR_ALIGN(next_queue_index, |
4225 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
4226 | queue_group->iq_ci[AIO_PATH] = next_queue_index; |
4227 | queue_group->iq_ci_bus_addr[AIO_PATH] = |
4228 | ctrl_info->queue_memory_base_dma_handle + |
4229 | (next_queue_index - |
4230 | (void __iomem *)ctrl_info->queue_memory_base); |
4231 | next_queue_index += sizeof(pqi_index_t); |
4232 | next_queue_index = PTR_ALIGN(next_queue_index, |
4233 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
4234 | queue_group->oq_pi = next_queue_index; |
4235 | queue_group->oq_pi_bus_addr = |
4236 | ctrl_info->queue_memory_base_dma_handle + |
4237 | (next_queue_index - |
4238 | (void __iomem *)ctrl_info->queue_memory_base); |
4239 | next_queue_index += sizeof(pqi_index_t); |
4240 | next_queue_index = PTR_ALIGN(next_queue_index, |
4241 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
4242 | } |
4243 | |
4244 | ctrl_info->event_queue.oq_pi = next_queue_index; |
4245 | ctrl_info->event_queue.oq_pi_bus_addr = |
4246 | ctrl_info->queue_memory_base_dma_handle + |
4247 | (next_queue_index - |
4248 | (void __iomem *)ctrl_info->queue_memory_base); |
4249 | |
4250 | return 0; |
4251 | } |
4252 | |
4253 | static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) |
4254 | { |
4255 | unsigned int i; |
4256 | u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; |
4257 | u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; |
4258 | |
4259 | /* |
4260 | * Initialize the backpointers to the controller structure in |
4261 | * each operational queue group structure. |
4262 | */ |
4263 | for (i = 0; i < ctrl_info->num_queue_groups; i++) |
4264 | ctrl_info->queue_groups[i].ctrl_info = ctrl_info; |
4265 | |
4266 | /* |
4267 | * Assign IDs to all operational queues. Note that the IDs |
4268 | * assigned to operational IQs are independent of the IDs |
4269 | * assigned to operational OQs. |
4270 | */ |
4271 | ctrl_info->event_queue.oq_id = next_oq_id++; |
4272 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
4273 | ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; |
4274 | ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; |
4275 | ctrl_info->queue_groups[i].oq_id = next_oq_id++; |
4276 | } |
4277 | |
4278 | /* |
4279 | * Assign MSI-X table entry indexes to all queues. Note that the |
4280 | * interrupt for the event queue is shared with the first queue group. |
4281 | */ |
4282 | ctrl_info->event_queue.int_msg_num = 0; |
4283 | for (i = 0; i < ctrl_info->num_queue_groups; i++) |
4284 | ctrl_info->queue_groups[i].int_msg_num = i; |
4285 | |
4286 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
4287 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); |
4288 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); |
4289 | INIT_LIST_HEAD(list: &ctrl_info->queue_groups[i].request_list[0]); |
4290 | INIT_LIST_HEAD(list: &ctrl_info->queue_groups[i].request_list[1]); |
4291 | } |
4292 | } |
4293 | |
4294 | static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) |
4295 | { |
4296 | size_t alloc_length; |
4297 | struct pqi_admin_queues_aligned *admin_queues_aligned; |
4298 | struct pqi_admin_queues *admin_queues; |
4299 | |
4300 | alloc_length = sizeof(struct pqi_admin_queues_aligned) + |
4301 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; |
4302 | |
4303 | ctrl_info->admin_queue_memory_base = |
4304 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
4305 | &ctrl_info->admin_queue_memory_base_dma_handle, |
4306 | GFP_KERNEL); |
4307 | |
4308 | if (!ctrl_info->admin_queue_memory_base) |
4309 | return -ENOMEM; |
4310 | |
4311 | ctrl_info->admin_queue_memory_length = alloc_length; |
4312 | |
4313 | admin_queues = &ctrl_info->admin_queues; |
4314 | admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, |
4315 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
4316 | admin_queues->iq_element_array = |
4317 | &admin_queues_aligned->iq_element_array; |
4318 | admin_queues->oq_element_array = |
4319 | &admin_queues_aligned->oq_element_array; |
4320 | admin_queues->iq_ci = |
4321 | (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; |
4322 | admin_queues->oq_pi = |
4323 | (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; |
4324 | |
4325 | admin_queues->iq_element_array_bus_addr = |
4326 | ctrl_info->admin_queue_memory_base_dma_handle + |
4327 | (admin_queues->iq_element_array - |
4328 | ctrl_info->admin_queue_memory_base); |
4329 | admin_queues->oq_element_array_bus_addr = |
4330 | ctrl_info->admin_queue_memory_base_dma_handle + |
4331 | (admin_queues->oq_element_array - |
4332 | ctrl_info->admin_queue_memory_base); |
4333 | admin_queues->iq_ci_bus_addr = |
4334 | ctrl_info->admin_queue_memory_base_dma_handle + |
4335 | ((void __iomem *)admin_queues->iq_ci - |
4336 | (void __iomem *)ctrl_info->admin_queue_memory_base); |
4337 | admin_queues->oq_pi_bus_addr = |
4338 | ctrl_info->admin_queue_memory_base_dma_handle + |
4339 | ((void __iomem *)admin_queues->oq_pi - |
4340 | (void __iomem *)ctrl_info->admin_queue_memory_base); |
4341 | |
4342 | return 0; |
4343 | } |
4344 | |
4345 | #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ |
4346 | #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 |
4347 | |
4348 | static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) |
4349 | { |
4350 | struct pqi_device_registers __iomem *pqi_registers; |
4351 | struct pqi_admin_queues *admin_queues; |
4352 | unsigned long timeout; |
4353 | u8 status; |
4354 | u32 reg; |
4355 | |
4356 | pqi_registers = ctrl_info->pqi_registers; |
4357 | admin_queues = &ctrl_info->admin_queues; |
4358 | |
4359 | writeq((u64)admin_queues->iq_element_array_bus_addr, |
4360 | &pqi_registers->admin_iq_element_array_addr); |
4361 | writeq((u64)admin_queues->oq_element_array_bus_addr, |
4362 | &pqi_registers->admin_oq_element_array_addr); |
4363 | writeq((u64)admin_queues->iq_ci_bus_addr, |
4364 | &pqi_registers->admin_iq_ci_addr); |
4365 | writeq((u64)admin_queues->oq_pi_bus_addr, |
4366 | &pqi_registers->admin_oq_pi_addr); |
4367 | |
4368 | reg = PQI_ADMIN_IQ_NUM_ELEMENTS | |
4369 | (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | |
4370 | (admin_queues->int_msg_num << 16); |
4371 | writel(val: reg, addr: &pqi_registers->admin_iq_num_elements); |
4372 | |
4373 | writel(PQI_CREATE_ADMIN_QUEUE_PAIR, |
4374 | addr: &pqi_registers->function_and_status_code); |
4375 | |
4376 | timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; |
4377 | while (1) { |
4378 | msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); |
4379 | status = readb(addr: &pqi_registers->function_and_status_code); |
4380 | if (status == PQI_STATUS_IDLE) |
4381 | break; |
4382 | if (time_after(jiffies, timeout)) |
4383 | return -ETIMEDOUT; |
4384 | } |
4385 | |
4386 | /* |
4387 | * The offset registers are not initialized to the correct |
4388 | * offsets until *after* the create admin queue pair command |
4389 | * completes successfully. |
4390 | */ |
4391 | admin_queues->iq_pi = ctrl_info->iomem_base + |
4392 | PQI_DEVICE_REGISTERS_OFFSET + |
4393 | readq(addr: &pqi_registers->admin_iq_pi_offset); |
4394 | admin_queues->oq_ci = ctrl_info->iomem_base + |
4395 | PQI_DEVICE_REGISTERS_OFFSET + |
4396 | readq(addr: &pqi_registers->admin_oq_ci_offset); |
4397 | |
4398 | return 0; |
4399 | } |
4400 | |
4401 | static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, |
4402 | struct pqi_general_admin_request *request) |
4403 | { |
4404 | struct pqi_admin_queues *admin_queues; |
4405 | void *next_element; |
4406 | pqi_index_t iq_pi; |
4407 | |
4408 | admin_queues = &ctrl_info->admin_queues; |
4409 | iq_pi = admin_queues->iq_pi_copy; |
4410 | |
4411 | next_element = admin_queues->iq_element_array + |
4412 | (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); |
4413 | |
4414 | memcpy(next_element, request, sizeof(*request)); |
4415 | |
4416 | iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; |
4417 | admin_queues->iq_pi_copy = iq_pi; |
4418 | |
4419 | /* |
4420 | * This write notifies the controller that an IU is available to be |
4421 | * processed. |
4422 | */ |
4423 | writel(val: iq_pi, addr: admin_queues->iq_pi); |
4424 | } |
4425 | |
4426 | #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 |
4427 | |
4428 | static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, |
4429 | struct pqi_general_admin_response *response) |
4430 | { |
4431 | struct pqi_admin_queues *admin_queues; |
4432 | pqi_index_t oq_pi; |
4433 | pqi_index_t oq_ci; |
4434 | unsigned long timeout; |
4435 | |
4436 | admin_queues = &ctrl_info->admin_queues; |
4437 | oq_ci = admin_queues->oq_ci_copy; |
4438 | |
4439 | timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; |
4440 | |
4441 | while (1) { |
4442 | oq_pi = readl(addr: admin_queues->oq_pi); |
4443 | if (oq_pi != oq_ci) |
4444 | break; |
4445 | if (time_after(jiffies, timeout)) { |
4446 | dev_err(&ctrl_info->pci_dev->dev, |
4447 | "timed out waiting for admin response\n"); |
4448 | return -ETIMEDOUT; |
4449 | } |
4450 | if (!sis_is_firmware_running(ctrl_info)) |
4451 | return -ENXIO; |
4452 | usleep_range(min: 1000, max: 2000); |
4453 | } |
4454 | |
4455 | memcpy(response, admin_queues->oq_element_array + |
4456 | (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); |
4457 | |
4458 | oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; |
4459 | admin_queues->oq_ci_copy = oq_ci; |
4460 | writel(val: oq_ci, addr: admin_queues->oq_ci); |
4461 | |
4462 | return 0; |
4463 | } |
4464 | |
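| /* |
| * Queue the I/O request (if one was supplied) and move as many queued |
| * IUs as will fit into the chosen path's inbound queue, spanning |
| * elements across the end-of-queue wrap when needed, then ring the |
| * doorbell if the producer index moved. |
| */ |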
4465 | static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, |
4466 | struct pqi_queue_group *queue_group, enum pqi_io_path path, |
4467 | struct pqi_io_request *io_request) |
4468 | { |
4469 | struct pqi_io_request *next; |
4470 | void *next_element; |
4471 | pqi_index_t iq_pi; |
4472 | pqi_index_t iq_ci; |
4473 | size_t iu_length; |
4474 | unsigned long flags; |
4475 | unsigned int num_elements_needed; |
4476 | unsigned int num_elements_to_end_of_queue; |
4477 | size_t copy_count; |
4478 | struct pqi_iu_header *request; |
4479 | |
4480 | spin_lock_irqsave(&queue_group->submit_lock[path], flags); |
4481 | |
4482 | if (io_request) { |
4483 | io_request->queue_group = queue_group; |
4484 | list_add_tail(&io_request->request_list_entry, |
4485 | &queue_group->request_list[path]); |
4486 | } |
4487 | |
4488 | iq_pi = queue_group->iq_pi_copy[path]; |
4489 | |
4490 | list_for_each_entry_safe(io_request, next, |
4491 | &queue_group->request_list[path], request_list_entry) { |
4492 | |
4493 | request = io_request->iu; |
4494 | |
4495 | iu_length = get_unaligned_le16(&request->iu_length) + |
4496 | PQI_REQUEST_HEADER_LENGTH; |
4497 | num_elements_needed = |
4498 | DIV_ROUND_UP(iu_length, |
4499 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
4500 | |
4501 | iq_ci = readl(queue_group->iq_ci[path]); |
4502 | |
4503 | if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, |
4504 | ctrl_info->num_elements_per_iq)) |
4505 | break; |
4506 | |
4507 | put_unaligned_le16(queue_group->oq_id, |
4508 | &request->response_queue_id); |
4509 | |
4510 | next_element = queue_group->iq_element_array[path] + |
4511 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
4512 | |
4513 | num_elements_to_end_of_queue = |
4514 | ctrl_info->num_elements_per_iq - iq_pi; |
4515 | |
4516 | if (num_elements_needed <= num_elements_to_end_of_queue) { |
4517 | memcpy(next_element, request, iu_length); |
4518 | } else { |
4519 | copy_count = num_elements_to_end_of_queue * |
4520 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; |
4521 | memcpy(next_element, request, copy_count); |
4522 | memcpy(queue_group->iq_element_array[path], |
4523 | (u8 *)request + copy_count, |
4524 | iu_length - copy_count); |
4525 | } |
4526 | |
4527 | iq_pi = (iq_pi + num_elements_needed) % |
4528 | ctrl_info->num_elements_per_iq; |
4529 | |
4530 | list_del(&io_request->request_list_entry); |
4531 | } |
4532 | |
4533 | if (iq_pi != queue_group->iq_pi_copy[path]) { |
4534 | queue_group->iq_pi_copy[path] = iq_pi; |
4535 | /* |
4536 | * This write notifies the controller that one or more IUs are |
4537 | * available to be processed. |
4538 | */ |
4539 | writel(iq_pi, queue_group->iq_pi[path]); |
4540 | } |
4541 | |
4542 | spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); |
4543 | } |
4544 | |
4545 | #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 |
4546 | |
4547 | static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, |
4548 | struct completion *wait) |
4549 | { |
4550 | int rc; |
4551 | |
4552 | while (1) { |
4553 | if (wait_for_completion_io_timeout(x: wait, |
4554 | PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { |
4555 | rc = 0; |
4556 | break; |
4557 | } |
4558 | |
4559 | pqi_check_ctrl_health(ctrl_info); |
4560 | if (pqi_ctrl_offline(ctrl_info)) { |
4561 | rc = -ENXIO; |
4562 | break; |
4563 | } |
4564 | } |
4565 | |
4566 | return rc; |
4567 | } |
4568 | |
4569 | static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, |
4570 | void *context) |
4571 | { |
4572 | struct completion *waiting = context; |
4573 | |
4574 | complete(waiting); |
4575 | } |
4576 | |
4577 | static int pqi_process_raid_io_error_synchronous( |
4578 | struct pqi_raid_error_info *error_info) |
4579 | { |
4580 | int rc = -EIO; |
4581 | |
4582 | switch (error_info->data_out_result) { |
4583 | case PQI_DATA_IN_OUT_GOOD: |
4584 | if (error_info->status == SAM_STAT_GOOD) |
4585 | rc = 0; |
4586 | break; |
4587 | case PQI_DATA_IN_OUT_UNDERFLOW: |
4588 | if (error_info->status == SAM_STAT_GOOD || |
4589 | error_info->status == SAM_STAT_CHECK_CONDITION) |
4590 | rc = 0; |
4591 | break; |
4592 | case PQI_DATA_IN_OUT_ABORTED: |
4593 | rc = PQI_CMD_STATUS_ABORTED; |
4594 | break; |
4595 | } |
4596 | |
4597 | return rc; |
4598 | } |
4599 | |
4600 | static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) |
4601 | { |
4602 | return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; |
4603 | } |
4604 | |
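| /* |
| * Send a RAID path request and wait for it to complete. A semaphore |
| * serializes callers, blockable requests wait out controller blocks, |
| * and on completion any error information is either copied back to the |
| * caller or folded into the return code. |
| */ |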
4605 | static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, |
4606 | struct pqi_iu_header *request, unsigned int flags, |
4607 | struct pqi_raid_error_info *error_info) |
4608 | { |
4609 | int rc = 0; |
4610 | struct pqi_io_request *io_request; |
4611 | size_t iu_length; |
4612 | DECLARE_COMPLETION_ONSTACK(wait); |
4613 | |
4614 | if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { |
4615 | if (down_interruptible(&ctrl_info->sync_request_sem)) |
4616 | return -ERESTARTSYS; |
4617 | } else { |
4618 | down(&ctrl_info->sync_request_sem); |
4619 | } |
4620 | |
4621 | pqi_ctrl_busy(ctrl_info); |
4622 | /* |
4623 | * Wait for other admin queue updates such as: |
4624 | * config table changes, OFA memory updates, ... |
4625 | */ |
4626 | if (pqi_is_blockable_request(request)) |
4627 | pqi_wait_if_ctrl_blocked(ctrl_info); |
4628 | |
4629 | if (pqi_ctrl_offline(ctrl_info)) { |
4630 | rc = -ENXIO; |
4631 | goto out; |
4632 | } |
4633 | |
4634 | io_request = pqi_alloc_io_request(ctrl_info, NULL); |
4635 | |
4636 | put_unaligned_le16(io_request->index, |
4637 | &(((struct pqi_raid_path_request *)request)->request_id)); |
4638 | |
4639 | if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) |
4640 | ((struct pqi_raid_path_request *)request)->error_index = |
4641 | ((struct pqi_raid_path_request *)request)->request_id; |
4642 | |
4643 | iu_length = get_unaligned_le16(&request->iu_length) + |
4644 | PQI_REQUEST_HEADER_LENGTH; |
4645 | memcpy(io_request->iu, request, iu_length); |
4646 | |
4647 | io_request->io_complete_callback = pqi_raid_synchronous_complete; |
4648 | io_request->context = &wait; |
4649 | |
4650 | pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, |
4651 | io_request); |
4652 | |
4653 | pqi_wait_for_completion_io(ctrl_info, &wait); |
4654 | |
4655 | if (error_info) { |
4656 | if (io_request->error_info) |
4657 | memcpy(error_info, io_request->error_info, sizeof(*error_info)); |
4658 | else |
4659 | memset(error_info, 0, sizeof(*error_info)); |
4660 | } else if (rc == 0 && io_request->error_info) { |
4661 | rc = pqi_process_raid_io_error_synchronous(io_request->error_info); |
4662 | } |
4663 | |
4664 | pqi_free_io_request(io_request); |
4665 | |
4666 | out: |
4667 | pqi_ctrl_unbusy(ctrl_info); |
4668 | up(&ctrl_info->sync_request_sem); |
4669 | |
4670 | return rc; |
4671 | } |
4672 | |
4673 | static int pqi_validate_admin_response( |
4674 | struct pqi_general_admin_response *response, u8 expected_function_code) |
4675 | { |
4676 | if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) |
4677 | return -EINVAL; |
4678 | |
4679 | if (get_unaligned_le16(p: &response->header.iu_length) != |
4680 | PQI_GENERAL_ADMIN_IU_LENGTH) |
4681 | return -EINVAL; |
4682 | |
4683 | if (response->function_code != expected_function_code) |
4684 | return -EINVAL; |
4685 | |
4686 | if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) |
4687 | return -EINVAL; |
4688 | |
4689 | return 0; |
4690 | } |
4691 | |
4692 | static int pqi_submit_admin_request_synchronous( |
4693 | struct pqi_ctrl_info *ctrl_info, |
4694 | struct pqi_general_admin_request *request, |
4695 | struct pqi_general_admin_response *response) |
4696 | { |
4697 | int rc; |
4698 | |
4699 | pqi_submit_admin_request(ctrl_info, request); |
4700 | |
4701 | rc = pqi_poll_for_admin_response(ctrl_info, response); |
4702 | |
4703 | if (rc == 0) |
4704 | rc = pqi_validate_admin_response(response, expected_function_code: request->function_code); |
4705 | |
4706 | return rc; |
4707 | } |
4708 | |
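| /* |
| * Issue the REPORT DEVICE CAPABILITY admin command into a DMA-mapped |
| * buffer and cache the controller's queue limits and SOP IU layer |
| * descriptor values in ctrl_info. |
| */ |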
4709 | static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) |
4710 | { |
4711 | int rc; |
4712 | struct pqi_general_admin_request request; |
4713 | struct pqi_general_admin_response response; |
4714 | struct pqi_device_capability *capability; |
4715 | struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; |
4716 | |
4717 | capability = kmalloc(sizeof(*capability), GFP_KERNEL); |
4718 | if (!capability) |
4719 | return -ENOMEM; |
4720 | |
4721 | memset(&request, 0, sizeof(request)); |
4722 | |
4723 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
4724 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
4725 | &request.header.iu_length); |
4726 | request.function_code = |
4727 | PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; |
4728 | put_unaligned_le32(sizeof(*capability), |
4729 | &request.data.report_device_capability.buffer_length); |
4730 | |
4731 | rc = pqi_map_single(ctrl_info->pci_dev, |
4732 | &request.data.report_device_capability.sg_descriptor, |
4733 | capability, sizeof(*capability), |
4734 | DMA_FROM_DEVICE); |
4735 | if (rc) |
4736 | goto out; |
4737 | |
4738 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); |
4739 | |
4740 | pqi_pci_unmap(ctrl_info->pci_dev, |
4741 | &request.data.report_device_capability.sg_descriptor, 1, |
4742 | DMA_FROM_DEVICE); |
4743 | |
4744 | if (rc) |
4745 | goto out; |
4746 | |
4747 | if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { |
4748 | rc = -EIO; |
4749 | goto out; |
4750 | } |
4751 | |
4752 | ctrl_info->max_inbound_queues = |
4753 | get_unaligned_le16(&capability->max_inbound_queues); |
4754 | ctrl_info->max_elements_per_iq = |
4755 | get_unaligned_le16(&capability->max_elements_per_iq); |
4756 | ctrl_info->max_iq_element_length = |
4757 | get_unaligned_le16(&capability->max_iq_element_length) |
4758 | * 16; |
4759 | ctrl_info->max_outbound_queues = |
4760 | get_unaligned_le16(&capability->max_outbound_queues); |
4761 | ctrl_info->max_elements_per_oq = |
4762 | get_unaligned_le16(&capability->max_elements_per_oq); |
4763 | ctrl_info->max_oq_element_length = |
4764 | get_unaligned_le16(&capability->max_oq_element_length) |
4765 | * 16; |
4766 | |
4767 | sop_iu_layer_descriptor = |
4768 | &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; |
4769 | |
4770 | ctrl_info->max_inbound_iu_length_per_firmware = |
4771 | get_unaligned_le16( |
4772 | &sop_iu_layer_descriptor->max_inbound_iu_length); |
4773 | ctrl_info->inbound_spanning_supported = |
4774 | sop_iu_layer_descriptor->inbound_spanning_supported; |
4775 | ctrl_info->outbound_spanning_supported = |
4776 | sop_iu_layer_descriptor->outbound_spanning_supported; |
4777 | |
4778 | out: |
4779 | kfree(capability); |
4780 | |
4781 | return rc; |
4782 | } |
4783 | |
4784 | static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) |
4785 | { |
4786 | if (ctrl_info->max_iq_element_length < |
4787 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { |
4788 | dev_err(&ctrl_info->pci_dev->dev, |
4789 | "max. inbound queue element length of %d is less than the required length of %d\n", |
4790 | ctrl_info->max_iq_element_length, |
4791 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
4792 | return -EINVAL; |
4793 | } |
4794 | |
4795 | if (ctrl_info->max_oq_element_length < |
4796 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { |
4797 | dev_err(&ctrl_info->pci_dev->dev, |
4798 | "max. outbound queue element length of %d is less than the required length of %d\n", |
4799 | ctrl_info->max_oq_element_length, |
4800 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); |
4801 | return -EINVAL; |
4802 | } |
4803 | |
4804 | if (ctrl_info->max_inbound_iu_length_per_firmware < |
4805 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { |
4806 | dev_err(&ctrl_info->pci_dev->dev, |
4807 | "max. inbound IU length of %u is less than the min. required length of %d\n", |
4808 | ctrl_info->max_inbound_iu_length_per_firmware, |
4809 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
4810 | return -EINVAL; |
4811 | } |
4812 | |
4813 | if (!ctrl_info->inbound_spanning_supported) { |
4814 | dev_err(&ctrl_info->pci_dev->dev, |
4815 | "the controller does not support inbound spanning\n"); |
4816 | return -EINVAL; |
4817 | } |
4818 | |
4819 | if (ctrl_info->outbound_spanning_supported) { |
4820 | dev_err(&ctrl_info->pci_dev->dev, |
4821 | "the controller supports outbound spanning but this driver does not\n"); |
4822 | return -EINVAL; |
4823 | } |
4824 | |
4825 | return 0; |
4826 | } |
4827 | |
4828 | static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) |
4829 | { |
4830 | int rc; |
4831 | struct pqi_event_queue *event_queue; |
4832 | struct pqi_general_admin_request request; |
4833 | struct pqi_general_admin_response response; |
4834 | |
4835 | event_queue = &ctrl_info->event_queue; |
4836 | |
4837 | /* |
4838 | * Create OQ (Outbound Queue - device to host queue) to dedicate |
4839 | * to events. |
4840 | */ |
4841 | memset(&request, 0, sizeof(request)); |
4842 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
4843 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
4844 | &request.header.iu_length); |
4845 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; |
4846 | put_unaligned_le16(event_queue->oq_id, |
4847 | &request.data.create_operational_oq.queue_id); |
4848 | put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, |
4849 | &request.data.create_operational_oq.element_array_addr); |
4850 | put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, |
4851 | &request.data.create_operational_oq.pi_addr); |
4852 | put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, |
4853 | &request.data.create_operational_oq.num_elements); |
4854 | put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, |
4855 | &request.data.create_operational_oq.element_length); |
4856 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; |
4857 | put_unaligned_le16(event_queue->int_msg_num, |
4858 | &request.data.create_operational_oq.int_msg_num); |
4859 | |
4860 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
4861 | &response); |
4862 | if (rc) |
4863 | return rc; |
4864 | |
4865 | event_queue->oq_ci = ctrl_info->iomem_base + |
4866 | PQI_DEVICE_REGISTERS_OFFSET + |
4867 | get_unaligned_le64( |
4868 | &response.data.create_operational_oq.oq_ci_offset); |
4869 | |
4870 | return 0; |
4871 | } |
4872 | |
4873 | static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, |
4874 | unsigned int group_number) |
4875 | { |
4876 | int rc; |
4877 | struct pqi_queue_group *queue_group; |
4878 | struct pqi_general_admin_request request; |
4879 | struct pqi_general_admin_response response; |
4880 | |
4881 | queue_group = &ctrl_info->queue_groups[group_number]; |
4882 | |
4883 | /* |
4884 | * Create IQ (Inbound Queue - host to device queue) for |
4885 | * RAID path. |
4886 | */ |
4887 | memset(&request, 0, sizeof(request)); |
4888 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
4889 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
4890 | &request.header.iu_length); |
4891 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; |
4892 | put_unaligned_le16(queue_group->iq_id[RAID_PATH], |
4893 | &request.data.create_operational_iq.queue_id); |
4894 | put_unaligned_le64( |
4895 | (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], |
4896 | &request.data.create_operational_iq.element_array_addr); |
4897 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], |
4898 | &request.data.create_operational_iq.ci_addr); |
4899 | put_unaligned_le16(ctrl_info->num_elements_per_iq, |
4900 | &request.data.create_operational_iq.num_elements); |
4901 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, |
4902 | &request.data.create_operational_iq.element_length); |
4903 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; |
4904 | |
4905 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
4906 | &response); |
4907 | if (rc) { |
4908 | dev_err(&ctrl_info->pci_dev->dev, |
4909 | "error creating inbound RAID queue\n"); |
4910 | return rc; |
4911 | } |
4912 | |
4913 | queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + |
4914 | PQI_DEVICE_REGISTERS_OFFSET + |
4915 | get_unaligned_le64( |
4916 | &response.data.create_operational_iq.iq_pi_offset); |
4917 | |
4918 | /* |
4919 | * Create IQ (Inbound Queue - host to device queue) for |
4920 | * Advanced I/O (AIO) path. |
4921 | */ |
4922 | memset(&request, 0, sizeof(request)); |
4923 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
4924 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
4925 | &request.header.iu_length); |
4926 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; |
4927 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], |
4928 | &request.data.create_operational_iq.queue_id); |
4929 | put_unaligned_le64((u64)queue_group-> |
4930 | iq_element_array_bus_addr[AIO_PATH], |
4931 | &request.data.create_operational_iq.element_array_addr); |
4932 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], |
4933 | &request.data.create_operational_iq.ci_addr); |
4934 | put_unaligned_le16(ctrl_info->num_elements_per_iq, |
4935 | &request.data.create_operational_iq.num_elements); |
4936 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, |
4937 | &request.data.create_operational_iq.element_length); |
4938 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; |
4939 | |
4940 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
4941 | &response); |
4942 | if (rc) { |
4943 | dev_err(&ctrl_info->pci_dev->dev, |
4944 | "error creating inbound AIO queue\n"); |
4945 | return rc; |
4946 | } |
4947 | |
4948 | queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + |
4949 | PQI_DEVICE_REGISTERS_OFFSET + |
4950 | get_unaligned_le64( |
4951 | &response.data.create_operational_iq.iq_pi_offset); |
4952 | |
4953 | /* |
4954 | * Designate the 2nd IQ as the AIO path. By default, all IQs are |
4955 | * assumed to be for RAID path I/O unless we change the queue's |
4956 | * property. |
4957 | */ |
4958 | memset(&request, 0, sizeof(request)); |
4959 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
4960 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
4961 | &request.header.iu_length); |
4962 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; |
4963 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], |
4964 | &request.data.change_operational_iq_properties.queue_id); |
4965 | put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, |
4966 | &request.data.change_operational_iq_properties.vendor_specific); |
4967 | |
4968 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
4969 | &response); |
4970 | if (rc) { |
4971 | dev_err(&ctrl_info->pci_dev->dev, |
4972 | "error changing queue property\n"); |
4973 | return rc; |
4974 | } |
4975 | |
4976 | /* |
4977 | * Create OQ (Outbound Queue - device to host queue). |
4978 | */ |
4979 | memset(&request, 0, sizeof(request)); |
4980 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
4981 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
4982 | &request.header.iu_length); |
4983 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; |
4984 | put_unaligned_le16(queue_group->oq_id, |
4985 | &request.data.create_operational_oq.queue_id); |
4986 | put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, |
4987 | &request.data.create_operational_oq.element_array_addr); |
4988 | put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, |
4989 | &request.data.create_operational_oq.pi_addr); |
4990 | put_unaligned_le16(ctrl_info->num_elements_per_oq, |
4991 | &request.data.create_operational_oq.num_elements); |
4992 | put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, |
4993 | &request.data.create_operational_oq.element_length); |
4994 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; |
4995 | put_unaligned_le16(queue_group->int_msg_num, |
4996 | &request.data.create_operational_oq.int_msg_num); |
4997 | |
4998 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
4999 | &response); |
5000 | if (rc) { |
5001 | dev_err(&ctrl_info->pci_dev->dev, |
5002 | "error creating outbound queue\n"); |
5003 | return rc; |
5004 | } |
5005 | |
5006 | queue_group->oq_ci = ctrl_info->iomem_base + |
5007 | PQI_DEVICE_REGISTERS_OFFSET + |
5008 | get_unaligned_le64( |
5009 | &response.data.create_operational_oq.oq_ci_offset); |
5010 | |
5011 | return 0; |
5012 | } |
5013 | |
5014 | static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) |
5015 | { |
5016 | int rc; |
5017 | unsigned int i; |
5018 | |
5019 | rc = pqi_create_event_queue(ctrl_info); |
5020 | if (rc) { |
5021 | dev_err(&ctrl_info->pci_dev->dev, |
5022 | "error creating event queue\n"); |
5023 | return rc; |
5024 | } |
5025 | |
5026 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
5027 | rc = pqi_create_queue_group(ctrl_info, i); |
5028 | if (rc) { |
5029 | dev_err(&ctrl_info->pci_dev->dev, |
5030 | "error creating queue group number %u/%u\n", |
5031 | i, ctrl_info->num_queue_groups); |
5032 | return rc; |
5033 | } |
5034 | } |
5035 | |
5036 | return 0; |
5037 | } |
5038 | |
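| /* |
| * Size of the buffer used to read/write the controller's event |
| * configuration: the fixed header plus PQI_MAX_EVENT_DESCRIPTORS |
| * trailing descriptors. |
| */ |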
5039 | #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ |
5040 | struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) |
5041 | |
5042 | static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, |
5043 | bool enable_events) |
5044 | { |
5045 | int rc; |
5046 | unsigned int i; |
5047 | struct pqi_event_config *event_config; |
5048 | struct pqi_event_descriptor *event_descriptor; |
5049 | struct pqi_general_management_request request; |
5050 | |
5051 | event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
5052 | GFP_KERNEL); |
5053 | if (!event_config) |
5054 | return -ENOMEM; |
5055 | |
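| /* First read the controller's current event configuration. */ |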
5056 | memset(&request, 0, sizeof(request)); |
5057 | |
5058 | request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; |
5059 | put_unaligned_le16(offsetof(struct pqi_general_management_request, |
5060 | data.report_event_configuration.sg_descriptors[1]) - |
5061 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); |
5062 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
5063 | &request.data.report_event_configuration.buffer_length); |
5064 | |
5065 | rc = pqi_map_single(ctrl_info->pci_dev, |
5066 | request.data.report_event_configuration.sg_descriptors, |
5067 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
5068 | DMA_FROM_DEVICE); |
5069 | if (rc) |
5070 | goto out; |
5071 | |
5072 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
5073 | |
5074 | pqi_pci_unmap(ctrl_info->pci_dev, |
5075 | request.data.report_event_configuration.sg_descriptors, 1, |
5076 | DMA_FROM_DEVICE); |
5077 | |
5078 | if (rc) |
5079 | goto out; |
5080 | |
5081 | for (i = 0; i < event_config->num_event_descriptors; i++) { |
5082 | event_descriptor = &event_config->descriptors[i]; |
5083 | if (enable_events && |
5084 | pqi_is_supported_event(event_descriptor->event_type)) |
5085 | put_unaligned_le16(ctrl_info->event_queue.oq_id, |
5086 | &event_descriptor->oq_id); |
5087 | else |
5088 | put_unaligned_le16(0, &event_descriptor->oq_id); |
5089 | } |
5090 | |
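| /* Write the updated event configuration back to the controller. */ |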
5091 | memset(&request, 0, sizeof(request)); |
5092 | |
5093 | request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; |
5094 | put_unaligned_le16(offsetof(struct pqi_general_management_request, |
5095 | data.report_event_configuration.sg_descriptors[1]) - |
5096 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); |
5097 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
5098 | &request.data.report_event_configuration.buffer_length); |
5099 | |
5100 | rc = pqi_map_single(ctrl_info->pci_dev, |
5101 | request.data.report_event_configuration.sg_descriptors, |
5102 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
5103 | DMA_TO_DEVICE); |
5104 | if (rc) |
5105 | goto out; |
5106 | |
5107 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
5108 | |
5109 | pqi_pci_unmap(ctrl_info->pci_dev, |
5110 | request.data.report_event_configuration.sg_descriptors, 1, |
5111 | DMA_TO_DEVICE); |
5112 | |
5113 | out: |
5114 | kfree(event_config); |
5115 | |
5116 | return rc; |
5117 | } |
5118 | |
5119 | static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) |
5120 | { |
5121 | return pqi_configure_events(ctrl_info, true); |
5122 | } |
5123 | |
5124 | static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) |
5125 | { |
5126 | unsigned int i; |
5127 | struct device *dev; |
5128 | size_t sg_chain_buffer_length; |
5129 | struct pqi_io_request *io_request; |
5130 | |
5131 | if (!ctrl_info->io_request_pool) |
5132 | return; |
5133 | |
5134 | dev = &ctrl_info->pci_dev->dev; |
5135 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; |
5136 | io_request = ctrl_info->io_request_pool; |
5137 | |
5138 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
5139 | kfree(io_request->iu); |
5140 | if (!io_request->sg_chain_buffer) |
5141 | break; |
5142 | dma_free_coherent(dev, sg_chain_buffer_length, |
5143 | io_request->sg_chain_buffer, |
5144 | io_request->sg_chain_buffer_dma_handle); |
5145 | io_request++; |
5146 | } |
5147 | |
5148 | kfree(ctrl_info->io_request_pool); |
5149 | ctrl_info->io_request_pool = NULL; |
5150 | } |
5151 | |
5152 | static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) |
5153 | { |
5154 | ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, |
5155 | ctrl_info->error_buffer_length, |
5156 | &ctrl_info->error_buffer_dma_handle, |
5157 | GFP_KERNEL); |
5158 | if (!ctrl_info->error_buffer) |
5159 | return -ENOMEM; |
5160 | |
5161 | return 0; |
5162 | } |
5163 | |
5164 | static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) |
5165 | { |
5166 | unsigned int i; |
5167 | void *sg_chain_buffer; |
5168 | size_t sg_chain_buffer_length; |
5169 | dma_addr_t sg_chain_buffer_dma_handle; |
5170 | struct device *dev; |
5171 | struct pqi_io_request *io_request; |
5172 | |
5173 | ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, |
5174 | sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); |
5175 | |
5176 | if (!ctrl_info->io_request_pool) { |
5177 | dev_err(&ctrl_info->pci_dev->dev, |
5178 | "failed to allocate I/O request pool\n"); |
5179 | goto error; |
5180 | } |
5181 | |
5182 | dev = &ctrl_info->pci_dev->dev; |
5183 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; |
5184 | io_request = ctrl_info->io_request_pool; |
5185 | |
5186 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
5187 | io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); |
5188 | |
5189 | if (!io_request->iu) { |
5190 | dev_err(&ctrl_info->pci_dev->dev, |
5191 | "failed to allocate IU buffers\n"); |
5192 | goto error; |
5193 | } |
5194 | |
5195 | sg_chain_buffer = dma_alloc_coherent(dev, |
5196 | sg_chain_buffer_length, &sg_chain_buffer_dma_handle, |
5197 | GFP_KERNEL); |
5198 | |
5199 | if (!sg_chain_buffer) { |
5200 | dev_err(&ctrl_info->pci_dev->dev, |
5201 | "failed to allocate PQI scatter-gather chain buffers\n"); |
5202 | goto error; |
5203 | } |
5204 | |
5205 | io_request->index = i; |
5206 | io_request->sg_chain_buffer = sg_chain_buffer; |
5207 | io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; |
5208 | io_request++; |
5209 | } |
5210 | |
5211 | return 0; |
5212 | |
5213 | error: |
5214 | pqi_free_all_io_requests(ctrl_info); |
5215 | |
5216 | return -ENOMEM; |
5217 | } |
5218 | |
5219 | /* |
5220 | * Calculate required resources that are sized based on max. outstanding |
5221 | * requests and max. transfer size. |
5222 | */ |
5223 | |
5224 | static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) |
5225 | { |
5226 | u32 max_transfer_size; |
5227 | u32 max_sg_entries; |
5228 | |
5229 | ctrl_info->scsi_ml_can_queue = |
5230 | ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; |
5231 | ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; |
5232 | |
5233 | ctrl_info->error_buffer_length = |
5234 | ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; |
5235 | |
5236 | if (reset_devices) |
5237 | max_transfer_size = min(ctrl_info->max_transfer_size, |
5238 | PQI_MAX_TRANSFER_SIZE_KDUMP); |
5239 | else |
5240 | max_transfer_size = min(ctrl_info->max_transfer_size, |
5241 | PQI_MAX_TRANSFER_SIZE); |
5242 | |
5243 | max_sg_entries = max_transfer_size / PAGE_SIZE; |
5244 | |
5245 | /* +1 to cover when the buffer is not page-aligned. */ |
5246 | max_sg_entries++; |
5247 | |
5248 | max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); |
5249 | |
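| /* |
| * Recompute the transfer size from the clamped SG count; the -1 gives |
| * back the entry added above for non-page-aligned buffers. |
| */ |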
5250 | max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; |
5251 | |
5252 | ctrl_info->sg_chain_buffer_length = |
5253 | (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + |
5254 | PQI_EXTRA_SGL_MEMORY; |
5255 | ctrl_info->sg_tablesize = max_sg_entries; |
5256 | ctrl_info->max_sectors = max_transfer_size / 512; |
5257 | } |
5258 | |
5259 | static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) |
5260 | { |
5261 | int num_queue_groups; |
5262 | u16 num_elements_per_iq; |
5263 | u16 num_elements_per_oq; |
5264 | |
5265 | if (reset_devices) { |
5266 | num_queue_groups = 1; |
5267 | } else { |
5268 | int num_cpus; |
5269 | int max_queue_groups; |
5270 | |
5271 | max_queue_groups = min(ctrl_info->max_inbound_queues / 2, |
5272 | ctrl_info->max_outbound_queues - 1); |
5273 | max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); |
5274 | |
5275 | num_cpus = num_online_cpus(); |
5276 | num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); |
5277 | num_queue_groups = min(num_queue_groups, max_queue_groups); |
5278 | } |
5279 | |
5280 | ctrl_info->num_queue_groups = num_queue_groups; |
5281 | |
5282 | /* |
5283 | * Make sure that the max. inbound IU length is an even multiple |
5284 | * of our inbound element length. |
5285 | */ |
5286 | ctrl_info->max_inbound_iu_length = |
5287 | (ctrl_info->max_inbound_iu_length_per_firmware / |
5288 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * |
5289 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; |
5290 | |
5291 | num_elements_per_iq = |
5292 | (ctrl_info->max_inbound_iu_length / |
5293 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
5294 | |
5295 | /* Add one because one element in each queue is unusable. */ |
5296 | num_elements_per_iq++; |
5297 | |
5298 | num_elements_per_iq = min(num_elements_per_iq, |
5299 | ctrl_info->max_elements_per_iq); |
5300 | |
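| /* |
| * Note: each queue group has two inbound queues (RAID and AIO paths) |
| * sharing one outbound queue, so the OQ is sized to hold responses |
| * from both. |
| */ |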
5301 | num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; |
5302 | num_elements_per_oq = min(num_elements_per_oq, |
5303 | ctrl_info->max_elements_per_oq); |
5304 | |
5305 | ctrl_info->num_elements_per_iq = num_elements_per_iq; |
5306 | ctrl_info->num_elements_per_oq = num_elements_per_oq; |
5307 | |
5308 | ctrl_info->max_sg_per_iu = |
5309 | ((ctrl_info->max_inbound_iu_length - |
5310 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / |
5311 | sizeof(struct pqi_sg_descriptor)) + |
5312 | PQI_MAX_EMBEDDED_SG_DESCRIPTORS; |
5313 | |
5314 | ctrl_info->max_sg_per_r56_iu = |
5315 | ((ctrl_info->max_inbound_iu_length - |
5316 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / |
5317 | sizeof(struct pqi_sg_descriptor)) + |
5318 | PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; |
5319 | } |
5320 | |
5321 | static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, |
5322 | struct scatterlist *sg) |
5323 | { |
5324 | u64 address = (u64)sg_dma_address(sg); |
5325 | unsigned int length = sg_dma_len(sg); |
5326 | |
5327 | put_unaligned_le64(address, &sg_descriptor->address); |
5328 | put_unaligned_le32(length, &sg_descriptor->length); |
5329 | put_unaligned_le32(0, &sg_descriptor->flags); |
5330 | } |
5331 | |
5332 | static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, |
5333 | struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, |
5334 | int max_sg_per_iu, bool *chained) |
5335 | { |
5336 | int i; |
5337 | unsigned int num_sg_in_iu; |
5338 | |
5339 | *chained = false; |
5340 | i = 0; |
5341 | num_sg_in_iu = 0; |
5342 | max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ |
5343 | |
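| /* |
| * Fill descriptors in the IU until the reserved chain slot is reached; |
| * that slot then becomes a CISS_SG_CHAIN descriptor pointing at the |
| * pre-allocated DMA chain buffer where the remaining entries continue. |
| */ |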
5344 | while (1) { |
5345 | pqi_set_sg_descriptor(sg_descriptor, sg); |
5346 | if (!*chained) |
5347 | num_sg_in_iu++; |
5348 | i++; |
5349 | if (i == sg_count) |
5350 | break; |
5351 | sg_descriptor++; |
5352 | if (i == max_sg_per_iu) { |
5353 | put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, |
5354 | &sg_descriptor->address); |
5355 | put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), |
5356 | &sg_descriptor->length); |
5357 | put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); |
5358 | *chained = true; |
5359 | num_sg_in_iu++; |
5360 | sg_descriptor = io_request->sg_chain_buffer; |
5361 | } |
5362 | sg = sg_next(sg); |
5363 | } |
5364 | |
5365 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); |
5366 | |
5367 | return num_sg_in_iu; |
5368 | } |
5369 | |
5370 | static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, |
5371 | struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, |
5372 | struct pqi_io_request *io_request) |
5373 | { |
5374 | u16 iu_length; |
5375 | int sg_count; |
5376 | bool chained; |
5377 | unsigned int num_sg_in_iu; |
5378 | struct scatterlist *sg; |
5379 | struct pqi_sg_descriptor *sg_descriptor; |
5380 | |
5381 | sg_count = scsi_dma_map(scmd); |
5382 | if (sg_count < 0) |
5383 | return sg_count; |
5384 | |
5385 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - |
5386 | PQI_REQUEST_HEADER_LENGTH; |
5387 | |
5388 | if (sg_count == 0) |
5389 | goto out; |
5390 | |
5391 | sg = scsi_sglist(scmd); |
5392 | sg_descriptor = request->sg_descriptors; |
5393 | |
5394 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
5395 | ctrl_info->max_sg_per_iu, &chained); |
5396 | |
5397 | request->partial = chained; |
5398 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
5399 | |
5400 | out: |
5401 | put_unaligned_le16(iu_length, &request->header.iu_length); |
5402 | |
5403 | return 0; |
5404 | } |
5405 | |
5406 | static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, |
5407 | struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, |
5408 | struct pqi_io_request *io_request) |
5409 | { |
5410 | u16 iu_length; |
5411 | int sg_count; |
5412 | bool chained; |
5413 | unsigned int num_sg_in_iu; |
5414 | struct scatterlist *sg; |
5415 | struct pqi_sg_descriptor *sg_descriptor; |
5416 | |
5417 | sg_count = scsi_dma_map(scmd); |
5418 | if (sg_count < 0) |
5419 | return sg_count; |
5420 | |
5421 | iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - |
5422 | PQI_REQUEST_HEADER_LENGTH; |
5423 | num_sg_in_iu = 0; |
5424 | |
5425 | if (sg_count == 0) |
5426 | goto out; |
5427 | |
5428 | sg = scsi_sglist(scmd); |
5429 | sg_descriptor = request->sg_descriptors; |
5430 | |
5431 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
5432 | ctrl_info->max_sg_per_iu, &chained); |
5433 | |
5434 | request->partial = chained; |
5435 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
5436 | |
5437 | out: |
5438 | put_unaligned_le16(iu_length, &request->header.iu_length); |
5439 | request->num_sg_descriptors = num_sg_in_iu; |
5440 | |
5441 | return 0; |
5442 | } |
5443 | |
5444 | static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, |
5445 | struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, |
5446 | struct pqi_io_request *io_request) |
5447 | { |
5448 | u16 iu_length; |
5449 | int sg_count; |
5450 | bool chained; |
5451 | unsigned int num_sg_in_iu; |
5452 | struct scatterlist *sg; |
5453 | struct pqi_sg_descriptor *sg_descriptor; |
5454 | |
5455 | sg_count = scsi_dma_map(scmd); |
5456 | if (sg_count < 0) |
5457 | return sg_count; |
5458 | |
5459 | iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - |
5460 | PQI_REQUEST_HEADER_LENGTH; |
5461 | num_sg_in_iu = 0; |
5462 | |
5463 | if (sg_count != 0) { |
5464 | sg = scsi_sglist(scmd); |
5465 | sg_descriptor = request->sg_descriptors; |
5466 | |
5467 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
5468 | ctrl_info->max_sg_per_r56_iu, &chained); |
5469 | |
5470 | request->partial = chained; |
5471 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
5472 | } |
5473 | |
5474 | put_unaligned_le16(iu_length, &request->header.iu_length); |
5475 | request->num_sg_descriptors = num_sg_in_iu; |
5476 | |
5477 | return 0; |
5478 | } |
5479 | |
5480 | static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, |
5481 | struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, |
5482 | struct pqi_io_request *io_request) |
5483 | { |
5484 | u16 iu_length; |
5485 | int sg_count; |
5486 | bool chained; |
5487 | unsigned int num_sg_in_iu; |
5488 | struct scatterlist *sg; |
5489 | struct pqi_sg_descriptor *sg_descriptor; |
5490 | |
5491 | sg_count = scsi_dma_map(scmd); |
5492 | if (sg_count < 0) |
5493 | return sg_count; |
5494 | |
5495 | iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - |
5496 | PQI_REQUEST_HEADER_LENGTH; |
5497 | num_sg_in_iu = 0; |
5498 | |
5499 | if (sg_count == 0) |
5500 | goto out; |
5501 | |
5502 | sg = scsi_sglist(scmd); |
5503 | sg_descriptor = request->sg_descriptors; |
5504 | |
5505 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
5506 | ctrl_info->max_sg_per_iu, &chained); |
5507 | |
5508 | request->partial = chained; |
5509 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
5510 | |
5511 | out: |
5512 | put_unaligned_le16(iu_length, &request->header.iu_length); |
5513 | request->num_sg_descriptors = num_sg_in_iu; |
5514 | |
5515 | return 0; |
5516 | } |
5517 | |
5518 | static void pqi_raid_io_complete(struct pqi_io_request *io_request, |
5519 | void *context) |
5520 | { |
5521 | struct scsi_cmnd *scmd; |
5522 | |
5523 | scmd = io_request->scmd; |
5524 | pqi_free_io_request(io_request); |
5525 | scsi_dma_unmap(scmd); |
5526 | pqi_scsi_done(scmd); |
5527 | } |
5528 | |
5529 | static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, |
5530 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
5531 | struct pqi_queue_group *queue_group, bool io_high_prio) |
5532 | { |
5533 | int rc; |
5534 | size_t cdb_length; |
5535 | struct pqi_io_request *io_request; |
5536 | struct pqi_raid_path_request *request; |
5537 | |
5538 | io_request = pqi_alloc_io_request(ctrl_info, scmd); |
5539 | if (!io_request) |
5540 | return SCSI_MLQUEUE_HOST_BUSY; |
5541 | |
5542 | io_request->io_complete_callback = pqi_raid_io_complete; |
5543 | io_request->scmd = scmd; |
5544 | |
5545 | request = io_request->iu; |
5546 | memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); |
5547 | |
5548 | request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; |
5549 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); |
5550 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
5551 | request->command_priority = io_high_prio; |
5552 | put_unaligned_le16(io_request->index, &request->request_id); |
5553 | request->error_index = request->request_id; |
5554 | memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); |
5555 | request->ml_device_lun_number = (u8)scmd->device->lun; |
5556 | |
5557 | cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); |
5558 | memcpy(request->cdb, scmd->cmnd, cdb_length); |
5559 | |
5560 | switch (cdb_length) { |
5561 | case 6: |
5562 | case 10: |
5563 | case 12: |
5564 | case 16: |
5565 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
5566 | break; |
5567 | case 20: |
5568 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; |
5569 | break; |
5570 | case 24: |
5571 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; |
5572 | break; |
5573 | case 28: |
5574 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; |
5575 | break; |
5576 | case 32: |
5577 | default: |
5578 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; |
5579 | break; |
5580 | } |
5581 | |
5582 | switch (scmd->sc_data_direction) { |
5583 | case DMA_FROM_DEVICE: |
5584 | request->data_direction = SOP_READ_FLAG; |
5585 | break; |
5586 | case DMA_TO_DEVICE: |
5587 | request->data_direction = SOP_WRITE_FLAG; |
5588 | break; |
5589 | case DMA_NONE: |
5590 | request->data_direction = SOP_NO_DIRECTION_FLAG; |
5591 | break; |
5592 | case DMA_BIDIRECTIONAL: |
5593 | request->data_direction = SOP_BIDIRECTIONAL; |
5594 | break; |
5595 | default: |
5596 | dev_err(&ctrl_info->pci_dev->dev, |
5597 | "unknown data direction: %d\n", |
5598 | scmd->sc_data_direction); |
5599 | break; |
5600 | } |
5601 | |
5602 | rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); |
5603 | if (rc) { |
5604 | pqi_free_io_request(io_request); |
5605 | return SCSI_MLQUEUE_HOST_BUSY; |
5606 | } |
5607 | |
5608 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); |
5609 | |
5610 | return 0; |
5611 | } |
5612 | |
5613 | static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
5614 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
5615 | struct pqi_queue_group *queue_group) |
5616 | { |
5617 | bool io_high_prio; |
5618 | |
5619 | io_high_prio = pqi_is_io_high_priority(device, scmd); |
5620 | |
5621 | return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); |
5622 | } |
5623 | |
5624 | static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) |
5625 | { |
5626 | struct scsi_cmnd *scmd; |
5627 | struct pqi_scsi_dev *device; |
5628 | struct pqi_ctrl_info *ctrl_info; |
5629 | |
5630 | if (!io_request->raid_bypass) |
5631 | return false; |
5632 | |
5633 | scmd = io_request->scmd; |
5634 | if ((scmd->result & 0xff) == SAM_STAT_GOOD) |
5635 | return false; |
5636 | if (host_byte(scmd->result) == DID_NO_CONNECT) |
5637 | return false; |
5638 | |
5639 | device = scmd->device->hostdata; |
5640 | if (pqi_device_offline(device) || pqi_device_in_remove(device)) |
5641 | return false; |
5642 | |
5643 | ctrl_info = shost_to_hba(scmd->device->host); |
5644 | if (pqi_ctrl_offline(ctrl_info)) |
5645 | return false; |
5646 | |
5647 | return true; |
5648 | } |
5649 | |
5650 | static void pqi_aio_io_complete(struct pqi_io_request *io_request, |
5651 | void *context) |
5652 | { |
5653 | struct scsi_cmnd *scmd; |
5654 | |
5655 | scmd = io_request->scmd; |
5656 | scsi_dma_unmap(scmd); |
5657 | if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { |
5658 | set_host_byte(scmd, DID_IMM_RETRY); |
5659 | pqi_cmd_priv(scmd)->this_residual++; |
5660 | } |
5661 | |
5662 | pqi_free_io_request(io_request); |
5663 | pqi_scsi_done(scmd); |
5664 | } |
5665 | |
5666 | static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
5667 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
5668 | struct pqi_queue_group *queue_group) |
5669 | { |
5670 | bool io_high_prio; |
5671 | |
5672 | io_high_prio = pqi_is_io_high_priority(device, scmd); |
5673 | |
5674 | return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, |
5675 | scmd->cmnd, scmd->cmd_len, queue_group, NULL, |
5676 | false, io_high_prio); |
5677 | } |
5678 | |
5679 | static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, |
5680 | struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, |
5681 | unsigned int cdb_length, struct pqi_queue_group *queue_group, |
5682 | struct pqi_encryption_info *encryption_info, bool raid_bypass, |
5683 | bool io_high_prio) |
5684 | { |
5685 | int rc; |
5686 | struct pqi_io_request *io_request; |
5687 | struct pqi_aio_path_request *request; |
5688 | |
5689 | io_request = pqi_alloc_io_request(ctrl_info, scmd); |
5690 | if (!io_request) |
5691 | return SCSI_MLQUEUE_HOST_BUSY; |
5692 | |
5693 | io_request->io_complete_callback = pqi_aio_io_complete; |
5694 | io_request->scmd = scmd; |
5695 | io_request->raid_bypass = raid_bypass; |
5696 | |
5697 | request = io_request->iu; |
5698 | memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); |
5699 | |
5700 | request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; |
5701 | put_unaligned_le32(aio_handle, &request->nexus_id); |
5702 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); |
5703 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
5704 | request->command_priority = io_high_prio; |
5705 | put_unaligned_le16(io_request->index, &request->request_id); |
5706 | request->error_index = request->request_id; |
5707 | if (!raid_bypass && ctrl_info->multi_lun_device_supported) |
5708 | put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); |
5709 | if (cdb_length > sizeof(request->cdb)) |
5710 | cdb_length = sizeof(request->cdb); |
5711 | request->cdb_length = cdb_length; |
5712 | memcpy(request->cdb, cdb, cdb_length); |
5713 | |
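| /* |
| * Note: on the AIO path the SOP direction flags appear inverted relative |
| * to scmd->sc_data_direction (DMA_TO_DEVICE maps to SOP_READ_FLAG); the |
| * RAID 1/5/6 write paths below likewise use SOP_READ_FLAG for writes. |
| */ |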
5714 | switch (scmd->sc_data_direction) { |
5715 | case DMA_TO_DEVICE: |
5716 | request->data_direction = SOP_READ_FLAG; |
5717 | break; |
5718 | case DMA_FROM_DEVICE: |
5719 | request->data_direction = SOP_WRITE_FLAG; |
5720 | break; |
5721 | case DMA_NONE: |
5722 | request->data_direction = SOP_NO_DIRECTION_FLAG; |
5723 | break; |
5724 | case DMA_BIDIRECTIONAL: |
5725 | request->data_direction = SOP_BIDIRECTIONAL; |
5726 | break; |
5727 | default: |
5728 | dev_err(&ctrl_info->pci_dev->dev, |
5729 | "unknown data direction: %d\n", |
5730 | scmd->sc_data_direction); |
5731 | break; |
5732 | } |
5733 | |
5734 | if (encryption_info) { |
5735 | request->encryption_enable = true; |
5736 | put_unaligned_le16(encryption_info->data_encryption_key_index, |
5737 | &request->data_encryption_key_index); |
5738 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, |
5739 | &request->encrypt_tweak_lower); |
5740 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, |
5741 | &request->encrypt_tweak_upper); |
5742 | } |
5743 | |
5744 | rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); |
5745 | if (rc) { |
5746 | pqi_free_io_request(io_request); |
5747 | return SCSI_MLQUEUE_HOST_BUSY; |
5748 | } |
5749 | |
5750 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); |
5751 | |
5752 | return 0; |
5753 | } |
5754 | |
5755 | static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, |
5756 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
5757 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
5758 | struct pqi_scsi_dev_raid_map_data *rmd) |
5759 | { |
5760 | int rc; |
5761 | struct pqi_io_request *io_request; |
5762 | struct pqi_aio_r1_path_request *r1_request; |
5763 | |
5764 | io_request = pqi_alloc_io_request(ctrl_info, scmd); |
5765 | if (!io_request) |
5766 | return SCSI_MLQUEUE_HOST_BUSY; |
5767 | |
5768 | io_request->io_complete_callback = pqi_aio_io_complete; |
5769 | io_request->scmd = scmd; |
5770 | io_request->raid_bypass = true; |
5771 | |
5772 | r1_request = io_request->iu; |
5773 | memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); |
5774 | |
5775 | r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; |
5776 | put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); |
5777 | r1_request->num_drives = rmd->num_it_nexus_entries; |
5778 | put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); |
5779 | put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); |
5780 | if (rmd->num_it_nexus_entries == 3) |
5781 | put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); |
5782 | |
5783 | put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); |
5784 | r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
5785 | put_unaligned_le16(io_request->index, &r1_request->request_id); |
5786 | r1_request->error_index = r1_request->request_id; |
5787 | if (rmd->cdb_length > sizeof(r1_request->cdb)) |
5788 | rmd->cdb_length = sizeof(r1_request->cdb); |
5789 | r1_request->cdb_length = rmd->cdb_length; |
5790 | memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); |
5791 | |
5792 | /* The direction is always write. */ |
5793 | r1_request->data_direction = SOP_READ_FLAG; |
5794 | |
5795 | if (encryption_info) { |
5796 | r1_request->encryption_enable = true; |
5797 | put_unaligned_le16(encryption_info->data_encryption_key_index, |
5798 | &r1_request->data_encryption_key_index); |
5799 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, |
5800 | &r1_request->encrypt_tweak_lower); |
5801 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, |
5802 | &r1_request->encrypt_tweak_upper); |
5803 | } |
5804 | |
5805 | rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); |
5806 | if (rc) { |
5807 | pqi_free_io_request(io_request); |
5808 | return SCSI_MLQUEUE_HOST_BUSY; |
5809 | } |
5810 | |
5811 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); |
5812 | |
5813 | return 0; |
5814 | } |
5815 | |
5816 | static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, |
5817 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
5818 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
5819 | struct pqi_scsi_dev_raid_map_data *rmd) |
5820 | { |
5821 | int rc; |
5822 | struct pqi_io_request *io_request; |
5823 | struct pqi_aio_r56_path_request *r56_request; |
5824 | |
5825 | io_request = pqi_alloc_io_request(ctrl_info, scmd); |
5826 | if (!io_request) |
5827 | return SCSI_MLQUEUE_HOST_BUSY; |
5828 | io_request->io_complete_callback = pqi_aio_io_complete; |
5829 | io_request->scmd = scmd; |
5830 | io_request->raid_bypass = true; |
5831 | |
5832 | r56_request = io_request->iu; |
5833 | memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); |
5834 | |
5835 | if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) |
5836 | r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; |
5837 | else |
5838 | r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; |
5839 | |
5840 | put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); |
5841 | put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); |
5842 | put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); |
5843 | if (rmd->raid_level == SA_RAID_6) { |
5844 | put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); |
5845 | r56_request->xor_multiplier = rmd->xor_mult; |
5846 | } |
5847 | put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); |
5848 | r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
5849 | put_unaligned_le64(rmd->row, &r56_request->row); |
5850 | |
5851 | put_unaligned_le16(io_request->index, &r56_request->request_id); |
5852 | r56_request->error_index = r56_request->request_id; |
5853 | |
5854 | if (rmd->cdb_length > sizeof(r56_request->cdb)) |
5855 | rmd->cdb_length = sizeof(r56_request->cdb); |
5856 | r56_request->cdb_length = rmd->cdb_length; |
5857 | memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); |
5858 | |
5859 | /* The direction is always write. */ |
5860 | r56_request->data_direction = SOP_READ_FLAG; |
5861 | |
5862 | if (encryption_info) { |
5863 | r56_request->encryption_enable = true; |
5864 | put_unaligned_le16(encryption_info->data_encryption_key_index, |
5865 | &r56_request->data_encryption_key_index); |
5866 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, |
5867 | &r56_request->encrypt_tweak_lower); |
5868 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, |
5869 | &r56_request->encrypt_tweak_upper); |
5870 | } |
5871 | |
5872 | rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); |
5873 | if (rc) { |
5874 | pqi_free_io_request(io_request); |
5875 | return SCSI_MLQUEUE_HOST_BUSY; |
5876 | } |
5877 | |
5878 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); |
5879 | |
5880 | return 0; |
5881 | } |
5882 | |
5883 | static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, |
5884 | struct scsi_cmnd *scmd) |
5885 | { |
5886 | /* |
5887 | * We are setting host_tagset = 1 during init. |
5888 | */ |
5889 | return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); |
5890 | } |
5891 | |
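| /* |
| * Pass-through requests and commands already retried after a failed RAID |
| * bypass (this_residual != 0, see pqi_aio_io_complete()) are not eligible |
| * for the bypass path. |
| */ |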
5892 | static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) |
5893 | { |
5894 | if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) |
5895 | return false; |
5896 | |
5897 | return pqi_cmd_priv(scmd)->this_residual == 0; |
5898 | } |
5899 | |
5900 | /* |
5901 | * This function gets called just before we hand the completed SCSI request |
5902 | * back to the SML. |
5903 | */ |
5904 | |
5905 | void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) |
5906 | { |
5907 | struct pqi_scsi_dev *device; |
5908 | struct completion *wait; |
5909 | |
5910 | if (!scmd->device) { |
5911 | set_host_byte(scmd, DID_NO_CONNECT); |
5912 | return; |
5913 | } |
5914 | |
5915 | device = scmd->device->hostdata; |
5916 | if (!device) { |
5917 | set_host_byte(scmd, DID_NO_CONNECT); |
5918 | return; |
5919 | } |
5920 | |
5921 | atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); |
5922 | |
5923 | wait = (struct completion *)xchg(&scmd->host_scribble, NULL); |
5924 | if (wait != PQI_NO_COMPLETION) |
5925 | complete(wait); |
5926 | } |
5927 | |
5928 | static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, |
5929 | struct scsi_cmnd *scmd) |
5930 | { |
5931 | u32 oldest_jiffies; |
5932 | u8 lru_index; |
5933 | int i; |
5934 | int rc; |
5935 | struct pqi_scsi_dev *device; |
5936 | struct pqi_stream_data *pqi_stream_data; |
5937 | struct pqi_scsi_dev_raid_map_data rmd; |
5938 | |
5939 | if (!ctrl_info->enable_stream_detection) |
5940 | return false; |
5941 | |
5942 | rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); |
5943 | if (rc) |
5944 | return false; |
5945 | |
5946 | /* Check writes only. */ |
5947 | if (!rmd.is_write) |
5948 | return false; |
5949 | |
5950 | device = scmd->device->hostdata; |
5951 | |
5952 | /* Check for RAID 5/6 streams. */ |
5953 | if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) |
5954 | return false; |
5955 | |
5956 | /* |
5957 | * If the controller does not support AIO RAID{5,6} writes, the request |
5958 | * must go down the non-AIO path. |
5959 | */ |
5960 | if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || |
5961 | (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) |
5962 | return true; |
5963 | |
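| /* |
| * Scan the per-LUN stream table: a write adjacent to (or within) a |
| * tracked stream extends that stream and is treated as sequential; |
| * otherwise the least recently used slot is recycled below. |
| */ |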
5964 | lru_index = 0; |
5965 | oldest_jiffies = INT_MAX; |
5966 | for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { |
5967 | pqi_stream_data = &device->stream_data[i]; |
5968 | /* |
5969 | * Check whether this request is adjacent to, or falls within, |
5970 | * the previous request. |
5971 | */ |
5972 | if ((pqi_stream_data->next_lba && |
5973 | rmd.first_block >= pqi_stream_data->next_lba) && |
5974 | rmd.first_block <= pqi_stream_data->next_lba + |
5975 | rmd.block_cnt) { |
5976 | pqi_stream_data->next_lba = rmd.first_block + |
5977 | rmd.block_cnt; |
5978 | pqi_stream_data->last_accessed = jiffies; |
5979 | return true; |
5980 | } |
5981 | |
5982 | /* unused entry */ |
5983 | if (pqi_stream_data->last_accessed == 0) { |
5984 | lru_index = i; |
5985 | break; |
5986 | } |
5987 | |
5988 | /* Find entry with oldest last accessed time. */ |
5989 | if (pqi_stream_data->last_accessed <= oldest_jiffies) { |
5990 | oldest_jiffies = pqi_stream_data->last_accessed; |
5991 | lru_index = i; |
5992 | } |
5993 | } |
5994 | |
5995 | /* Set LRU entry. */ |
5996 | pqi_stream_data = &device->stream_data[lru_index]; |
5997 | pqi_stream_data->last_accessed = jiffies; |
5998 | pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; |
5999 | |
6000 | return false; |
6001 | } |
6002 | |
6003 | static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
6004 | { |
6005 | int rc; |
6006 | struct pqi_ctrl_info *ctrl_info; |
6007 | struct pqi_scsi_dev *device; |
6008 | u16 hw_queue; |
6009 | struct pqi_queue_group *queue_group; |
6010 | bool raid_bypassed; |
6011 | u8 lun; |
6012 | |
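| /* |
| * host_scribble doubles as an optional completion pointer (see |
| * pqi_prep_for_scsi_done()); initialize it to PQI_NO_COMPLETION so an |
| * early completion is not mistaken for a waiter. |
| */ |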
6013 | scmd->host_scribble = PQI_NO_COMPLETION; |
6014 | |
6015 | device = scmd->device->hostdata; |
6016 | |
6017 | if (!device) { |
6018 | set_host_byte(scmd, DID_NO_CONNECT); |
6019 | pqi_scsi_done(scmd); |
6020 | return 0; |
6021 | } |
6022 | |
6023 | lun = (u8)scmd->device->lun; |
6024 | |
6025 | atomic_inc(&device->scsi_cmds_outstanding[lun]); |
6026 | |
6027 | ctrl_info = shost_to_hba(shost); |
6028 | |
6029 | if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { |
6030 | set_host_byte(scmd, DID_NO_CONNECT); |
6031 | pqi_scsi_done(scmd); |
6032 | return 0; |
6033 | } |
6034 | |
6035 | if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { |
6036 | rc = SCSI_MLQUEUE_HOST_BUSY; |
6037 | goto out; |
6038 | } |
6039 | |
6040 | /* |
6041 | * This is necessary because the SML doesn't zero out this field during |
6042 | * error recovery. |
6043 | */ |
6044 | scmd->result = 0; |
6045 | |
6046 | hw_queue = pqi_get_hw_queue(ctrl_info, scmd); |
6047 | queue_group = &ctrl_info->queue_groups[hw_queue]; |
6048 | |
6049 | if (pqi_is_logical_device(device)) { |
6050 | raid_bypassed = false; |
6051 | if (device->raid_bypass_enabled && |
6052 | pqi_is_bypass_eligible_request(scmd) && |
6053 | !pqi_is_parity_write_stream(ctrl_info, scmd)) { |
6054 | rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6055 | if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { |
6056 | raid_bypassed = true; |
6057 | device->raid_bypass_cnt++; |
6058 | } |
6059 | } |
6060 | if (!raid_bypassed) |
6061 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6062 | } else { |
6063 | if (device->aio_enabled) |
6064 | rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6065 | else |
6066 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6067 | } |
6068 | |
6069 | out: |
6070 | if (rc) { |
6071 | scmd->host_scribble = NULL; |
6072 | atomic_dec(&device->scsi_cmds_outstanding[lun]); |
6073 | } |
6074 | |
6075 | return rc; |
6076 | } |
6077 | |
6078 | static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) |
6079 | { |
6080 | unsigned int i; |
6081 | unsigned int path; |
6082 | unsigned long flags; |
6083 | unsigned int queued_io_count; |
6084 | struct pqi_queue_group *queue_group; |
6085 | struct pqi_io_request *io_request; |
6086 | |
6087 | queued_io_count = 0; |
6088 | |
6089 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
6090 | queue_group = &ctrl_info->queue_groups[i]; |
6091 | for (path = 0; path < 2; path++) { |
6092 | spin_lock_irqsave(&queue_group->submit_lock[path], flags); |
6093 | list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) |
6094 | queued_io_count++; |
6095 | spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); |
6096 | } |
6097 | } |
6098 | |
6099 | return queued_io_count; |
6100 | } |
6101 | |
6102 | static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) |
6103 | { |
6104 | unsigned int i; |
6105 | unsigned int path; |
6106 | unsigned int nonempty_inbound_queue_count; |
6107 | struct pqi_queue_group *queue_group; |
6108 | pqi_index_t iq_pi; |
6109 | pqi_index_t iq_ci; |
6110 | |
6111 | nonempty_inbound_queue_count = 0; |
6112 | |
6113 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
6114 | queue_group = &ctrl_info->queue_groups[i]; |
6115 | for (path = 0; path < 2; path++) { |
6116 | iq_pi = queue_group->iq_pi_copy[path]; |
6117 | iq_ci = readl(queue_group->iq_ci[path]); |
6118 | if (iq_ci != iq_pi) |
6119 | nonempty_inbound_queue_count++; |
6120 | } |
6121 | } |
6122 | |
6123 | return nonempty_inbound_queue_count; |
6124 | } |
6125 | |
6126 | #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 |
6127 | |
6128 | static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) |
6129 | { |
6130 | unsigned long start_jiffies; |
6131 | unsigned long warning_timeout; |
6132 | unsigned int queued_io_count; |
6133 | unsigned int nonempty_inbound_queue_count; |
6134 | bool displayed_warning; |
6135 | |
6136 | displayed_warning = false; |
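| /* |
| * Wait until both the driver's per-group request lists and the hardware |
| * inbound queues (PI == CI) have drained, warning periodically. |
| */ |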
6137 | start_jiffies = jiffies; |
6138 | warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; |
6139 | |
6140 | while (1) { |
6141 | queued_io_count = pqi_queued_io_count(ctrl_info); |
6142 | nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); |
6143 | if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) |
6144 | break; |
6145 | pqi_check_ctrl_health(ctrl_info); |
6146 | if (pqi_ctrl_offline(ctrl_info)) |
6147 | return -ENXIO; |
6148 | if (time_after(jiffies, warning_timeout)) { |
6149 | dev_warn(&ctrl_info->pci_dev->dev, |
6150 | "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n", |
6151 | jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); |
6152 | displayed_warning = true; |
6153 | warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; |
6154 | } |
6155 | usleep_range(1000, 2000); |
6156 | } |
6157 | |
6158 | if (displayed_warning) |
6159 | dev_warn(&ctrl_info->pci_dev->dev, |
6160 | "queued I/O drained after waiting for %u seconds\n", |
6161 | jiffies_to_msecs(jiffies - start_jiffies) / 1000); |
6162 | |
6163 | return 0; |
6164 | } |
6165 | |
6166 | static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, |
6167 | struct pqi_scsi_dev *device, u8 lun) |
6168 | { |
6169 | unsigned int i; |
6170 | unsigned int path; |
6171 | struct pqi_queue_group *queue_group; |
6172 | unsigned long flags; |
6173 | struct pqi_io_request *io_request; |
6174 | struct pqi_io_request *next; |
6175 | struct scsi_cmnd *scmd; |
6176 | struct pqi_scsi_dev *scsi_device; |
6177 | |
6178 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
6179 | queue_group = &ctrl_info->queue_groups[i]; |
6180 | |
6181 | for (path = 0; path < 2; path++) { |
6182 | spin_lock_irqsave( |
6183 | &queue_group->submit_lock[path], flags); |
6184 | |
6185 | list_for_each_entry_safe(io_request, next, |
6186 | &queue_group->request_list[path], |
6187 | request_list_entry) { |
6188 | |
6189 | scmd = io_request->scmd; |
6190 | if (!scmd) |
6191 | continue; |
6192 | |
6193 | scsi_device = scmd->device->hostdata; |
6194 | if (scsi_device != device) |
6195 | continue; |
6196 | |
6197 | if ((u8)scmd->device->lun != lun) |
6198 | continue; |
6199 | |
6200 | list_del(&io_request->request_list_entry); |
6201 | set_host_byte(scmd, DID_RESET); |
6202 | pqi_free_io_request(io_request); |
6203 | scsi_dma_unmap(scmd); |
6204 | pqi_scsi_done(scmd); |
6205 | } |
6206 | |
6207 | spin_unlock_irqrestore( |
6208 | &queue_group->submit_lock[path], flags); |
6209 | } |
6210 | } |
6211 | } |
6212 | |
6213 | #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 |
6214 | |
6215 | static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
6216 | struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) |
6217 | { |
6218 | int cmds_outstanding; |
6219 | unsigned long start_jiffies; |
6220 | unsigned long warning_timeout; |
6221 | unsigned long msecs_waiting; |
6222 | |
6223 | start_jiffies = jiffies; |
6224 | warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; |
6225 | |
6226 | while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { |
6227 | if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { |
6228 | pqi_check_ctrl_health(ctrl_info); |
6229 | if (pqi_ctrl_offline(ctrl_info)) |
6230 | return -ENXIO; |
6231 | } |
6232 | msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); |
6233 | if (msecs_waiting >= timeout_msecs) { |
6234 | dev_err(&ctrl_info->pci_dev->dev, |
6235 | "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", |
6236 | ctrl_info->scsi_host->host_no, device->bus, device->target, |
6237 | lun, msecs_waiting / 1000, cmds_outstanding); |
6238 | return -ETIMEDOUT; |
6239 | } |
6240 | if (time_after(jiffies, warning_timeout)) { |
6241 | dev_warn(&ctrl_info->pci_dev->dev, |
6242 | "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", |
6243 | ctrl_info->scsi_host->host_no, device->bus, device->target, |
6244 | lun, msecs_waiting / 1000, cmds_outstanding); |
6245 | warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; |
6246 | } |
6247 | usleep_range(1000, 2000); |
6248 | } |
6249 | |
6250 | return 0; |
6251 | } |
6252 | |
6253 | static void pqi_lun_reset_complete(struct pqi_io_request *io_request, |
6254 | void *context) |
6255 | { |
6256 | struct completion *waiting = context; |
6257 | |
6258 | complete(waiting); |
6259 | } |
6260 | |
6261 | #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 |
6262 | |
6263 | static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, |
6264 | struct pqi_scsi_dev *device, u8 lun, struct completion *wait) |
6265 | { |
6266 | int rc; |
6267 | unsigned int wait_secs; |
6268 | int cmds_outstanding; |
6269 | |
6270 | wait_secs = 0; |
6271 | |
6272 | while (1) { |
6273 | if (wait_for_completion_io_timeout(wait, |
6274 | PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { |
6275 | rc = 0; |
6276 | break; |
6277 | } |
6278 | |
6279 | pqi_check_ctrl_health(ctrl_info); |
6280 | if (pqi_ctrl_offline(ctrl_info)) { |
6281 | rc = -ENXIO; |
6282 | break; |
6283 | } |
6284 | |
6285 | wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; |
6286 | cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); |
6287 | dev_warn(&ctrl_info->pci_dev->dev, |
6288 | "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", |
6289 | ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); |
6290 | } |
6291 | |
6292 | return rc; |
6293 | } |
6294 | |
6295 | #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 |
6296 | |
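 | /* |
 |  * Build a SOP LUN RESET task management IU, submit it on the RAID path, and |
 |  * wait for the firmware to complete it. |
 |  */ |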
6297 | static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) |
6298 | { |
6299 | int rc; |
6300 | struct pqi_io_request *io_request; |
6301 | DECLARE_COMPLETION_ONSTACK(wait); |
6302 | struct pqi_task_management_request *request; |
6303 | |
6304 | io_request = pqi_alloc_io_request(ctrl_info, NULL); |
6305 | io_request->io_complete_callback = pqi_lun_reset_complete; |
6306 | io_request->context = &wait; |
6307 | |
6308 | request = io_request->iu; |
6309 | memset(request, 0, sizeof(*request)); |
6310 | |
6311 | request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; |
6312 | put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, |
6313 | &request->header.iu_length); |
6314 | put_unaligned_le16(io_request->index, &request->request_id); |
6315 | memcpy(request->lun_number, device->scsi3addr, |
6316 | sizeof(request->lun_number)); |
6317 | if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) |
6318 | request->ml_device_lun_number = lun; |
6319 | request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; |
6320 | if (ctrl_info->tmf_iu_timeout_supported) |
6321 | put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, p: &request->timeout); |
6322 | put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); |
6323 | pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, |
6324 | io_request); |
6325 | |
6326 | rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); |
6327 | if (rc == 0) |
6328 | rc = io_request->status; |
6329 | |
6330 | pqi_free_io_request(io_request); |
6331 | |
6332 | return rc; |
6333 | } |
6334 | |
6335 | #define PQI_LUN_RESET_RETRIES 3 |
6336 | #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) |
6337 | #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) |
6338 | #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) |
6339 | |
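 | /* |
 |  * Retry the LUN reset a limited number of times, then wait for any remaining |
 |  * I/O on the LUN to drain; returns SUCCESS or FAILED for the SCSI EH layer. |
 |  */ |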
6340 | static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) |
6341 | { |
6342 | int reset_rc; |
6343 | int wait_rc; |
6344 | unsigned int retries; |
6345 | unsigned long timeout_msecs; |
6346 | |
6347 | for (retries = 0;;) { |
6348 | reset_rc = pqi_lun_reset(ctrl_info, device, lun); |
6349 | if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) |
6350 | break; |
6351 | msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); |
6352 | } |
6353 | |
6354 | timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : |
6355 | PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; |
6356 | |
6357 | wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); |
6358 | if (wait_rc && reset_rc == 0) |
6359 | reset_rc = wait_rc; |
6360 | |
6361 | return reset_rc == 0 ? SUCCESS : FAILED; |
6362 | } |
6363 | |
6364 | static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) |
6365 | { |
6366 | int rc; |
6367 | |
6368 | pqi_ctrl_block_requests(ctrl_info); |
6369 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
6370 | pqi_fail_io_queued_for_device(ctrl_info, device, lun); |
6371 | rc = pqi_wait_until_inbound_queues_empty(ctrl_info); |
6372 | pqi_device_reset_start(device, lun); |
6373 | pqi_ctrl_unblock_requests(ctrl_info); |
6374 | if (rc) |
6375 | rc = FAILED; |
6376 | else |
6377 | rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); |
6378 | pqi_device_reset_done(device, lun); |
6379 | |
6380 | return rc; |
6381 | } |
6382 | |
6383 | static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) |
6384 | { |
6385 | int rc; |
6386 | |
6387 | mutex_lock(&ctrl_info->lun_reset_mutex); |
6388 | |
6389 | dev_err(&ctrl_info->pci_dev->dev, |
6390 | "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", |
6391 | ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); |
6392 | |
6393 | pqi_check_ctrl_health(ctrl_info); |
6394 | if (pqi_ctrl_offline(ctrl_info)) |
6395 | rc = FAILED; |
6396 | else |
6397 | rc = pqi_device_reset(ctrl_info, device, lun); |
6398 | |
6399 | dev_err(&ctrl_info->pci_dev->dev, |
6400 | "reset of scsi %d:%d:%d:%u: %s\n", |
6401 | ctrl_info->scsi_host->host_no, device->bus, device->target, lun, |
6402 | rc == SUCCESS ? "SUCCESS" : "FAILED"); |
6403 | |
6404 | mutex_unlock(&ctrl_info->lun_reset_mutex); |
6405 | |
6406 | return rc; |
6407 | } |
6408 | |
6409 | static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) |
6410 | { |
6411 | struct Scsi_Host *shost; |
6412 | struct pqi_ctrl_info *ctrl_info; |
6413 | struct pqi_scsi_dev *device; |
6414 | u8 scsi_opcode; |
6415 | |
6416 | shost = scmd->device->host; |
6417 | ctrl_info = shost_to_hba(shost); |
6418 | device = scmd->device->hostdata; |
6419 | scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; |
6420 | |
6421 | return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); |
6422 | } |
6423 | |
6424 | static void pqi_tmf_worker(struct work_struct *work) |
6425 | { |
6426 | struct pqi_tmf_work *tmf_work; |
6427 | struct scsi_cmnd *scmd; |
6428 | |
6429 | tmf_work = container_of(work, struct pqi_tmf_work, work_struct); |
6430 | scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); |
6431 | |
6432 | pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); |
6433 | } |
6434 | |
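 | /* |
 |  * SCSI EH abort handler: if the command has not already completed, hand it to |
 |  * the TMF worker (which performs a device reset) and wait for the command to |
 |  * be completed back to the midlayer. |
 |  */ |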
6435 | static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) |
6436 | { |
6437 | struct Scsi_Host *shost; |
6438 | struct pqi_ctrl_info *ctrl_info; |
6439 | struct pqi_scsi_dev *device; |
6440 | struct pqi_tmf_work *tmf_work; |
6441 | DECLARE_COMPLETION_ONSTACK(wait); |
6442 | |
6443 | shost = scmd->device->host; |
6444 | ctrl_info = shost_to_hba(shost); |
6445 | device = scmd->device->hostdata; |
6446 | |
6447 | dev_err(&ctrl_info->pci_dev->dev, |
6448 | "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", |
6449 | shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); |
6450 | |
6451 | if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { |
6452 | dev_err(&ctrl_info->pci_dev->dev, |
6453 | "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", |
6454 | shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); |
6455 | scmd->result = DID_RESET << 16; |
6456 | goto out; |
6457 | } |
6458 | |
6459 | tmf_work = &device->tmf_work[scmd->device->lun]; |
6460 | |
6461 | if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { |
6462 | tmf_work->ctrl_info = ctrl_info; |
6463 | tmf_work->device = device; |
6464 | tmf_work->lun = (u8)scmd->device->lun; |
6465 | tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; |
6466 | schedule_work(&tmf_work->work_struct); |
6467 | } |
6468 | |
6469 | wait_for_completion(&wait); |
6470 | |
6471 | dev_err(&ctrl_info->pci_dev->dev, |
6472 | "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", |
6473 | shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); |
6474 | |
6475 | out: |
6476 | |
6477 | return SUCCESS; |
6478 | } |
6479 | |
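 | /* |
 |  * Bind the driver's pqi_scsi_dev to the newly allocated scsi_device and apply |
 |  * per-device settings (queue depth, WRITE SAME, allow_restart). |
 |  */ |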
6480 | static int pqi_slave_alloc(struct scsi_device *sdev) |
6481 | { |
6482 | struct pqi_scsi_dev *device; |
6483 | unsigned long flags; |
6484 | struct pqi_ctrl_info *ctrl_info; |
6485 | struct scsi_target *starget; |
6486 | struct sas_rphy *rphy; |
6487 | |
6488 | ctrl_info = shost_to_hba(sdev->host); |
6489 | |
6490 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
6491 | |
6492 | if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { |
6493 | starget = scsi_target(sdev); |
6494 | rphy = target_to_rphy(starget); |
6495 | device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); |
6496 | if (device) { |
6497 | if (device->target_lun_valid) { |
6498 | device->ignore_device = true; |
6499 | } else { |
6500 | device->target = sdev_id(sdev); |
6501 | device->lun = sdev->lun; |
6502 | device->target_lun_valid = true; |
6503 | } |
6504 | } |
6505 | } else { |
6506 | device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), |
6507 | sdev_id(sdev), sdev->lun); |
6508 | } |
6509 | |
6510 | if (device) { |
6511 | sdev->hostdata = device; |
6512 | device->sdev = sdev; |
6513 | if (device->queue_depth) { |
6514 | device->advertised_queue_depth = device->queue_depth; |
6515 | scsi_change_queue_depth(sdev, |
6516 | device->advertised_queue_depth); |
6517 | } |
6518 | if (pqi_is_logical_device(device)) { |
6519 | pqi_disable_write_same(sdev); |
6520 | } else { |
6521 | sdev->allow_restart = 1; |
6522 | if (device->device_type == SA_DEVICE_TYPE_NVME) |
6523 | pqi_disable_write_same(sdev); |
6524 | } |
6525 | } |
6526 | |
6527 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
6528 | |
6529 | return 0; |
6530 | } |
6531 | |
6532 | static void pqi_map_queues(struct Scsi_Host *shost) |
6533 | { |
6534 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
6535 | |
6536 | if (!ctrl_info->disable_managed_interrupts) |
6537 | return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], |
6538 | ctrl_info->pci_dev, 0); |
6539 | else |
6540 | return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); |
6541 | } |
6542 | |
6543 | static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) |
6544 | { |
6545 | return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; |
6546 | } |
6547 | |
6548 | static int pqi_slave_configure(struct scsi_device *sdev) |
6549 | { |
6550 | int rc = 0; |
6551 | struct pqi_scsi_dev *device; |
6552 | |
6553 | device = sdev->hostdata; |
6554 | device->devtype = sdev->type; |
6555 | |
6556 | if (pqi_is_tape_changer_device(device) && device->ignore_device) { |
6557 | rc = -ENXIO; |
6558 | device->ignore_device = false; |
6559 | } |
6560 | |
6561 | return rc; |
6562 | } |
6563 | |
6564 | static void pqi_slave_destroy(struct scsi_device *sdev) |
6565 | { |
6566 | struct pqi_ctrl_info *ctrl_info; |
6567 | struct pqi_scsi_dev *device; |
6568 | int mutex_acquired; |
6569 | unsigned long flags; |
6570 | |
6571 | ctrl_info = shost_to_hba(sdev->host); |
6572 | |
6573 | mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); |
6574 | if (!mutex_acquired) |
6575 | return; |
6576 | |
6577 | device = sdev->hostdata; |
6578 | if (!device) { |
6579 | mutex_unlock(&ctrl_info->scan_mutex); |
6580 | return; |
6581 | } |
6582 | |
6583 | device->lun_count--; |
6584 | if (device->lun_count > 0) { |
6585 | mutex_unlock(&ctrl_info->scan_mutex); |
6586 | return; |
6587 | } |
6588 | |
6589 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
6590 | list_del(&device->scsi_device_list_entry); |
6591 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
6592 | |
6593 | mutex_unlock(&ctrl_info->scan_mutex); |
6594 | |
6595 | pqi_dev_info(ctrl_info, "removed", device); |
6596 | pqi_free_device(device); |
6597 | } |
6598 | |
6599 | static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) |
6600 | { |
6601 | struct pci_dev *pci_dev; |
6602 | u32 subsystem_vendor; |
6603 | u32 subsystem_device; |
6604 | cciss_pci_info_struct pci_info; |
6605 | |
6606 | if (!arg) |
6607 | return -EINVAL; |
6608 | |
6609 | pci_dev = ctrl_info->pci_dev; |
6610 | |
6611 | pci_info.domain = pci_domain_nr(pci_dev->bus); |
6612 | pci_info.bus = pci_dev->bus->number; |
6613 | pci_info.dev_fn = pci_dev->devfn; |
6614 | subsystem_vendor = pci_dev->subsystem_vendor; |
6615 | subsystem_device = pci_dev->subsystem_device; |
6616 | pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; |
6617 | |
6618 | if (copy_to_user(arg, &pci_info, sizeof(pci_info))) |
6619 | return -EFAULT; |
6620 | |
6621 | return 0; |
6622 | } |
6623 | |
6624 | static int pqi_getdrivver_ioctl(void __user *arg) |
6625 | { |
6626 | u32 version; |
6627 | |
6628 | if (!arg) |
6629 | return -EINVAL; |
6630 | |
6631 | version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | |
6632 | (DRIVER_RELEASE << 16) | DRIVER_REVISION; |
6633 | |
6634 | if (copy_to_user(arg, &version, sizeof(version))) |
6635 | return -EFAULT; |
6636 | |
6637 | return 0; |
6638 | } |
6639 | |
6640 | struct ciss_error_info { |
6641 | u8 scsi_status; |
6642 | int command_status; |
6643 | size_t sense_data_length; |
6644 | }; |
6645 | |
6646 | static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, |
6647 | struct ciss_error_info *ciss_error_info) |
6648 | { |
6649 | int ciss_cmd_status; |
6650 | size_t sense_data_length; |
6651 | |
6652 | switch (pqi_error_info->data_out_result) { |
6653 | case PQI_DATA_IN_OUT_GOOD: |
6654 | ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; |
6655 | break; |
6656 | case PQI_DATA_IN_OUT_UNDERFLOW: |
6657 | ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; |
6658 | break; |
6659 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: |
6660 | ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; |
6661 | break; |
6662 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: |
6663 | case PQI_DATA_IN_OUT_BUFFER_ERROR: |
6664 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: |
6665 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: |
6666 | case PQI_DATA_IN_OUT_ERROR: |
6667 | ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; |
6668 | break; |
6669 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: |
6670 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: |
6671 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: |
6672 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: |
6673 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: |
6674 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: |
6675 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: |
6676 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: |
6677 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: |
6678 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: |
6679 | ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; |
6680 | break; |
6681 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: |
6682 | ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; |
6683 | break; |
6684 | case PQI_DATA_IN_OUT_ABORTED: |
6685 | ciss_cmd_status = CISS_CMD_STATUS_ABORTED; |
6686 | break; |
6687 | case PQI_DATA_IN_OUT_TIMEOUT: |
6688 | ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; |
6689 | break; |
6690 | default: |
6691 | ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; |
6692 | break; |
6693 | } |
6694 | |
6695 | sense_data_length = |
6696 | get_unaligned_le16(&pqi_error_info->sense_data_length); |
6697 | if (sense_data_length == 0) |
6698 | sense_data_length = |
6699 | get_unaligned_le16(&pqi_error_info->response_data_length); |
6700 | if (sense_data_length) |
6701 | if (sense_data_length > sizeof(pqi_error_info->data)) |
6702 | sense_data_length = sizeof(pqi_error_info->data); |
6703 | |
6704 | ciss_error_info->scsi_status = pqi_error_info->status; |
6705 | ciss_error_info->command_status = ciss_cmd_status; |
6706 | ciss_error_info->sense_data_length = sense_data_length; |
6707 | } |
6708 | |
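 | /* |
 |  * CCISS_PASSTHRU: validate and copy in the user request, issue it as a |
 |  * synchronous RAID path request, then copy status, sense data, and any read |
 |  * data back to user space. |
 |  */ |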
6709 | static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) |
6710 | { |
6711 | int rc; |
6712 | char *kernel_buffer = NULL; |
6713 | u16 iu_length; |
6714 | size_t sense_data_length; |
6715 | IOCTL_Command_struct iocommand; |
6716 | struct pqi_raid_path_request request; |
6717 | struct pqi_raid_error_info pqi_error_info; |
6718 | struct ciss_error_info ciss_error_info; |
6719 | |
6720 | if (pqi_ctrl_offline(ctrl_info)) |
6721 | return -ENXIO; |
6722 | if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) |
6723 | return -EBUSY; |
6724 | if (!arg) |
6725 | return -EINVAL; |
6726 | if (!capable(CAP_SYS_RAWIO)) |
6727 | return -EPERM; |
6728 | if (copy_from_user(&iocommand, arg, sizeof(iocommand))) |
6729 | return -EFAULT; |
6730 | if (iocommand.buf_size < 1 && |
6731 | iocommand.Request.Type.Direction != XFER_NONE) |
6732 | return -EINVAL; |
6733 | if (iocommand.Request.CDBLen > sizeof(request.cdb)) |
6734 | return -EINVAL; |
6735 | if (iocommand.Request.Type.Type != TYPE_CMD) |
6736 | return -EINVAL; |
6737 | |
6738 | switch (iocommand.Request.Type.Direction) { |
6739 | case XFER_NONE: |
6740 | case XFER_WRITE: |
6741 | case XFER_READ: |
6742 | case XFER_READ | XFER_WRITE: |
6743 | break; |
6744 | default: |
6745 | return -EINVAL; |
6746 | } |
6747 | |
6748 | if (iocommand.buf_size > 0) { |
6749 | kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); |
6750 | if (!kernel_buffer) |
6751 | return -ENOMEM; |
6752 | if (iocommand.Request.Type.Direction & XFER_WRITE) { |
6753 | if (copy_from_user(kernel_buffer, iocommand.buf, |
6754 | iocommand.buf_size)) { |
6755 | rc = -EFAULT; |
6756 | goto out; |
6757 | } |
6758 | } else { |
6759 | memset(kernel_buffer, 0, iocommand.buf_size); |
6760 | } |
6761 | } |
6762 | |
6763 | memset(&request, 0, sizeof(request)); |
6764 | |
6765 | request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; |
6766 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - |
6767 | PQI_REQUEST_HEADER_LENGTH; |
6768 | memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, |
6769 | sizeof(request.lun_number)); |
6770 | memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); |
6771 | request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
6772 | |
6773 | switch (iocommand.Request.Type.Direction) { |
6774 | case XFER_NONE: |
6775 | request.data_direction = SOP_NO_DIRECTION_FLAG; |
6776 | break; |
6777 | case XFER_WRITE: |
6778 | request.data_direction = SOP_WRITE_FLAG; |
6779 | break; |
6780 | case XFER_READ: |
6781 | request.data_direction = SOP_READ_FLAG; |
6782 | break; |
6783 | case XFER_READ | XFER_WRITE: |
6784 | request.data_direction = SOP_BIDIRECTIONAL; |
6785 | break; |
6786 | } |
6787 | |
6788 | request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
6789 | |
6790 | if (iocommand.buf_size > 0) { |
6791 | put_unaligned_le32(iocommand.buf_size, &request.buffer_length); |
6792 | |
6793 | rc = pqi_map_single(ctrl_info->pci_dev, |
6794 | &request.sg_descriptors[0], kernel_buffer, |
6795 | iocommand.buf_size, DMA_BIDIRECTIONAL); |
6796 | if (rc) |
6797 | goto out; |
6798 | |
6799 | iu_length += sizeof(request.sg_descriptors[0]); |
6800 | } |
6801 | |
6802 | put_unaligned_le16(iu_length, &request.header.iu_length); |
6803 | |
6804 | if (ctrl_info->raid_iu_timeout_supported) |
6805 | put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); |
6806 | |
6807 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, |
6808 | PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); |
6809 | |
6810 | if (iocommand.buf_size > 0) |
6811 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, |
6812 | DMA_BIDIRECTIONAL); |
6813 | |
6814 | memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); |
6815 | |
6816 | if (rc == 0) { |
6817 | pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); |
6818 | iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; |
6819 | iocommand.error_info.CommandStatus = |
6820 | ciss_error_info.command_status; |
6821 | sense_data_length = ciss_error_info.sense_data_length; |
6822 | if (sense_data_length) { |
6823 | if (sense_data_length > |
6824 | sizeof(iocommand.error_info.SenseInfo)) |
6825 | sense_data_length = |
6826 | sizeof(iocommand.error_info.SenseInfo); |
6827 | memcpy(iocommand.error_info.SenseInfo, |
6828 | pqi_error_info.data, sense_data_length); |
6829 | iocommand.error_info.SenseLen = sense_data_length; |
6830 | } |
6831 | } |
6832 | |
6833 | if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { |
6834 | rc = -EFAULT; |
6835 | goto out; |
6836 | } |
6837 | |
6838 | if (rc == 0 && iocommand.buf_size > 0 && |
6839 | (iocommand.Request.Type.Direction & XFER_READ)) { |
6840 | if (copy_to_user(iocommand.buf, kernel_buffer, |
6841 | iocommand.buf_size)) { |
6842 | rc = -EFAULT; |
6843 | } |
6844 | } |
6845 | |
6846 | out: |
6847 | kfree(kernel_buffer); |
6848 | |
6849 | return rc; |
6850 | } |
6851 | |
6852 | static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, |
6853 | void __user *arg) |
6854 | { |
6855 | int rc; |
6856 | struct pqi_ctrl_info *ctrl_info; |
6857 | |
6858 | ctrl_info = shost_to_hba(sdev->host); |
6859 | |
6860 | switch (cmd) { |
6861 | case CCISS_DEREGDISK: |
6862 | case CCISS_REGNEWDISK: |
6863 | case CCISS_REGNEWD: |
6864 | rc = pqi_scan_scsi_devices(ctrl_info); |
6865 | break; |
6866 | case CCISS_GETPCIINFO: |
6867 | rc = pqi_getpciinfo_ioctl(ctrl_info, arg); |
6868 | break; |
6869 | case CCISS_GETDRIVVER: |
6870 | rc = pqi_getdrivver_ioctl(arg); |
6871 | break; |
6872 | case CCISS_PASSTHRU: |
6873 | rc = pqi_passthru_ioctl(ctrl_info, arg); |
6874 | break; |
6875 | default: |
6876 | rc = -EINVAL; |
6877 | break; |
6878 | } |
6879 | |
6880 | return rc; |
6881 | } |
6882 | |
6883 | static ssize_t pqi_firmware_version_show(struct device *dev, |
6884 | struct device_attribute *attr, char *buffer) |
6885 | { |
6886 | struct Scsi_Host *shost; |
6887 | struct pqi_ctrl_info *ctrl_info; |
6888 | |
6889 | shost = class_to_shost(dev); |
6890 | ctrl_info = shost_to_hba(shost); |
6891 | |
6892 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); |
6893 | } |
6894 | |
6895 | static ssize_t pqi_driver_version_show(struct device *dev, |
6896 | struct device_attribute *attr, char *buffer) |
6897 | { |
6898 | return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); |
6899 | } |
6900 | |
6901 | static ssize_t pqi_serial_number_show(struct device *dev, |
6902 | struct device_attribute *attr, char *buffer) |
6903 | { |
6904 | struct Scsi_Host *shost; |
6905 | struct pqi_ctrl_info *ctrl_info; |
6906 | |
6907 | shost = class_to_shost(dev); |
6908 | ctrl_info = shost_to_hba(shost); |
6909 | |
6910 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); |
6911 | } |
6912 | |
6913 | static ssize_t pqi_model_show(struct device *dev, |
6914 | struct device_attribute *attr, char *buffer) |
6915 | { |
6916 | struct Scsi_Host *shost; |
6917 | struct pqi_ctrl_info *ctrl_info; |
6918 | |
6919 | shost = class_to_shost(dev); |
6920 | ctrl_info = shost_to_hba(shost); |
6921 | |
6922 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); |
6923 | } |
6924 | |
6925 | static ssize_t pqi_vendor_show(struct device *dev, |
6926 | struct device_attribute *attr, char *buffer) |
6927 | { |
6928 | struct Scsi_Host *shost; |
6929 | struct pqi_ctrl_info *ctrl_info; |
6930 | |
6931 | shost = class_to_shost(dev); |
6932 | ctrl_info = shost_to_hba(shost); |
6933 | |
6934 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); |
6935 | } |
6936 | |
6937 | static ssize_t pqi_host_rescan_store(struct device *dev, |
6938 | struct device_attribute *attr, const char *buffer, size_t count) |
6939 | { |
6940 | struct Scsi_Host *shost = class_to_shost(dev); |
6941 | |
6942 | pqi_scan_start(shost); |
6943 | |
6944 | return count; |
6945 | } |
6946 | |
6947 | static ssize_t pqi_lockup_action_show(struct device *dev, |
6948 | struct device_attribute *attr, char *buffer) |
6949 | { |
6950 | int count = 0; |
6951 | unsigned int i; |
6952 | |
6953 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { |
6954 | if (pqi_lockup_actions[i].action == pqi_lockup_action) |
6955 | count += scnprintf(buffer + count, PAGE_SIZE - count, |
6956 | "[%s] ", pqi_lockup_actions[i].name); |
6957 | else |
6958 | count += scnprintf(buffer + count, PAGE_SIZE - count, |
6959 | "%s ", pqi_lockup_actions[i].name); |
6960 | } |
6961 | |
6962 | count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); |
6963 | |
6964 | return count; |
6965 | } |
6966 | |
6967 | static ssize_t pqi_lockup_action_store(struct device *dev, |
6968 | struct device_attribute *attr, const char *buffer, size_t count) |
6969 | { |
6970 | unsigned int i; |
6971 | char *action_name; |
6972 | char action_name_buffer[32]; |
6973 | |
6974 | strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); |
6975 | action_name = strstrip(action_name_buffer); |
6976 | |
6977 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { |
6978 | if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { |
6979 | pqi_lockup_action = pqi_lockup_actions[i].action; |
6980 | return count; |
6981 | } |
6982 | } |
6983 | |
6984 | return -EINVAL; |
6985 | } |
6986 | |
6987 | static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, |
6988 | struct device_attribute *attr, char *buffer) |
6989 | { |
6990 | struct Scsi_Host *shost = class_to_shost(dev); |
6991 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
6992 | |
6993 | return scnprintf(buffer, 10, "%x\n", |
6994 | ctrl_info->enable_stream_detection); |
6995 | } |
6996 | |
6997 | static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, |
6998 | struct device_attribute *attr, const char *buffer, size_t count) |
6999 | { |
7000 | struct Scsi_Host *shost = class_to_shost(dev); |
7001 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
7002 | u8 set_stream_detection = 0; |
7003 | |
7004 | if (kstrtou8(buffer, 0, &set_stream_detection)) |
7005 | return -EINVAL; |
7006 | |
7007 | if (set_stream_detection > 0) |
7008 | set_stream_detection = 1; |
7009 | |
7010 | ctrl_info->enable_stream_detection = set_stream_detection; |
7011 | |
7012 | return count; |
7013 | } |
7014 | |
7015 | static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, |
7016 | struct device_attribute *attr, char *buffer) |
7017 | { |
7018 | struct Scsi_Host *shost = class_to_shost(dev); |
7019 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
7020 | |
7021 | return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); |
7022 | } |
7023 | |
7024 | static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, |
7025 | struct device_attribute *attr, const char *buffer, size_t count) |
7026 | { |
7027 | struct Scsi_Host *shost = class_to_shost(dev); |
7028 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
7029 | u8 set_r5_writes = 0; |
7030 | |
7031 | if (kstrtou8(buffer, 0, &set_r5_writes)) |
7032 | return -EINVAL; |
7033 | |
7034 | if (set_r5_writes > 0) |
7035 | set_r5_writes = 1; |
7036 | |
7037 | ctrl_info->enable_r5_writes = set_r5_writes; |
7038 | |
7039 | return count; |
7040 | } |
7041 | |
7042 | static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, |
7043 | struct device_attribute *attr, char *buffer) |
7044 | { |
7045 | struct Scsi_Host *shost = class_to_shost(dev); |
7046 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
7047 | |
7048 | return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); |
7049 | } |
7050 | |
7051 | static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, |
7052 | struct device_attribute *attr, const char *buffer, size_t count) |
7053 | { |
7054 | struct Scsi_Host *shost = class_to_shost(dev); |
7055 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
7056 | u8 set_r6_writes = 0; |
7057 | |
7058 | if (kstrtou8(buffer, 0, &set_r6_writes)) |
7059 | return -EINVAL; |
7060 | |
7061 | if (set_r6_writes > 0) |
7062 | set_r6_writes = 1; |
7063 | |
7064 | ctrl_info->enable_r6_writes = set_r6_writes; |
7065 | |
7066 | return count; |
7067 | } |
7068 | |
7069 | static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); |
7070 | static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); |
7071 | static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); |
7072 | static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); |
7073 | static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); |
7074 | static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); |
7075 | static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, |
7076 | pqi_lockup_action_store); |
7077 | static DEVICE_ATTR(enable_stream_detection, 0644, |
7078 | pqi_host_enable_stream_detection_show, |
7079 | pqi_host_enable_stream_detection_store); |
7080 | static DEVICE_ATTR(enable_r5_writes, 0644, |
7081 | pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); |
7082 | static DEVICE_ATTR(enable_r6_writes, 0644, |
7083 | pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); |
7084 | |
7085 | static struct attribute *pqi_shost_attrs[] = { |
7086 | &dev_attr_driver_version.attr, |
7087 | &dev_attr_firmware_version.attr, |
7088 | &dev_attr_model.attr, |
7089 | &dev_attr_serial_number.attr, |
7090 | &dev_attr_vendor.attr, |
7091 | &dev_attr_rescan.attr, |
7092 | &dev_attr_lockup_action.attr, |
7093 | &dev_attr_enable_stream_detection.attr, |
7094 | &dev_attr_enable_r5_writes.attr, |
7095 | &dev_attr_enable_r6_writes.attr, |
7096 | NULL |
7097 | }; |
7098 | |
7099 | ATTRIBUTE_GROUPS(pqi_shost); |
7100 | |
7101 | static ssize_t pqi_unique_id_show(struct device *dev, |
7102 | struct device_attribute *attr, char *buffer) |
7103 | { |
7104 | struct pqi_ctrl_info *ctrl_info; |
7105 | struct scsi_device *sdev; |
7106 | struct pqi_scsi_dev *device; |
7107 | unsigned long flags; |
7108 | u8 unique_id[16]; |
7109 | |
7110 | sdev = to_scsi_device(dev); |
7111 | ctrl_info = shost_to_hba(sdev->host); |
7112 | |
7113 | if (pqi_ctrl_offline(ctrl_info)) |
7114 | return -ENODEV; |
7115 | |
7116 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7117 | |
7118 | device = sdev->hostdata; |
7119 | if (!device) { |
7120 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7121 | return -ENODEV; |
7122 | } |
7123 | |
7124 | if (device->is_physical_device) |
7125 | memcpy(unique_id, device->wwid, sizeof(device->wwid)); |
7126 | else |
7127 | memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); |
7128 | |
7129 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7130 | |
7131 | return scnprintf(buffer, PAGE_SIZE, |
7132 | "%02X%02X%02X%02X%02X%02X%02X%02X" |
7133 | "%02X%02X%02X%02X%02X%02X%02X%02X\n", |
7134 | unique_id[0], unique_id[1], unique_id[2], unique_id[3], |
7135 | unique_id[4], unique_id[5], unique_id[6], unique_id[7], |
7136 | unique_id[8], unique_id[9], unique_id[10], unique_id[11], |
7137 | unique_id[12], unique_id[13], unique_id[14], unique_id[15]); |
7138 | } |
7139 | |
7140 | static ssize_t pqi_lunid_show(struct device *dev, |
7141 | struct device_attribute *attr, char *buffer) |
7142 | { |
7143 | struct pqi_ctrl_info *ctrl_info; |
7144 | struct scsi_device *sdev; |
7145 | struct pqi_scsi_dev *device; |
7146 | unsigned long flags; |
7147 | u8 lunid[8]; |
7148 | |
7149 | sdev = to_scsi_device(dev); |
7150 | ctrl_info = shost_to_hba(sdev->host); |
7151 | |
7152 | if (pqi_ctrl_offline(ctrl_info)) |
7153 | return -ENODEV; |
7154 | |
7155 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7156 | |
7157 | device = sdev->hostdata; |
7158 | if (!device) { |
7159 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7160 | return -ENODEV; |
7161 | } |
7162 | |
7163 | memcpy(lunid, device->scsi3addr, sizeof(lunid)); |
7164 | |
7165 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7166 | |
7167 | return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); |
7168 | } |
7169 | |
7170 | #define MAX_PATHS 8 |
7171 | |
7172 | static ssize_t pqi_path_info_show(struct device *dev, |
7173 | struct device_attribute *attr, char *buf) |
7174 | { |
7175 | struct pqi_ctrl_info *ctrl_info; |
7176 | struct scsi_device *sdev; |
7177 | struct pqi_scsi_dev *device; |
7178 | unsigned long flags; |
7179 | int i; |
7180 | int output_len = 0; |
7181 | u8 box; |
7182 | u8 bay; |
7183 | u8 path_map_index; |
7184 | char *active; |
7185 | u8 phys_connector[2]; |
7186 | |
7187 | sdev = to_scsi_device(dev); |
7188 | ctrl_info = shost_to_hba(sdev->host); |
7189 | |
7190 | if (pqi_ctrl_offline(ctrl_info)) |
7191 | return -ENODEV; |
7192 | |
7193 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7194 | |
7195 | device = sdev->hostdata; |
7196 | if (!device) { |
7197 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7198 | return -ENODEV; |
7199 | } |
7200 | |
7201 | bay = device->bay; |
7202 | for (i = 0; i < MAX_PATHS; i++) { |
7203 | path_map_index = 1 << i; |
7204 | if (i == device->active_path_index) |
7205 | active = "Active"; |
7206 | else if (device->path_map & path_map_index) |
7207 | active = "Inactive"; |
7208 | else |
7209 | continue; |
7210 | |
7211 | output_len += scnprintf(buf + output_len, |
7212 | PAGE_SIZE - output_len, |
7213 | "[%d:%d:%d:%d] %20.20s ", |
7214 | ctrl_info->scsi_host->host_no, |
7215 | device->bus, device->target, |
7216 | device->lun, |
7217 | scsi_device_type(device->devtype)); |
7218 | |
7219 | if (device->devtype == TYPE_RAID || |
7220 | pqi_is_logical_device(device)) |
7221 | goto end_buffer; |
7222 | |
7223 | memcpy(&phys_connector, &device->phys_connector[i], |
7224 | sizeof(phys_connector)); |
7225 | if (phys_connector[0] < '0') |
7226 | phys_connector[0] = '0'; |
7227 | if (phys_connector[1] < '0') |
7228 | phys_connector[1] = '0'; |
7229 | |
7230 | output_len += scnprintf(buf + output_len, |
7231 | PAGE_SIZE - output_len, |
7232 | "PORT: %.2s ", phys_connector); |
7233 | |
7234 | box = device->box[i]; |
7235 | if (box != 0 && box != 0xFF) |
7236 | output_len += scnprintf(buf + output_len, |
7237 | PAGE_SIZE - output_len, |
7238 | "BOX: %hhu ", box); |
7239 | |
7240 | if ((device->devtype == TYPE_DISK || |
7241 | device->devtype == TYPE_ZBC) && |
7242 | pqi_expose_device(device)) |
7243 | output_len += scnprintf(buf + output_len, |
7244 | PAGE_SIZE - output_len, |
7245 | "BAY: %hhu ", bay); |
7246 | |
7247 | end_buffer: |
7248 | output_len += scnprintf(buf + output_len, |
7249 | PAGE_SIZE - output_len, |
7250 | "%s\n", active); |
7251 | } |
7252 | |
7253 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7254 | |
7255 | return output_len; |
7256 | } |
7257 | |
7258 | static ssize_t pqi_sas_address_show(struct device *dev, |
7259 | struct device_attribute *attr, char *buffer) |
7260 | { |
7261 | struct pqi_ctrl_info *ctrl_info; |
7262 | struct scsi_device *sdev; |
7263 | struct pqi_scsi_dev *device; |
7264 | unsigned long flags; |
7265 | u64 sas_address; |
7266 | |
7267 | sdev = to_scsi_device(dev); |
7268 | ctrl_info = shost_to_hba(sdev->host); |
7269 | |
7270 | if (pqi_ctrl_offline(ctrl_info)) |
7271 | return -ENODEV; |
7272 | |
7273 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7274 | |
7275 | device = sdev->hostdata; |
7276 | if (!device) { |
7277 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7278 | return -ENODEV; |
7279 | } |
7280 | |
7281 | sas_address = device->sas_address; |
7282 | |
7283 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7284 | |
7285 | return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); |
7286 | } |
7287 | |
7288 | static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, |
7289 | struct device_attribute *attr, char *buffer) |
7290 | { |
7291 | struct pqi_ctrl_info *ctrl_info; |
7292 | struct scsi_device *sdev; |
7293 | struct pqi_scsi_dev *device; |
7294 | unsigned long flags; |
7295 | |
7296 | sdev = to_scsi_device(dev); |
7297 | ctrl_info = shost_to_hba(sdev->host); |
7298 | |
7299 | if (pqi_ctrl_offline(ctrl_info)) |
7300 | return -ENODEV; |
7301 | |
7302 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7303 | |
7304 | device = sdev->hostdata; |
7305 | if (!device) { |
7306 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7307 | return -ENODEV; |
7308 | } |
7309 | |
7310 | buffer[0] = device->raid_bypass_enabled ? '1' : '0'; |
7311 | buffer[1] = '\n'; |
7312 | buffer[2] = '\0'; |
7313 | |
7314 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7315 | |
7316 | return 2; |
7317 | } |
7318 | |
7319 | static ssize_t pqi_raid_level_show(struct device *dev, |
7320 | struct device_attribute *attr, char *buffer) |
7321 | { |
7322 | struct pqi_ctrl_info *ctrl_info; |
7323 | struct scsi_device *sdev; |
7324 | struct pqi_scsi_dev *device; |
7325 | unsigned long flags; |
7326 | char *raid_level; |
7327 | |
7328 | sdev = to_scsi_device(dev); |
7329 | ctrl_info = shost_to_hba(sdev->host); |
7330 | |
7331 | if (pqi_ctrl_offline(ctrl_info)) |
7332 | return -ENODEV; |
7333 | |
7334 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7335 | |
7336 | device = sdev->hostdata; |
7337 | if (!device) { |
7338 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7339 | return -ENODEV; |
7340 | } |
7341 | |
7342 | if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) |
7343 | raid_level = pqi_raid_level_to_string(device->raid_level); |
7344 | else |
7345 | raid_level = "N/A"; |
7346 | |
7347 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7348 | |
7349 | return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); |
7350 | } |
7351 | |
7352 | static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, |
7353 | struct device_attribute *attr, char *buffer) |
7354 | { |
7355 | struct pqi_ctrl_info *ctrl_info; |
7356 | struct scsi_device *sdev; |
7357 | struct pqi_scsi_dev *device; |
7358 | unsigned long flags; |
7359 | unsigned int raid_bypass_cnt; |
7360 | |
7361 | sdev = to_scsi_device(dev); |
7362 | ctrl_info = shost_to_hba(sdev->host); |
7363 | |
7364 | if (pqi_ctrl_offline(ctrl_info)) |
7365 | return -ENODEV; |
7366 | |
7367 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7368 | |
7369 | device = sdev->hostdata; |
7370 | if (!device) { |
7371 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7372 | return -ENODEV; |
7373 | } |
7374 | |
7375 | raid_bypass_cnt = device->raid_bypass_cnt; |
7376 | |
7377 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7378 | |
7379 | return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); |
7380 | } |
7381 | |
7382 | static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, |
7383 | struct device_attribute *attr, char *buf) |
7384 | { |
7385 | struct pqi_ctrl_info *ctrl_info; |
7386 | struct scsi_device *sdev; |
7387 | struct pqi_scsi_dev *device; |
7388 | unsigned long flags; |
7389 | int output_len = 0; |
7390 | |
7391 | sdev = to_scsi_device(dev); |
7392 | ctrl_info = shost_to_hba(sdev->host); |
7393 | |
7394 | if (pqi_ctrl_offline(ctrl_info)) |
7395 | return -ENODEV; |
7396 | |
7397 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7398 | |
7399 | device = sdev->hostdata; |
7400 | if (!device) { |
7401 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7402 | return -ENODEV; |
7403 | } |
7404 | |
7405 | output_len = snprintf(buf, PAGE_SIZE, "%d\n", |
7406 | device->ncq_prio_enable); |
7407 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7408 | |
7409 | return output_len; |
7410 | } |
7411 | |
7412 | static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, |
7413 | struct device_attribute *attr, |
7414 | const char *buf, size_t count) |
7415 | { |
7416 | struct pqi_ctrl_info *ctrl_info; |
7417 | struct scsi_device *sdev; |
7418 | struct pqi_scsi_dev *device; |
7419 | unsigned long flags; |
7420 | u8 ncq_prio_enable = 0; |
7421 | |
7422 | if (kstrtou8(buf, 0, &ncq_prio_enable)) |
7423 | return -EINVAL; |
7424 | |
7425 | sdev = to_scsi_device(dev); |
7426 | ctrl_info = shost_to_hba(sdev->host); |
7427 | |
7428 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
7429 | |
7430 | device = sdev->hostdata; |
7431 | |
7432 | if (!device) { |
7433 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7434 | return -ENODEV; |
7435 | } |
7436 | |
7437 | if (!device->ncq_prio_support) { |
7438 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7439 | return -EINVAL; |
7440 | } |
7441 | |
7442 | device->ncq_prio_enable = ncq_prio_enable; |
7443 | |
7444 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
7445 | |
7446 | return strlen(buf); |
7447 | } |
7448 | |
7449 | static ssize_t pqi_numa_node_show(struct device *dev, |
7450 | struct device_attribute *attr, char *buffer) |
7451 | { |
7452 | struct scsi_device *sdev; |
7453 | struct pqi_ctrl_info *ctrl_info; |
7454 | |
7455 | sdev = to_scsi_device(dev); |
7456 | ctrl_info = shost_to_hba(sdev->host); |
7457 | |
7458 | return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); |
7459 | } |
7460 | |
7461 | static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); |
7462 | static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); |
7463 | static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); |
7464 | static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); |
7465 | static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); |
7466 | static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); |
7467 | static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); |
7468 | static DEVICE_ATTR(sas_ncq_prio_enable, 0644, |
7469 | pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); |
7470 | static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); |
7471 | |
7472 | static struct attribute *pqi_sdev_attrs[] = { |
7473 | &dev_attr_lunid.attr, |
7474 | &dev_attr_unique_id.attr, |
7475 | &dev_attr_path_info.attr, |
7476 | &dev_attr_sas_address.attr, |
7477 | &dev_attr_ssd_smart_path_enabled.attr, |
7478 | &dev_attr_raid_level.attr, |
7479 | &dev_attr_raid_bypass_cnt.attr, |
7480 | &dev_attr_sas_ncq_prio_enable.attr, |
7481 | &dev_attr_numa_node.attr, |
7482 | NULL |
7483 | }; |
7484 | |
7485 | ATTRIBUTE_GROUPS(pqi_sdev); |
7486 | |
7487 | static const struct scsi_host_template pqi_driver_template = { |
7488 | .module = THIS_MODULE, |
7489 | .name = DRIVER_NAME_SHORT, |
7490 | .proc_name = DRIVER_NAME_SHORT, |
7491 | .queuecommand = pqi_scsi_queue_command, |
7492 | .scan_start = pqi_scan_start, |
7493 | .scan_finished = pqi_scan_finished, |
7494 | .this_id = -1, |
7495 | .eh_device_reset_handler = pqi_eh_device_reset_handler, |
7496 | .eh_abort_handler = pqi_eh_abort_handler, |
7497 | .ioctl = pqi_ioctl, |
7498 | .slave_alloc = pqi_slave_alloc, |
7499 | .slave_configure = pqi_slave_configure, |
7500 | .slave_destroy = pqi_slave_destroy, |
7501 | .map_queues = pqi_map_queues, |
7502 | .sdev_groups = pqi_sdev_groups, |
7503 | .shost_groups = pqi_shost_groups, |
7504 | .cmd_size = sizeof(struct pqi_cmd_priv), |
7505 | }; |
7506 | |
7507 | static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) |
7508 | { |
7509 | int rc; |
7510 | struct Scsi_Host *shost; |
7511 | |
7512 | shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); |
7513 | if (!shost) { |
7514 | dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); |
7515 | return -ENOMEM; |
7516 | } |
7517 | |
7518 | shost->io_port = 0; |
7519 | shost->n_io_port = 0; |
7520 | shost->this_id = -1; |
7521 | shost->max_channel = PQI_MAX_BUS; |
7522 | shost->max_cmd_len = MAX_COMMAND_SIZE; |
7523 | shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; |
7524 | shost->max_id = ~0; |
7525 | shost->max_sectors = ctrl_info->max_sectors; |
7526 | shost->can_queue = ctrl_info->scsi_ml_can_queue; |
7527 | shost->cmd_per_lun = shost->can_queue; |
7528 | shost->sg_tablesize = ctrl_info->sg_tablesize; |
7529 | shost->transportt = pqi_sas_transport_template; |
7530 | shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); |
7531 | shost->unique_id = shost->irq; |
7532 | shost->nr_hw_queues = ctrl_info->num_queue_groups; |
7533 | shost->host_tagset = 1; |
7534 | shost->hostdata[0] = (unsigned long)ctrl_info; |
7535 | |
7536 | rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); |
7537 | if (rc) { |
7538 | dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); |
7539 | goto free_host; |
7540 | } |
7541 | |
7542 | rc = pqi_add_sas_host(shost, ctrl_info); |
7543 | if (rc) { |
7544 | dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); |
7545 | goto remove_host; |
7546 | } |
7547 | |
7548 | ctrl_info->scsi_host = shost; |
7549 | |
7550 | return 0; |
7551 | |
7552 | remove_host: |
7553 | scsi_remove_host(shost); |
7554 | free_host: |
7555 | scsi_host_put(shost); |
7556 | |
7557 | return rc; |
7558 | } |
7559 | |
7560 | static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) |
7561 | { |
7562 | struct Scsi_Host *shost; |
7563 | |
7564 | pqi_delete_sas_host(ctrl_info); |
7565 | |
7566 | shost = ctrl_info->scsi_host; |
7567 | if (!shost) |
7568 | return; |
7569 | |
7570 | scsi_remove_host(shost); |
7571 | scsi_host_put(shost); |
7572 | } |
7573 | |
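 | /* |
 |  * Poll the PQI device reset register until the reset completes, the firmware |
 |  * stops running, or the controller-supplied timeout expires. |
 |  */ |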
7574 | static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) |
7575 | { |
7576 | int rc = 0; |
7577 | struct pqi_device_registers __iomem *pqi_registers; |
7578 | unsigned long timeout; |
7579 | unsigned int timeout_msecs; |
7580 | union pqi_reset_register reset_reg; |
7581 | |
7582 | pqi_registers = ctrl_info->pqi_registers; |
7583 | timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; |
7584 | timeout = msecs_to_jiffies(timeout_msecs) + jiffies; |
7585 | |
7586 | while (1) { |
7587 | msleep(PQI_RESET_POLL_INTERVAL_MSECS); |
7588 | reset_reg.all_bits = readl(&pqi_registers->device_reset); |
7589 | if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) |
7590 | break; |
7591 | if (!sis_is_firmware_running(ctrl_info)) { |
7592 | rc = -ENXIO; |
7593 | break; |
7594 | } |
7595 | if (time_after(jiffies, timeout)) { |
7596 | rc = -ETIMEDOUT; |
7597 | break; |
7598 | } |
7599 | } |
7600 | |
7601 | return rc; |
7602 | } |
7603 | |
7604 | static int pqi_reset(struct pqi_ctrl_info *ctrl_info) |
7605 | { |
7606 | int rc; |
7607 | union pqi_reset_register reset_reg; |
7608 | |
7609 | if (ctrl_info->pqi_reset_quiesce_supported) { |
7610 | rc = sis_pqi_reset_quiesce(ctrl_info); |
7611 | if (rc) { |
7612 | dev_err(&ctrl_info->pci_dev->dev, |
7613 | "PQI reset failed during quiesce with error %d\n", rc); |
7614 | return rc; |
7615 | } |
7616 | } |
7617 | |
7618 | reset_reg.all_bits = 0; |
7619 | reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; |
7620 | reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; |
7621 | |
7622 | writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); |
7623 | |
7624 | rc = pqi_wait_for_pqi_reset_completion(ctrl_info); |
7625 | if (rc) |
7626 | dev_err(&ctrl_info->pci_dev->dev, |
7627 | "PQI reset failed with error %d\n", rc); |
7628 | |
7629 | return rc; |
7630 | } |
7631 | |
7632 | static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) |
7633 | { |
7634 | int rc; |
7635 | struct bmic_sense_subsystem_info *sense_info; |
7636 | |
7637 | sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); |
7638 | if (!sense_info) |
7639 | return -ENOMEM; |
7640 | |
7641 | rc = pqi_sense_subsystem_info(ctrl_info, sense_info); |
7642 | if (rc) |
7643 | goto out; |
7644 | |
7645 | memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, |
7646 | sizeof(sense_info->ctrl_serial_number)); |
7647 | ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; |
7648 | |
7649 | out: |
7650 | kfree(sense_info); |
7651 | |
7652 | return rc; |
7653 | } |
7654 | |
7655 | static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) |
7656 | { |
7657 | int rc; |
7658 | struct bmic_identify_controller *identify; |
7659 | |
7660 | identify = kmalloc(sizeof(*identify), GFP_KERNEL); |
7661 | if (!identify) |
7662 | return -ENOMEM; |
7663 | |
7664 | rc = pqi_identify_controller(ctrl_info, identify); |
7665 | if (rc) |
7666 | goto out; |
7667 | |
7668 | if (get_unaligned_le32(&identify->extra_controller_flags) & |
7669 | BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { |
7670 | memcpy(ctrl_info->firmware_version, |
7671 | identify->firmware_version_long, |
7672 | sizeof(identify->firmware_version_long)); |
7673 | } else { |
7674 | memcpy(ctrl_info->firmware_version, |
7675 | identify->firmware_version_short, |
7676 | sizeof(identify->firmware_version_short)); |
7677 | ctrl_info->firmware_version |
7678 | [sizeof(identify->firmware_version_short)] = '\0'; |
7679 | snprintf(ctrl_info->firmware_version + |
7680 | strlen(ctrl_info->firmware_version), |
7681 | sizeof(ctrl_info->firmware_version) - |
7682 | sizeof(identify->firmware_version_short), |
7683 | "-%u", |
7684 | get_unaligned_le16(&identify->firmware_build_number)); |
7685 | } |
7686 | |
7687 | memcpy(ctrl_info->model, identify->product_id, |
7688 | sizeof(identify->product_id)); |
7689 | ctrl_info->model[sizeof(identify->product_id)] = '\0'; |
7690 | |
7691 | memcpy(ctrl_info->vendor, identify->vendor_id, |
7692 | sizeof(identify->vendor_id)); |
7693 | ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; |
7694 | |
7695 | dev_info(&ctrl_info->pci_dev->dev, |
7696 | "Firmware version: %s\n", ctrl_info->firmware_version); |
7697 | |
7698 | out: |
7699 | kfree(identify); |
7700 | |
7701 | return rc; |
7702 | } |
7703 | |
7704 | struct pqi_config_table_section_info { |
7705 | struct pqi_ctrl_info *ctrl_info; |
7706 | void *section; |
7707 | u32 section_offset; |
7708 | void __iomem *section_iomem_addr; |
7709 | }; |
7710 | |
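 | /* |
 |  * Helpers for the firmware features section of the PQI config table: each |
 |  * feature is a bit position within the supported/enabled/requested byte |
 |  * arrays that follow the section header. |
 |  */ |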
7711 | static inline bool pqi_is_firmware_feature_supported( |
7712 | struct pqi_config_table_firmware_features *firmware_features, |
7713 | unsigned int bit_position) |
7714 | { |
7715 | unsigned int byte_index; |
7716 | |
7717 | byte_index = bit_position / BITS_PER_BYTE; |
7718 | |
7719 | if (byte_index >= le16_to_cpu(firmware_features->num_elements)) |
7720 | return false; |
7721 | |
7722 | return firmware_features->features_supported[byte_index] & |
7723 | (1 << (bit_position % BITS_PER_BYTE)) ? true : false; |
7724 | } |
7725 | |
7726 | static inline bool pqi_is_firmware_feature_enabled( |
7727 | struct pqi_config_table_firmware_features *firmware_features, |
7728 | void __iomem *firmware_features_iomem_addr, |
7729 | unsigned int bit_position) |
7730 | { |
7731 | unsigned int byte_index; |
7732 | u8 __iomem *features_enabled_iomem_addr; |
7733 | |
7734 | byte_index = (bit_position / BITS_PER_BYTE) + |
7735 | (le16_to_cpu(firmware_features->num_elements) * 2); |
7736 | |
7737 | features_enabled_iomem_addr = firmware_features_iomem_addr + |
7738 | offsetof(struct pqi_config_table_firmware_features, |
7739 | features_supported) + byte_index; |
7740 | |
7741 | return *((__force u8 *)features_enabled_iomem_addr) & |
7742 | (1 << (bit_position % BITS_PER_BYTE)) ? true : false; |
7743 | } |
7744 | |
7745 | static inline void pqi_request_firmware_feature( |
7746 | struct pqi_config_table_firmware_features *firmware_features, |
7747 | unsigned int bit_position) |
7748 | { |
7749 | unsigned int byte_index; |
7750 | |
7751 | byte_index = (bit_position / BITS_PER_BYTE) + |
7752 | le16_to_cpu(firmware_features->num_elements); |
7753 | |
7754 | firmware_features->features_supported[byte_index] |= |
7755 | (1 << (bit_position % BITS_PER_BYTE)); |
7756 | } |
7757 | |
7758 | static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, |
7759 | u16 first_section, u16 last_section) |
7760 | { |
7761 | struct pqi_vendor_general_request request; |
7762 | |
7763 | memset(&request, 0, sizeof(request)); |
7764 | |
7765 | request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; |
7766 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, |
7767 | &request.header.iu_length); |
7768 | put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, |
7769 | &request.function_code); |
7770 | put_unaligned_le16(first_section, |
7771 | &request.data.config_table_update.first_section); |
7772 | put_unaligned_le16(last_section, |
7773 | &request.data.config_table_update.last_section); |
7774 | |
7775 | return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
7776 | } |
7777 | |
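 | /* |
 |  * Copy the host-requested feature bits (and, if supported, the host's maximum |
 |  * known feature) into the config table, then ask the firmware to re-read the |
 |  * firmware features section. |
 |  */ |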
7778 | static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, |
7779 | struct pqi_config_table_firmware_features *firmware_features, |
7780 | void __iomem *firmware_features_iomem_addr) |
7781 | { |
7782 | void *features_requested; |
7783 | void __iomem *features_requested_iomem_addr; |
7784 | void __iomem *host_max_known_feature_iomem_addr; |
7785 | |
7786 | features_requested = firmware_features->features_supported + |
7787 | le16_to_cpu(firmware_features->num_elements); |
7788 | |
7789 | features_requested_iomem_addr = firmware_features_iomem_addr + |
7790 | (features_requested - (void *)firmware_features); |
7791 | |
7792 | memcpy_toio(features_requested_iomem_addr, features_requested, |
7793 | le16_to_cpu(firmware_features->num_elements)); |
7794 | |
7795 | if (pqi_is_firmware_feature_supported(firmware_features, |
7796 | PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { |
7797 | host_max_known_feature_iomem_addr = |
7798 | features_requested_iomem_addr + |
7799 | (le16_to_cpu(firmware_features->num_elements) * 2) + |
7800 | sizeof(__le16); |
7801 | writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); |
7802 | writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); |
7803 | } |
7804 | |
7805 | return pqi_config_table_update(ctrl_info, |
7806 | PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, |
7807 | PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); |
7808 | } |
7809 | |
7810 | struct pqi_firmware_feature { |
7811 | char *feature_name; |
7812 | unsigned int feature_bit; |
7813 | bool supported; |
7814 | bool enabled; |
7815 | void (*feature_status)(struct pqi_ctrl_info *ctrl_info, |
7816 | struct pqi_firmware_feature *firmware_feature); |
7817 | }; |
7818 | |
7819 | static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, |
7820 | struct pqi_firmware_feature *firmware_feature) |
7821 | { |
7822 | if (!firmware_feature->supported) { |
7823 | dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", |
7824 | firmware_feature->feature_name); |
7825 | return; |
7826 | } |
7827 | |
7828 | if (firmware_feature->enabled) { |
7829 | dev_info(&ctrl_info->pci_dev->dev, |
7830 | "%s enabled\n", firmware_feature->feature_name); |
7831 | return; |
7832 | } |
7833 | |
7834 | dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", |
7835 | firmware_feature->feature_name); |
7836 | } |
7837 | |
7838 | static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, |
7839 | struct pqi_firmware_feature *firmware_feature) |
7840 | { |
7841 | switch (firmware_feature->feature_bit) { |
7842 | case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: |
7843 | ctrl_info->enable_r1_writes = firmware_feature->enabled; |
7844 | break; |
7845 | case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: |
7846 | ctrl_info->enable_r5_writes = firmware_feature->enabled; |
7847 | break; |
7848 | case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: |
7849 | ctrl_info->enable_r6_writes = firmware_feature->enabled; |
7850 | break; |
7851 | case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: |
7852 | ctrl_info->soft_reset_handshake_supported = |
7853 | firmware_feature->enabled && |
7854 | pqi_read_soft_reset_status(ctrl_info); |
7855 | break; |
7856 | case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: |
7857 | ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; |
7858 | break; |
7859 | case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: |
7860 | ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; |
7861 | break; |
7862 | case PQI_FIRMWARE_FEATURE_FW_TRIAGE: |
7863 | ctrl_info->firmware_triage_supported = firmware_feature->enabled; |
7864 | pqi_save_fw_triage_setting(ctrl_info, is_supported: firmware_feature->enabled); |
7865 | break; |
7866 | case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: |
7867 | ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; |
7868 | break; |
7869 | case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: |
7870 | ctrl_info->multi_lun_device_supported = firmware_feature->enabled; |
7871 | break; |
7872 | } |
7873 | |
7874 | pqi_firmware_feature_status(ctrl_info, firmware_feature); |
7875 | } |
7876 | |
7877 | static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, |
7878 | struct pqi_firmware_feature *firmware_feature) |
7879 | { |
7880 | if (firmware_feature->feature_status) |
7881 | firmware_feature->feature_status(ctrl_info, firmware_feature); |
7882 | } |
7883 | |
7884 | static DEFINE_MUTEX(pqi_firmware_features_mutex); |
7885 | |
7886 | static struct pqi_firmware_feature pqi_firmware_features[] = { |
7887 | { |
7888 | .feature_name = "Online Firmware Activation", |
7889 | .feature_bit = PQI_FIRMWARE_FEATURE_OFA, |
7890 | .feature_status = pqi_firmware_feature_status, |
7891 | }, |
7892 | { |
7893 | .feature_name = "Serial Management Protocol", |
7894 | .feature_bit = PQI_FIRMWARE_FEATURE_SMP, |
7895 | .feature_status = pqi_firmware_feature_status, |
7896 | }, |
7897 | { |
7898 | .feature_name = "Maximum Known Feature", |
7899 | .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, |
7900 | .feature_status = pqi_firmware_feature_status, |
7901 | }, |
7902 | { |
7903 | .feature_name = "RAID 0 Read Bypass", |
7904 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, |
7905 | .feature_status = pqi_firmware_feature_status, |
7906 | }, |
7907 | { |
7908 | .feature_name = "RAID 1 Read Bypass", |
7909 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, |
7910 | .feature_status = pqi_firmware_feature_status, |
7911 | }, |
7912 | { |
7913 | .feature_name = "RAID 5 Read Bypass", |
7914 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, |
7915 | .feature_status = pqi_firmware_feature_status, |
7916 | }, |
7917 | { |
7918 | .feature_name = "RAID 6 Read Bypass", |
7919 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, |
7920 | .feature_status = pqi_firmware_feature_status, |
7921 | }, |
7922 | { |
7923 | .feature_name = "RAID 0 Write Bypass", |
7924 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, |
7925 | .feature_status = pqi_firmware_feature_status, |
7926 | }, |
7927 | { |
7928 | .feature_name = "RAID 1 Write Bypass", |
7929 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, |
7930 | .feature_status = pqi_ctrl_update_feature_flags, |
7931 | }, |
7932 | { |
7933 | .feature_name = "RAID 5 Write Bypass", |
7934 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, |
7935 | .feature_status = pqi_ctrl_update_feature_flags, |
7936 | }, |
7937 | { |
7938 | .feature_name = "RAID 6 Write Bypass", |
7939 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, |
7940 | .feature_status = pqi_ctrl_update_feature_flags, |
7941 | }, |
7942 | { |
7943 | .feature_name = "New Soft Reset Handshake", |
7944 | .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, |
7945 | .feature_status = pqi_ctrl_update_feature_flags, |
7946 | }, |
7947 | { |
7948 | .feature_name = "RAID IU Timeout", |
7949 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, |
7950 | .feature_status = pqi_ctrl_update_feature_flags, |
7951 | }, |
7952 | { |
7953 | .feature_name = "TMF IU Timeout", |
7954 | .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, |
7955 | .feature_status = pqi_ctrl_update_feature_flags, |
7956 | }, |
7957 | { |
7958 | .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", |
7959 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, |
7960 | .feature_status = pqi_firmware_feature_status, |
7961 | }, |
7962 | { |
7963 | .feature_name = "Firmware Triage", |
7964 | .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, |
7965 | .feature_status = pqi_ctrl_update_feature_flags, |
7966 | }, |
7967 | { |
7968 | .feature_name = "RPL Extended Formats 4 and 5", |
7969 | .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, |
7970 | .feature_status = pqi_ctrl_update_feature_flags, |
7971 | }, |
7972 | { |
7973 | .feature_name = "Multi-LUN Target", |
7974 | .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, |
7975 | .feature_status = pqi_ctrl_update_feature_flags, |
7976 | }, |
7977 | }; |
7978 | |
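/*
 * Walk the driver's firmware feature table: note which features the
 * controller supports, request all supported features, enable them via a
 * config table update, and then record and report which features the
 * firmware actually enabled.
 */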
7979 | static void pqi_process_firmware_features( |
7980 | struct pqi_config_table_section_info *section_info) |
7981 | { |
7982 | int rc; |
7983 | struct pqi_ctrl_info *ctrl_info; |
7984 | struct pqi_config_table_firmware_features *firmware_features; |
7985 | void __iomem *firmware_features_iomem_addr; |
7986 | unsigned int i; |
7987 | unsigned int num_features_supported; |
7988 | |
7989 | ctrl_info = section_info->ctrl_info; |
7990 | firmware_features = section_info->section; |
7991 | firmware_features_iomem_addr = section_info->section_iomem_addr; |
7992 | |
7993 | for (i = 0, num_features_supported = 0; |
7994 | i < ARRAY_SIZE(pqi_firmware_features); i++) { |
		if (pqi_is_firmware_feature_supported(firmware_features,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].supported = true;
			num_features_supported++;
		} else {
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
8003 | } |
8004 | |
8005 | if (num_features_supported == 0) |
8006 | return; |
8007 | |
8008 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
8009 | if (!pqi_firmware_features[i].supported) |
8010 | continue; |
		pqi_request_firmware_feature(firmware_features,
			pqi_firmware_features[i].feature_bit);
8013 | } |
8014 | |
8015 | rc = pqi_enable_firmware_features(ctrl_info, firmware_features, |
8016 | firmware_features_iomem_addr); |
8017 | if (rc) { |
8018 | dev_err(&ctrl_info->pci_dev->dev, |
8019 | "failed to enable firmware features in PQI configuration table\n"); |
8020 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
8021 | if (!pqi_firmware_features[i].supported) |
8022 | continue; |
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
8025 | } |
8026 | return; |
8027 | } |
8028 | |
8029 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
8030 | if (!pqi_firmware_features[i].supported) |
8031 | continue; |
		if (pqi_is_firmware_feature_enabled(firmware_features,
			firmware_features_iomem_addr,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].enabled = true;
		}
		pqi_firmware_feature_update(ctrl_info,
			&pqi_firmware_features[i]);
8039 | } |
8040 | } |
8041 | |
8042 | static void pqi_init_firmware_features(void) |
8043 | { |
8044 | unsigned int i; |
8045 | |
8046 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
8047 | pqi_firmware_features[i].supported = false; |
8048 | pqi_firmware_features[i].enabled = false; |
8049 | } |
8050 | } |
8051 | |
8052 | static void pqi_process_firmware_features_section( |
8053 | struct pqi_config_table_section_info *section_info) |
8054 | { |
8055 | mutex_lock(&pqi_firmware_features_mutex); |
8056 | pqi_init_firmware_features(); |
8057 | pqi_process_firmware_features(section_info); |
	mutex_unlock(&pqi_firmware_features_mutex);
8059 | } |
8060 | |
8061 | /* |
8062 | * Reset all controller settings that can be initialized during the processing |
8063 | * of the PQI Configuration Table. |
8064 | */ |
8065 | |
8066 | static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) |
8067 | { |
8068 | ctrl_info->heartbeat_counter = NULL; |
8069 | ctrl_info->soft_reset_status = NULL; |
8070 | ctrl_info->soft_reset_handshake_supported = false; |
8071 | ctrl_info->enable_r1_writes = false; |
8072 | ctrl_info->enable_r5_writes = false; |
8073 | ctrl_info->enable_r6_writes = false; |
8074 | ctrl_info->raid_iu_timeout_supported = false; |
8075 | ctrl_info->tmf_iu_timeout_supported = false; |
8076 | ctrl_info->firmware_triage_supported = false; |
8077 | ctrl_info->rpl_extended_format_4_5_supported = false; |
8078 | ctrl_info->multi_lun_device_supported = false; |
8079 | } |
8080 | |
8081 | static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) |
8082 | { |
8083 | u32 table_length; |
8084 | u32 section_offset; |
8085 | bool firmware_feature_section_present; |
8086 | void __iomem *table_iomem_addr; |
8087 | struct pqi_config_table *config_table; |
8088 | struct pqi_config_table_section_header *section; |
8089 | struct pqi_config_table_section_info section_info; |
8090 | struct pqi_config_table_section_info feature_section_info = {0}; |
8091 | |
8092 | table_length = ctrl_info->config_table_length; |
8093 | if (table_length == 0) |
8094 | return 0; |
8095 | |
	config_table = kmalloc(table_length, GFP_KERNEL);
8097 | if (!config_table) { |
8098 | dev_err(&ctrl_info->pci_dev->dev, |
8099 | "failed to allocate memory for PQI configuration table\n"); |
8100 | return -ENOMEM; |
8101 | } |
8102 | |
8103 | /* |
8104 | * Copy the config table contents from I/O memory space into the |
8105 | * temporary buffer. |
8106 | */ |
8107 | table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; |
8108 | memcpy_fromio(config_table, table_iomem_addr, table_length); |
8109 | |
8110 | firmware_feature_section_present = false; |
8111 | section_info.ctrl_info = ctrl_info; |
	section_offset = get_unaligned_le32(&config_table->first_section_offset);
8113 | |
8114 | while (section_offset) { |
8115 | section = (void *)config_table + section_offset; |
8116 | |
8117 | section_info.section = section; |
8118 | section_info.section_offset = section_offset; |
8119 | section_info.section_iomem_addr = table_iomem_addr + section_offset; |
8120 | |
		switch (get_unaligned_le16(&section->section_id)) {
8122 | case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: |
8123 | firmware_feature_section_present = true; |
8124 | feature_section_info = section_info; |
8125 | break; |
8126 | case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: |
8127 | if (pqi_disable_heartbeat) |
8128 | dev_warn(&ctrl_info->pci_dev->dev, |
8129 | "heartbeat disabled by module parameter\n"); |
8130 | else |
8131 | ctrl_info->heartbeat_counter = |
8132 | table_iomem_addr + |
8133 | section_offset + |
8134 | offsetof(struct pqi_config_table_heartbeat, |
8135 | heartbeat_counter); |
8136 | break; |
8137 | case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: |
8138 | ctrl_info->soft_reset_status = |
8139 | table_iomem_addr + |
8140 | section_offset + |
8141 | offsetof(struct pqi_config_table_soft_reset, |
8142 | soft_reset_status); |
8143 | break; |
8144 | } |
8145 | |
		section_offset = get_unaligned_le16(&section->next_section_offset);
8147 | } |
8148 | |
8149 | /* |
8150 | * We process the firmware feature section after all other sections |
8151 | * have been processed so that the feature bit callbacks can take |
8152 | * into account the settings configured by other sections. |
8153 | */ |
8154 | if (firmware_feature_section_present) |
		pqi_process_firmware_features_section(&feature_section_info);
8156 | |
	kfree(config_table);
8158 | |
8159 | return 0; |
8160 | } |
8161 | |
8162 | /* Switches the controller from PQI mode back into SIS mode. */ |
8163 | |
8164 | static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) |
8165 | { |
8166 | int rc; |
8167 | |
	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8169 | rc = pqi_reset(ctrl_info); |
8170 | if (rc) |
8171 | return rc; |
8172 | rc = sis_reenable_sis_mode(ctrl_info); |
8173 | if (rc) { |
8174 | dev_err(&ctrl_info->pci_dev->dev, |
8175 | "re-enabling SIS mode failed with error %d\n", rc); |
8176 | return rc; |
8177 | } |
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8179 | |
8180 | return 0; |
8181 | } |
8182 | |
8183 | /* |
8184 | * If the controller isn't already in SIS mode, this function forces it into |
8185 | * SIS mode. |
8186 | */ |
8187 | |
8188 | static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) |
8189 | { |
8190 | if (!sis_is_firmware_running(ctrl_info)) |
8191 | return -ENXIO; |
8192 | |
8193 | if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) |
8194 | return 0; |
8195 | |
8196 | if (sis_is_kernel_up(ctrl_info)) { |
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8198 | return 0; |
8199 | } |
8200 | |
8201 | return pqi_revert_to_sis_mode(ctrl_info); |
8202 | } |
8203 | |
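/* Carry out the configured lockup action: none, reboot, or panic. */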
8204 | static void pqi_perform_lockup_action(void) |
8205 | { |
8206 | switch (pqi_lockup_action) { |
8207 | case PANIC: |
		panic("FATAL: Smart Family Controller lockup detected");
8209 | break; |
8210 | case REBOOT: |
8211 | emergency_restart(); |
8212 | break; |
8213 | case NONE: |
8214 | default: |
8215 | break; |
8216 | } |
8217 | } |
8218 | |
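/*
 * Bring the controller from SIS mode to a fully initialized PQI controller:
 * create the admin and operational queues, set up MSI-X interrupts, process
 * the config table, register with the SCSI midlayer, and start the initial
 * device scan.
 */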
8219 | static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) |
8220 | { |
8221 | int rc; |
8222 | u32 product_id; |
8223 | |
8224 | if (reset_devices) { |
8225 | if (pqi_is_fw_triage_supported(ctrl_info)) { |
8226 | rc = sis_wait_for_fw_triage_completion(ctrl_info); |
8227 | if (rc) |
8228 | return rc; |
8229 | } |
8230 | sis_soft_reset(ctrl_info); |
8231 | ssleep(PQI_POST_RESET_DELAY_SECS); |
8232 | } else { |
8233 | rc = pqi_force_sis_mode(ctrl_info); |
8234 | if (rc) |
8235 | return rc; |
8236 | } |
8237 | |
8238 | /* |
8239 | * Wait until the controller is ready to start accepting SIS |
8240 | * commands. |
8241 | */ |
8242 | rc = sis_wait_for_ctrl_ready(ctrl_info); |
8243 | if (rc) { |
8244 | if (reset_devices) { |
8245 | dev_err(&ctrl_info->pci_dev->dev, |
8246 | "kdump init failed with error %d\n", rc); |
8247 | pqi_lockup_action = REBOOT; |
8248 | pqi_perform_lockup_action(); |
8249 | } |
8250 | return rc; |
8251 | } |
8252 | |
8253 | /* |
8254 | * Get the controller properties. This allows us to determine |
8255 | * whether or not it supports PQI mode. |
8256 | */ |
8257 | rc = sis_get_ctrl_properties(ctrl_info); |
8258 | if (rc) { |
8259 | dev_err(&ctrl_info->pci_dev->dev, |
8260 | "error obtaining controller properties\n"); |
8261 | return rc; |
8262 | } |
8263 | |
8264 | rc = sis_get_pqi_capabilities(ctrl_info); |
8265 | if (rc) { |
8266 | dev_err(&ctrl_info->pci_dev->dev, |
8267 | "error obtaining controller capabilities\n"); |
8268 | return rc; |
8269 | } |
8270 | |
8271 | product_id = sis_get_product_id(ctrl_info); |
8272 | ctrl_info->product_id = (u8)product_id; |
8273 | ctrl_info->product_revision = (u8)(product_id >> 8); |
8274 | |
8275 | if (reset_devices) { |
8276 | if (ctrl_info->max_outstanding_requests > |
8277 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) |
8278 | ctrl_info->max_outstanding_requests = |
8279 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; |
8280 | } else { |
8281 | if (ctrl_info->max_outstanding_requests > |
8282 | PQI_MAX_OUTSTANDING_REQUESTS) |
8283 | ctrl_info->max_outstanding_requests = |
8284 | PQI_MAX_OUTSTANDING_REQUESTS; |
8285 | } |
8286 | |
8287 | pqi_calculate_io_resources(ctrl_info); |
8288 | |
8289 | rc = pqi_alloc_error_buffer(ctrl_info); |
8290 | if (rc) { |
8291 | dev_err(&ctrl_info->pci_dev->dev, |
8292 | "failed to allocate PQI error buffer\n"); |
8293 | return rc; |
8294 | } |
8295 | |
8296 | /* |
8297 | * If the function we are about to call succeeds, the |
8298 | * controller will transition from legacy SIS mode |
8299 | * into PQI mode. |
8300 | */ |
8301 | rc = sis_init_base_struct_addr(ctrl_info); |
8302 | if (rc) { |
8303 | dev_err(&ctrl_info->pci_dev->dev, |
8304 | "error initializing PQI mode\n"); |
8305 | return rc; |
8306 | } |
8307 | |
8308 | /* Wait for the controller to complete the SIS -> PQI transition. */ |
8309 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); |
8310 | if (rc) { |
8311 | dev_err(&ctrl_info->pci_dev->dev, |
8312 | "transition to PQI mode failed\n"); |
8313 | return rc; |
8314 | } |
8315 | |
8316 | /* From here on, we are running in PQI mode. */ |
8317 | ctrl_info->pqi_mode_enabled = true; |
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8319 | |
8320 | rc = pqi_alloc_admin_queues(ctrl_info); |
8321 | if (rc) { |
8322 | dev_err(&ctrl_info->pci_dev->dev, |
8323 | "failed to allocate admin queues\n"); |
8324 | return rc; |
8325 | } |
8326 | |
8327 | rc = pqi_create_admin_queues(ctrl_info); |
8328 | if (rc) { |
8329 | dev_err(&ctrl_info->pci_dev->dev, |
8330 | "error creating admin queues\n"); |
8331 | return rc; |
8332 | } |
8333 | |
8334 | rc = pqi_report_device_capability(ctrl_info); |
8335 | if (rc) { |
8336 | dev_err(&ctrl_info->pci_dev->dev, |
8337 | "obtaining device capability failed\n"); |
8338 | return rc; |
8339 | } |
8340 | |
8341 | rc = pqi_validate_device_capability(ctrl_info); |
8342 | if (rc) |
8343 | return rc; |
8344 | |
8345 | pqi_calculate_queue_resources(ctrl_info); |
8346 | |
8347 | rc = pqi_enable_msix_interrupts(ctrl_info); |
8348 | if (rc) |
8349 | return rc; |
8350 | |
8351 | if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { |
8352 | ctrl_info->max_msix_vectors = |
8353 | ctrl_info->num_msix_vectors_enabled; |
8354 | pqi_calculate_queue_resources(ctrl_info); |
8355 | } |
8356 | |
8357 | rc = pqi_alloc_io_resources(ctrl_info); |
8358 | if (rc) |
8359 | return rc; |
8360 | |
8361 | rc = pqi_alloc_operational_queues(ctrl_info); |
8362 | if (rc) { |
8363 | dev_err(&ctrl_info->pci_dev->dev, |
8364 | "failed to allocate operational queues\n"); |
8365 | return rc; |
8366 | } |
8367 | |
8368 | pqi_init_operational_queues(ctrl_info); |
8369 | |
8370 | rc = pqi_create_queues(ctrl_info); |
8371 | if (rc) |
8372 | return rc; |
8373 | |
8374 | rc = pqi_request_irqs(ctrl_info); |
8375 | if (rc) |
8376 | return rc; |
8377 | |
	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8379 | |
8380 | ctrl_info->controller_online = true; |
8381 | |
8382 | rc = pqi_process_config_table(ctrl_info); |
8383 | if (rc) |
8384 | return rc; |
8385 | |
8386 | pqi_start_heartbeat_timer(ctrl_info); |
8387 | |
8388 | if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { |
8389 | rc = pqi_get_advanced_raid_bypass_config(ctrl_info); |
8390 | if (rc) { /* Supported features not returned correctly. */ |
8391 | dev_err(&ctrl_info->pci_dev->dev, |
8392 | "error obtaining advanced RAID bypass configuration\n"); |
8393 | return rc; |
8394 | } |
8395 | ctrl_info->ciss_report_log_flags |= |
8396 | CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; |
8397 | } |
8398 | |
8399 | rc = pqi_enable_events(ctrl_info); |
8400 | if (rc) { |
8401 | dev_err(&ctrl_info->pci_dev->dev, |
8402 | "error enabling events\n"); |
8403 | return rc; |
8404 | } |
8405 | |
8406 | /* Register with the SCSI subsystem. */ |
8407 | rc = pqi_register_scsi(ctrl_info); |
8408 | if (rc) |
8409 | return rc; |
8410 | |
8411 | rc = pqi_get_ctrl_product_details(ctrl_info); |
8412 | if (rc) { |
8413 | dev_err(&ctrl_info->pci_dev->dev, |
8414 | "error obtaining product details\n"); |
8415 | return rc; |
8416 | } |
8417 | |
8418 | rc = pqi_get_ctrl_serial_number(ctrl_info); |
8419 | if (rc) { |
8420 | dev_err(&ctrl_info->pci_dev->dev, |
8421 | "error obtaining ctrl serial number\n"); |
8422 | return rc; |
8423 | } |
8424 | |
8425 | rc = pqi_set_diag_rescan(ctrl_info); |
8426 | if (rc) { |
8427 | dev_err(&ctrl_info->pci_dev->dev, |
8428 | "error enabling multi-lun rescan\n"); |
8429 | return rc; |
8430 | } |
8431 | |
8432 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); |
8433 | if (rc) { |
8434 | dev_err(&ctrl_info->pci_dev->dev, |
8435 | "error updating host wellness\n"); |
8436 | return rc; |
8437 | } |
8438 | |
8439 | pqi_schedule_update_time_worker(ctrl_info); |
8440 | |
8441 | pqi_scan_scsi_devices(ctrl_info); |
8442 | |
8443 | return 0; |
8444 | } |
8445 | |
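/* Reset all host-side queue indices before the queues are re-created on resume. */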
8446 | static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) |
8447 | { |
8448 | unsigned int i; |
8449 | struct pqi_admin_queues *admin_queues; |
8450 | struct pqi_event_queue *event_queue; |
8451 | |
8452 | admin_queues = &ctrl_info->admin_queues; |
8453 | admin_queues->iq_pi_copy = 0; |
8454 | admin_queues->oq_ci_copy = 0; |
	writel(0, admin_queues->oq_pi);
8456 | |
8457 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
8458 | ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; |
8459 | ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; |
8460 | ctrl_info->queue_groups[i].oq_ci_copy = 0; |
8461 | |
		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
		writel(0, ctrl_info->queue_groups[i].oq_pi);
8465 | } |
8466 | |
8467 | event_queue = &ctrl_info->event_queue; |
	writel(0, event_queue->oq_pi);
8469 | event_queue->oq_ci_copy = 0; |
8470 | } |
8471 | |
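/*
 * Lighter-weight variant of pqi_ctrl_init() used on resume and after OFA:
 * the queue memory already exists, so only the controller-side state is
 * re-created.
 */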
8472 | static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) |
8473 | { |
8474 | int rc; |
8475 | |
8476 | rc = pqi_force_sis_mode(ctrl_info); |
8477 | if (rc) |
8478 | return rc; |
8479 | |
8480 | /* |
8481 | * Wait until the controller is ready to start accepting SIS |
8482 | * commands. |
8483 | */ |
8484 | rc = sis_wait_for_ctrl_ready_resume(ctrl_info); |
8485 | if (rc) |
8486 | return rc; |
8487 | |
8488 | /* |
8489 | * Get the controller properties. This allows us to determine |
8490 | * whether or not it supports PQI mode. |
8491 | */ |
8492 | rc = sis_get_ctrl_properties(ctrl_info); |
8493 | if (rc) { |
8494 | dev_err(&ctrl_info->pci_dev->dev, |
8495 | "error obtaining controller properties\n"); |
8496 | return rc; |
8497 | } |
8498 | |
8499 | rc = sis_get_pqi_capabilities(ctrl_info); |
8500 | if (rc) { |
8501 | dev_err(&ctrl_info->pci_dev->dev, |
8502 | "error obtaining controller capabilities\n"); |
8503 | return rc; |
8504 | } |
8505 | |
8506 | /* |
8507 | * If the function we are about to call succeeds, the |
8508 | * controller will transition from legacy SIS mode |
8509 | * into PQI mode. |
8510 | */ |
8511 | rc = sis_init_base_struct_addr(ctrl_info); |
8512 | if (rc) { |
8513 | dev_err(&ctrl_info->pci_dev->dev, |
8514 | "error initializing PQI mode\n"); |
8515 | return rc; |
8516 | } |
8517 | |
8518 | /* Wait for the controller to complete the SIS -> PQI transition. */ |
8519 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); |
8520 | if (rc) { |
8521 | dev_err(&ctrl_info->pci_dev->dev, |
8522 | "transition to PQI mode failed\n"); |
8523 | return rc; |
8524 | } |
8525 | |
8526 | /* From here on, we are running in PQI mode. */ |
8527 | ctrl_info->pqi_mode_enabled = true; |
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8529 | |
8530 | pqi_reinit_queues(ctrl_info); |
8531 | |
8532 | rc = pqi_create_admin_queues(ctrl_info); |
8533 | if (rc) { |
8534 | dev_err(&ctrl_info->pci_dev->dev, |
8535 | "error creating admin queues\n"); |
8536 | return rc; |
8537 | } |
8538 | |
8539 | rc = pqi_create_queues(ctrl_info); |
8540 | if (rc) |
8541 | return rc; |
8542 | |
	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8544 | |
8545 | ctrl_info->controller_online = true; |
8546 | pqi_ctrl_unblock_requests(ctrl_info); |
8547 | |
8548 | pqi_ctrl_reset_config(ctrl_info); |
8549 | |
8550 | rc = pqi_process_config_table(ctrl_info); |
8551 | if (rc) |
8552 | return rc; |
8553 | |
8554 | pqi_start_heartbeat_timer(ctrl_info); |
8555 | |
8556 | if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { |
8557 | rc = pqi_get_advanced_raid_bypass_config(ctrl_info); |
8558 | if (rc) { |
8559 | dev_err(&ctrl_info->pci_dev->dev, |
8560 | "error obtaining advanced RAID bypass configuration\n"); |
8561 | return rc; |
8562 | } |
8563 | ctrl_info->ciss_report_log_flags |= |
8564 | CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; |
8565 | } |
8566 | |
8567 | rc = pqi_enable_events(ctrl_info); |
8568 | if (rc) { |
8569 | dev_err(&ctrl_info->pci_dev->dev, |
8570 | "error enabling events\n"); |
8571 | return rc; |
8572 | } |
8573 | |
8574 | rc = pqi_get_ctrl_product_details(ctrl_info); |
8575 | if (rc) { |
8576 | dev_err(&ctrl_info->pci_dev->dev, |
8577 | "error obtaining product details\n"); |
8578 | return rc; |
8579 | } |
8580 | |
8581 | rc = pqi_set_diag_rescan(ctrl_info); |
8582 | if (rc) { |
8583 | dev_err(&ctrl_info->pci_dev->dev, |
8584 | "error enabling multi-lun rescan\n"); |
8585 | return rc; |
8586 | } |
8587 | |
8588 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); |
8589 | if (rc) { |
8590 | dev_err(&ctrl_info->pci_dev->dev, |
8591 | "error updating host wellness\n"); |
8592 | return rc; |
8593 | } |
8594 | |
8595 | if (pqi_ofa_in_progress(ctrl_info)) |
8596 | pqi_ctrl_unblock_scan(ctrl_info); |
8597 | |
8598 | pqi_scan_scsi_devices(ctrl_info); |
8599 | |
8600 | return 0; |
8601 | } |
8602 | |
8603 | static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) |
8604 | { |
8605 | int rc; |
8606 | |
	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);

	return pcibios_err_to_errno(rc);
8611 | } |
8612 | |
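/*
 * Enable the PCI device, set the DMA mask, map BAR 0, and increase the PCIe
 * completion timeout before enabling bus mastering.
 */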
8613 | static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) |
8614 | { |
8615 | int rc; |
8616 | u64 mask; |
8617 | |
	rc = pci_enable_device(ctrl_info->pci_dev);
8619 | if (rc) { |
8620 | dev_err(&ctrl_info->pci_dev->dev, |
8621 | "failed to enable PCI device\n"); |
8622 | return rc; |
8623 | } |
8624 | |
8625 | if (sizeof(dma_addr_t) > 4) |
8626 | mask = DMA_BIT_MASK(64); |
8627 | else |
8628 | mask = DMA_BIT_MASK(32); |
8629 | |
	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8631 | if (rc) { |
8632 | dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); |
8633 | goto disable_device; |
8634 | } |
8635 | |
8636 | rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); |
8637 | if (rc) { |
8638 | dev_err(&ctrl_info->pci_dev->dev, |
8639 | "failed to obtain PCI resources\n"); |
8640 | goto disable_device; |
8641 | } |
8642 | |
8643 | ctrl_info->iomem_base = ioremap(pci_resource_start( |
8644 | ctrl_info->pci_dev, 0), |
8645 | pci_resource_len(ctrl_info->pci_dev, 0)); |
8646 | if (!ctrl_info->iomem_base) { |
8647 | dev_err(&ctrl_info->pci_dev->dev, |
8648 | "failed to map memory for controller registers\n"); |
8649 | rc = -ENOMEM; |
8650 | goto release_regions; |
8651 | } |
8652 | |
8653 | #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 |
8654 | |
8655 | /* Increase the PCIe completion timeout. */ |
	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8658 | if (rc) { |
8659 | dev_err(&ctrl_info->pci_dev->dev, |
8660 | "failed to set PCIe completion timeout\n"); |
8661 | goto release_regions; |
8662 | } |
8663 | |
8664 | /* Enable bus mastering. */ |
	pci_set_master(ctrl_info->pci_dev);
8666 | |
8667 | ctrl_info->registers = ctrl_info->iomem_base; |
8668 | ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; |
8669 | |
	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8671 | |
8672 | return 0; |
8673 | |
8674 | release_regions: |
8675 | pci_release_regions(ctrl_info->pci_dev); |
8676 | disable_device: |
	pci_disable_device(ctrl_info->pci_dev);
8678 | |
8679 | return rc; |
8680 | } |
8681 | |
8682 | static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) |
8683 | { |
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
8689 | } |
8690 | |
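/* Allocate and initialize the per-controller context on the controller's NUMA node. */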
8691 | static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) |
8692 | { |
8693 | struct pqi_ctrl_info *ctrl_info; |
8694 | |
	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
8697 | if (!ctrl_info) |
8698 | return NULL; |
8699 | |
8700 | mutex_init(&ctrl_info->scan_mutex); |
8701 | mutex_init(&ctrl_info->lun_reset_mutex); |
8702 | mutex_init(&ctrl_info->ofa_mutex); |
8703 | |
	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8705 | spin_lock_init(&ctrl_info->scsi_device_list_lock); |
8706 | |
8707 | INIT_WORK(&ctrl_info->event_work, pqi_event_worker); |
	atomic_set(&ctrl_info->num_interrupts, 0);
8709 | |
8710 | INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); |
8711 | INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); |
8712 | |
8713 | timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); |
8714 | INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); |
8715 | |
8716 | INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); |
8717 | INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); |
8718 | |
	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8721 | init_waitqueue_head(&ctrl_info->block_requests_wait); |
8722 | |
	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8724 | ctrl_info->irq_mode = IRQ_MODE_NONE; |
8725 | ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; |
8726 | |
8727 | ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; |
8728 | ctrl_info->max_transfer_encrypted_sas_sata = |
8729 | PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; |
8730 | ctrl_info->max_transfer_encrypted_nvme = |
8731 | PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; |
8732 | ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; |
8733 | ctrl_info->max_write_raid_1_10_2drive = ~0; |
8734 | ctrl_info->max_write_raid_1_10_3drive = ~0; |
8735 | ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; |
8736 | |
8737 | return ctrl_info; |
8738 | } |
8739 | |
8740 | static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) |
8741 | { |
	kfree(ctrl_info);
8743 | } |
8744 | |
8745 | static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) |
8746 | { |
8747 | pqi_free_irqs(ctrl_info); |
8748 | pqi_disable_msix_interrupts(ctrl_info); |
8749 | } |
8750 | |
8751 | static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) |
8752 | { |
8753 | pqi_free_interrupts(ctrl_info); |
8754 | if (ctrl_info->queue_memory_base) |
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
8770 | if (ctrl_info->iomem_base) |
8771 | pqi_cleanup_pci_init(ctrl_info); |
8772 | pqi_free_ctrl_info(ctrl_info); |
8773 | } |
8774 | |
8775 | static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) |
8776 | { |
8777 | ctrl_info->controller_online = false; |
8778 | pqi_stop_heartbeat_timer(ctrl_info); |
8779 | pqi_ctrl_block_requests(ctrl_info); |
8780 | pqi_cancel_rescan_worker(ctrl_info); |
8781 | pqi_cancel_update_time_worker(ctrl_info); |
8782 | if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { |
8783 | pqi_fail_all_outstanding_requests(ctrl_info); |
8784 | ctrl_info->pqi_mode_enabled = false; |
8785 | } |
8786 | pqi_unregister_scsi(ctrl_info); |
8787 | if (ctrl_info->pqi_mode_enabled) |
8788 | pqi_revert_to_sis_mode(ctrl_info); |
8789 | pqi_free_ctrl_resources(ctrl_info); |
8790 | } |
8791 | |
8792 | static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) |
8793 | { |
8794 | pqi_ctrl_block_scan(ctrl_info); |
8795 | pqi_scsi_block_requests(ctrl_info); |
8796 | pqi_ctrl_block_device_reset(ctrl_info); |
8797 | pqi_ctrl_block_requests(ctrl_info); |
8798 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
8799 | pqi_stop_heartbeat_timer(ctrl_info); |
8800 | } |
8801 | |
8802 | static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) |
8803 | { |
8804 | pqi_start_heartbeat_timer(ctrl_info); |
8805 | pqi_ctrl_unblock_requests(ctrl_info); |
8806 | pqi_ctrl_unblock_device_reset(ctrl_info); |
8807 | pqi_scsi_unblock_requests(ctrl_info); |
8808 | pqi_ctrl_unblock_scan(ctrl_info); |
8809 | } |
8810 | |
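/*
 * Allocate the OFA host buffer as sg_count DMA-coherent chunks of chunk_size
 * bytes each and record them in the OFA memory descriptor list.
 */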
8811 | static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) |
8812 | { |
8813 | int i; |
8814 | u32 sg_count; |
8815 | struct device *dev; |
8816 | struct pqi_ofa_memory *ofap; |
8817 | struct pqi_sg_descriptor *mem_descriptor; |
8818 | dma_addr_t dma_handle; |
8819 | |
8820 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; |
8821 | |
8822 | sg_count = DIV_ROUND_UP(total_size, chunk_size); |
8823 | if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) |
8824 | goto out; |
8825 | |
	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8827 | if (!ctrl_info->pqi_ofa_chunk_virt_addr) |
8828 | goto out; |
8829 | |
8830 | dev = &ctrl_info->pci_dev->dev; |
8831 | |
	for (i = 0; i < sg_count; i++) {
		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			goto out_free_chunks;
		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8845 | |
8846 | return 0; |
8847 | |
out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8856 | |
8857 | out: |
8858 | return -ENOMEM; |
8859 | } |
8860 | |
8861 | static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) |
8862 | { |
8863 | u32 total_size; |
8864 | u32 chunk_size; |
8865 | u32 min_chunk_size; |
8866 | |
8867 | if (ctrl_info->ofa_bytes_requested == 0) |
8868 | return 0; |
8869 | |
8870 | total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); |
8871 | min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); |
8872 | min_chunk_size = PAGE_ALIGN(min_chunk_size); |
8873 | |
8874 | for (chunk_size = total_size; chunk_size >= min_chunk_size;) { |
8875 | if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) |
8876 | return 0; |
8877 | chunk_size /= 2; |
8878 | chunk_size = PAGE_ALIGN(chunk_size); |
8879 | } |
8880 | |
8881 | return -ENOMEM; |
8882 | } |
8883 | |
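/*
 * Allocate the OFA memory descriptor structure and its host buffer, then
 * stamp the structure with the OFA signature and version.
 */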
8884 | static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) |
8885 | { |
8886 | struct device *dev; |
8887 | struct pqi_ofa_memory *ofap; |
8888 | |
8889 | dev = &ctrl_info->pci_dev->dev; |
8890 | |
	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8893 | if (!ofap) |
8894 | return; |
8895 | |
8896 | ctrl_info->pqi_ofa_mem_virt_addr = ofap; |
8897 | |
8898 | if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { |
8899 | dev_err(dev, |
8900 | "failed to allocate host buffer for Online Firmware Activation\n"); |
		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8902 | ctrl_info->pqi_ofa_mem_virt_addr = NULL; |
8903 | return; |
8904 | } |
8905 | |
	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8907 | memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); |
8908 | } |
8909 | |
8910 | static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) |
8911 | { |
8912 | unsigned int i; |
8913 | struct device *dev; |
8914 | struct pqi_ofa_memory *ofap; |
8915 | struct pqi_sg_descriptor *mem_descriptor; |
8916 | unsigned int num_memory_descriptors; |
8917 | |
8918 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; |
8919 | if (!ofap) |
8920 | return; |
8921 | |
8922 | dev = &ctrl_info->pci_dev->dev; |
8923 | |
	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8925 | goto out; |
8926 | |
8927 | mem_descriptor = ofap->sg_descriptor; |
	num_memory_descriptors =
		get_unaligned_le16(&ofap->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(dev, sizeof(*ofap), ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
8942 | ctrl_info->pqi_ofa_mem_virt_addr = NULL; |
8943 | } |
8944 | |
8945 | static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) |
8946 | { |
8947 | u32 buffer_length; |
8948 | struct pqi_vendor_general_request request; |
8949 | struct pqi_ofa_memory *ofap; |
8950 | |
8951 | memset(&request, 0, sizeof(request)); |
8952 | |
8953 | request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; |
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (ofap) {
		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8973 | } |
8974 | |
8975 | static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) |
8976 | { |
	ssleep(delay_secs);
8978 | |
8979 | return pqi_ctrl_init_resume(ctrl_info); |
8980 | } |
8981 | |
8982 | static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { |
8983 | .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, |
8984 | .status = SAM_STAT_CHECK_CONDITION, |
8985 | }; |
8986 | |
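/*
 * Complete all outstanding I/O requests with an error once the controller is
 * offline so that nothing is left waiting on dead hardware.
 */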
8987 | static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) |
8988 | { |
8989 | unsigned int i; |
8990 | struct pqi_io_request *io_request; |
8991 | struct scsi_cmnd *scmd; |
8992 | struct scsi_device *sdev; |
8993 | |
8994 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
8995 | io_request = &ctrl_info->io_request_pool[i]; |
		if (atomic_read(&io_request->refcount) == 0)
8997 | continue; |
8998 | |
8999 | scmd = io_request->scmd; |
9000 | if (scmd) { |
9001 | sdev = scmd->device; |
9002 | if (!sdev || !scsi_device_online(sdev)) { |
9003 | pqi_free_io_request(io_request); |
9004 | continue; |
9005 | } else { |
				set_host_byte(scmd, DID_NO_CONNECT);
9007 | } |
9008 | } else { |
9009 | io_request->status = -ENXIO; |
9010 | io_request->error_info = |
9011 | &pqi_ctrl_offline_raid_error_info; |
9012 | } |
9013 | |
9014 | io_request->io_complete_callback(io_request, |
9015 | io_request->context); |
9016 | } |
9017 | } |
9018 | |
9019 | static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) |
9020 | { |
9021 | pqi_perform_lockup_action(); |
9022 | pqi_stop_heartbeat_timer(ctrl_info); |
9023 | pqi_free_interrupts(ctrl_info); |
9024 | pqi_cancel_rescan_worker(ctrl_info); |
9025 | pqi_cancel_update_time_worker(ctrl_info); |
9026 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
9027 | pqi_fail_all_outstanding_requests(ctrl_info); |
9028 | pqi_ctrl_unblock_requests(ctrl_info); |
9029 | } |
9030 | |
9031 | static void pqi_ctrl_offline_worker(struct work_struct *work) |
9032 | { |
9033 | struct pqi_ctrl_info *ctrl_info; |
9034 | |
9035 | ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); |
9036 | pqi_take_ctrl_offline_deferred(ctrl_info); |
9037 | } |
9038 | |
9039 | static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) |
9040 | { |
9041 | char *string; |
9042 | |
9043 | switch (ctrl_shutdown_reason) { |
9044 | case PQI_IQ_NOT_DRAINED_TIMEOUT: |
9045 | string = "inbound queue not drained timeout"; |
9046 | break; |
9047 | case PQI_LUN_RESET_TIMEOUT: |
9048 | string = "LUN reset timeout"; |
9049 | break; |
9050 | case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: |
9051 | string = "I/O pending timeout after LUN reset"; |
9052 | break; |
9053 | case PQI_NO_HEARTBEAT: |
9054 | string = "no controller heartbeat detected"; |
9055 | break; |
9056 | case PQI_FIRMWARE_KERNEL_NOT_UP: |
9057 | string = "firmware kernel not ready"; |
9058 | break; |
9059 | case PQI_OFA_RESPONSE_TIMEOUT: |
9060 | string = "OFA response timeout"; |
9061 | break; |
9062 | case PQI_INVALID_REQ_ID: |
9063 | string = "invalid request ID"; |
9064 | break; |
9065 | case PQI_UNMATCHED_REQ_ID: |
9066 | string = "unmatched request ID"; |
9067 | break; |
9068 | case PQI_IO_PI_OUT_OF_RANGE: |
9069 | string = "I/O queue producer index out of range"; |
9070 | break; |
9071 | case PQI_EVENT_PI_OUT_OF_RANGE: |
9072 | string = "event queue producer index out of range"; |
9073 | break; |
9074 | case PQI_UNEXPECTED_IU_TYPE: |
9075 | string = "unexpected IU type"; |
9076 | break; |
9077 | default: |
9078 | string = "unknown reason"; |
9079 | break; |
9080 | } |
9081 | |
9082 | return string; |
9083 | } |
9084 | |
9085 | static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, |
9086 | enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) |
9087 | { |
9088 | if (!ctrl_info->controller_online) |
9089 | return; |
9090 | |
9091 | ctrl_info->controller_online = false; |
9092 | ctrl_info->pqi_mode_enabled = false; |
9093 | pqi_ctrl_block_requests(ctrl_info); |
9094 | if (!pqi_disable_ctrl_shutdown) |
9095 | sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); |
	pci_disable_device(ctrl_info->pci_dev);
9097 | dev_err(&ctrl_info->pci_dev->dev, |
9098 | "controller offline: reason code 0x%x (%s)\n", |
9099 | ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); |
	schedule_work(&ctrl_info->ctrl_offline_work);
9101 | } |
9102 | |
9103 | static void pqi_print_ctrl_info(struct pci_dev *pci_dev, |
9104 | const struct pci_device_id *id) |
9105 | { |
9106 | char *ctrl_description; |
9107 | |
9108 | if (id->driver_data) |
9109 | ctrl_description = (char *)id->driver_data; |
9110 | else |
9111 | ctrl_description = "Microchip Smart Family Controller"; |
9112 | |
9113 | dev_info(&pci_dev->dev, "%s found\n", ctrl_description); |
9114 | } |
9115 | |
9116 | static int pqi_pci_probe(struct pci_dev *pci_dev, |
9117 | const struct pci_device_id *id) |
9118 | { |
9119 | int rc; |
9120 | int node; |
9121 | struct pqi_ctrl_info *ctrl_info; |
9122 | |
9123 | pqi_print_ctrl_info(pci_dev, id); |
9124 | |
9125 | if (pqi_disable_device_id_wildcards && |
9126 | id->subvendor == PCI_ANY_ID && |
9127 | id->subdevice == PCI_ANY_ID) { |
9128 | dev_warn(&pci_dev->dev, |
9129 | "controller not probed because device ID wildcards are disabled\n"); |
9130 | return -ENODEV; |
9131 | } |
9132 | |
9133 | if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) |
9134 | dev_warn(&pci_dev->dev, |
9135 | "controller device ID matched using wildcards\n"); |
9136 | |
	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		node = cpu_to_node(0);
		if (node == NUMA_NO_NODE)
			node = 0;
		set_dev_node(&pci_dev->dev, node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
9146 | if (!ctrl_info) { |
9147 | dev_err(&pci_dev->dev, |
9148 | "failed to allocate controller info block\n"); |
9149 | return -ENOMEM; |
9150 | } |
9151 | ctrl_info->numa_node = node; |
9152 | |
9153 | ctrl_info->pci_dev = pci_dev; |
9154 | |
9155 | rc = pqi_pci_init(ctrl_info); |
9156 | if (rc) |
9157 | goto error; |
9158 | |
9159 | rc = pqi_ctrl_init(ctrl_info); |
9160 | if (rc) |
9161 | goto error; |
9162 | |
9163 | return 0; |
9164 | |
9165 | error: |
9166 | pqi_remove_ctrl(ctrl_info); |
9167 | |
9168 | return rc; |
9169 | } |
9170 | |
9171 | static void pqi_pci_remove(struct pci_dev *pci_dev) |
9172 | { |
9173 | struct pqi_ctrl_info *ctrl_info; |
9174 | u16 vendor_id; |
9175 | int rc; |
9176 | |
	ctrl_info = pci_get_drvdata(pci_dev);
9178 | if (!ctrl_info) |
9179 | return; |
9180 | |
	pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9182 | if (vendor_id == 0xffff) |
9183 | ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; |
9184 | else |
9185 | ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; |
9186 | |
9187 | if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { |
		rc = pqi_flush_cache(ctrl_info, RESTART);
9189 | if (rc) |
9190 | dev_err(&pci_dev->dev, |
9191 | "unable to flush controller cache during remove\n"); |
9192 | } |
9193 | |
9194 | pqi_remove_ctrl(ctrl_info); |
9195 | } |
9196 | |
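/* Warn if any I/O request is still outstanding when the controller is being shut down or suspended. */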
9197 | static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) |
9198 | { |
9199 | unsigned int i; |
9200 | struct pqi_io_request *io_request; |
9201 | struct scsi_cmnd *scmd; |
9202 | |
9203 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
9204 | io_request = &ctrl_info->io_request_pool[i]; |
		if (atomic_read(&io_request->refcount) == 0)
9206 | continue; |
9207 | scmd = io_request->scmd; |
9208 | WARN_ON(scmd != NULL); /* IO command from SML */ |
9209 | WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ |
9210 | } |
9211 | } |
9212 | |
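/*
 * PCI shutdown entry point: quiesce the controller, flush its battery-backed
 * write cache, and reset it.
 */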
9213 | static void pqi_shutdown(struct pci_dev *pci_dev) |
9214 | { |
9215 | int rc; |
9216 | struct pqi_ctrl_info *ctrl_info; |
9217 | enum bmic_flush_cache_shutdown_event shutdown_event; |
9218 | |
	ctrl_info = pci_get_drvdata(pci_dev);
9220 | if (!ctrl_info) { |
9221 | dev_err(&pci_dev->dev, |
9222 | "cache could not be flushed\n"); |
9223 | return; |
9224 | } |
9225 | |
9226 | pqi_wait_until_ofa_finished(ctrl_info); |
9227 | |
9228 | pqi_scsi_block_requests(ctrl_info); |
9229 | pqi_ctrl_block_device_reset(ctrl_info); |
9230 | pqi_ctrl_block_requests(ctrl_info); |
9231 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
9232 | |
9233 | if (system_state == SYSTEM_RESTART) |
9234 | shutdown_event = RESTART; |
9235 | else |
9236 | shutdown_event = SHUTDOWN; |
9237 | |
9238 | /* |
9239 | * Write all data in the controller's battery-backed cache to |
9240 | * storage. |
9241 | */ |
9242 | rc = pqi_flush_cache(ctrl_info, shutdown_event); |
9243 | if (rc) |
9244 | dev_err(&pci_dev->dev, |
9245 | "unable to flush controller cache during shutdown\n"); |
9246 | |
9247 | pqi_crash_if_pending_command(ctrl_info); |
9248 | pqi_reset(ctrl_info); |
9249 | } |
9250 | |
9251 | static void pqi_process_lockup_action_param(void) |
9252 | { |
9253 | unsigned int i; |
9254 | |
9255 | if (!pqi_lockup_action_param) |
9256 | return; |
9257 | |
9258 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { |
9259 | if (strcmp(pqi_lockup_action_param, |
9260 | pqi_lockup_actions[i].name) == 0) { |
9261 | pqi_lockup_action = pqi_lockup_actions[i].action; |
9262 | return; |
9263 | } |
9264 | } |
9265 | |
9266 | pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", |
9267 | DRIVER_NAME_SHORT, pqi_lockup_action_param); |
9268 | } |
9269 | |
9270 | #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 |
9271 | #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) |
9272 | |
9273 | static void pqi_process_ctrl_ready_timeout_param(void) |
9274 | { |
9275 | if (pqi_ctrl_ready_timeout_secs == 0) |
9276 | return; |
9277 | |
9278 | if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { |
9279 | pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", |
9280 | DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); |
9281 | pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; |
9282 | } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { |
9283 | pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", |
9284 | DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); |
9285 | pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; |
9286 | } |
9287 | |
9288 | sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; |
9289 | } |
9290 | |
9291 | static void pqi_process_module_params(void) |
9292 | { |
9293 | pqi_process_lockup_action_param(); |
9294 | pqi_process_ctrl_ready_timeout_param(); |
9295 | } |
9296 | |
9297 | #if defined(CONFIG_PM) |
9298 | |
9299 | static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) |
9300 | { |
9301 | if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) |
9302 | return RESTART; |
9303 | |
9304 | return SUSPEND; |
9305 | } |
9306 | |
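/*
 * Common suspend/freeze path: quiesce all I/O, flush the controller cache
 * when suspending, stop the heartbeat timer, and release the interrupts.
 */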
9307 | static int pqi_suspend_or_freeze(struct device *dev, bool suspend) |
9308 | { |
9309 | struct pci_dev *pci_dev; |
9310 | struct pqi_ctrl_info *ctrl_info; |
9311 | |
9312 | pci_dev = to_pci_dev(dev); |
	ctrl_info = pci_get_drvdata(pci_dev);
9314 | |
9315 | pqi_wait_until_ofa_finished(ctrl_info); |
9316 | |
9317 | pqi_ctrl_block_scan(ctrl_info); |
9318 | pqi_scsi_block_requests(ctrl_info); |
9319 | pqi_ctrl_block_device_reset(ctrl_info); |
9320 | pqi_ctrl_block_requests(ctrl_info); |
9321 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
9322 | |
9323 | if (suspend) { |
9324 | enum bmic_flush_cache_shutdown_event shutdown_event; |
9325 | |
9326 | shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); |
9327 | pqi_flush_cache(ctrl_info, shutdown_event); |
9328 | } |
9329 | |
9330 | pqi_stop_heartbeat_timer(ctrl_info); |
9331 | pqi_crash_if_pending_command(ctrl_info); |
9332 | pqi_free_irqs(ctrl_info); |
9333 | |
9334 | ctrl_info->controller_online = false; |
9335 | ctrl_info->pqi_mode_enabled = false; |
9336 | |
9337 | return 0; |
9338 | } |
9339 | |
9340 | static __maybe_unused int pqi_suspend(struct device *dev) |
9341 | { |
	return pqi_suspend_or_freeze(dev, true);
9343 | } |
9344 | |
9345 | static int pqi_resume_or_restore(struct device *dev) |
9346 | { |
9347 | int rc; |
9348 | struct pci_dev *pci_dev; |
9349 | struct pqi_ctrl_info *ctrl_info; |
9350 | |
9351 | pci_dev = to_pci_dev(dev); |
	ctrl_info = pci_get_drvdata(pci_dev);
9353 | |
9354 | rc = pqi_request_irqs(ctrl_info); |
9355 | if (rc) |
9356 | return rc; |
9357 | |
9358 | pqi_ctrl_unblock_device_reset(ctrl_info); |
9359 | pqi_ctrl_unblock_requests(ctrl_info); |
9360 | pqi_scsi_unblock_requests(ctrl_info); |
9361 | pqi_ctrl_unblock_scan(ctrl_info); |
9362 | |
9363 | ssleep(PQI_POST_RESET_DELAY_SECS); |
9364 | |
9365 | return pqi_ctrl_init_resume(ctrl_info); |
9366 | } |
9367 | |
9368 | static int pqi_freeze(struct device *dev) |
9369 | { |
	return pqi_suspend_or_freeze(dev, false);
9371 | } |
9372 | |
9373 | static int pqi_thaw(struct device *dev) |
9374 | { |
9375 | int rc; |
9376 | struct pci_dev *pci_dev; |
9377 | struct pqi_ctrl_info *ctrl_info; |
9378 | |
9379 | pci_dev = to_pci_dev(dev); |
	ctrl_info = pci_get_drvdata(pci_dev);
9381 | |
9382 | rc = pqi_request_irqs(ctrl_info); |
9383 | if (rc) |
9384 | return rc; |
9385 | |
9386 | ctrl_info->controller_online = true; |
9387 | ctrl_info->pqi_mode_enabled = true; |
9388 | |
9389 | pqi_ctrl_unblock_device_reset(ctrl_info); |
9390 | pqi_ctrl_unblock_requests(ctrl_info); |
9391 | pqi_scsi_unblock_requests(ctrl_info); |
9392 | pqi_ctrl_unblock_scan(ctrl_info); |
9393 | |
9394 | return 0; |
9395 | } |
9396 | |
9397 | static int pqi_poweroff(struct device *dev) |
9398 | { |
9399 | struct pci_dev *pci_dev; |
9400 | struct pqi_ctrl_info *ctrl_info; |
9401 | enum bmic_flush_cache_shutdown_event shutdown_event; |
9402 | |
9403 | pci_dev = to_pci_dev(dev); |
	ctrl_info = pci_get_drvdata(pci_dev);
9405 | |
9406 | shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); |
9407 | pqi_flush_cache(ctrl_info, shutdown_event); |
9408 | |
9409 | return 0; |
9410 | } |
9411 | |
9412 | static const struct dev_pm_ops pqi_pm_ops = { |
9413 | .suspend = pqi_suspend, |
9414 | .resume = pqi_resume_or_restore, |
9415 | .freeze = pqi_freeze, |
9416 | .thaw = pqi_thaw, |
9417 | .poweroff = pqi_poweroff, |
9418 | .restore = pqi_resume_or_restore, |
9419 | }; |
9420 | |
9421 | #endif /* CONFIG_PM */ |
9422 | |
9423 | /* Define the PCI IDs for the controllers that we support. */ |
9424 | static const struct pci_device_id pqi_pci_id_table[] = { |
9425 | { |
9426 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9427 | 0x105b, 0x1211) |
9428 | }, |
9429 | { |
9430 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9431 | 0x105b, 0x1321) |
9432 | }, |
9433 | { |
9434 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9435 | 0x152d, 0x8a22) |
9436 | }, |
9437 | { |
9438 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9439 | 0x152d, 0x8a23) |
9440 | }, |
9441 | { |
9442 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9443 | 0x152d, 0x8a24) |
9444 | }, |
9445 | { |
9446 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9447 | 0x152d, 0x8a36) |
9448 | }, |
9449 | { |
9450 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9451 | 0x152d, 0x8a37) |
9452 | }, |
9453 | { |
9454 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9455 | 0x193d, 0x1104) |
9456 | }, |
9457 | { |
9458 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9459 | 0x193d, 0x1105) |
9460 | }, |
9461 | { |
9462 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9463 | 0x193d, 0x1106) |
9464 | }, |
9465 | { |
9466 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9467 | 0x193d, 0x1107) |
9468 | }, |
9469 | { |
9470 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9471 | 0x193d, 0x1108) |
9472 | }, |
9473 | { |
9474 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9475 | 0x193d, 0x1109) |
9476 | }, |
9477 | { |
9478 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9479 | 0x193d, 0x110b) |
9480 | }, |
9481 | { |
9482 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9483 | 0x193d, 0x8460) |
9484 | }, |
9485 | { |
9486 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9487 | 0x193d, 0x8461) |
9488 | }, |
9489 | { |
9490 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9491 | 0x193d, 0xc460) |
9492 | }, |
9493 | { |
9494 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9495 | 0x193d, 0xc461) |
9496 | }, |
9497 | { |
9498 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9499 | 0x193d, 0xf460) |
9500 | }, |
9501 | { |
9502 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9503 | 0x193d, 0xf461) |
9504 | }, |
9505 | { |
9506 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9507 | 0x1bd4, 0x0045) |
9508 | }, |
9509 | { |
9510 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9511 | 0x1bd4, 0x0046) |
9512 | }, |
9513 | { |
9514 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9515 | 0x1bd4, 0x0047) |
9516 | }, |
9517 | { |
9518 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9519 | 0x1bd4, 0x0048) |
9520 | }, |
9521 | { |
9522 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9523 | 0x1bd4, 0x004a) |
9524 | }, |
9525 | { |
9526 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9527 | 0x1bd4, 0x004b) |
9528 | }, |
9529 | { |
9530 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9531 | 0x1bd4, 0x004c) |
9532 | }, |
9533 | { |
9534 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9535 | 0x1bd4, 0x004f) |
9536 | }, |
9537 | { |
9538 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9539 | 0x1bd4, 0x0051) |
9540 | }, |
9541 | { |
9542 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9543 | 0x1bd4, 0x0052) |
9544 | }, |
9545 | { |
9546 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9547 | 0x1bd4, 0x0053) |
9548 | }, |
9549 | { |
9550 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9551 | 0x1bd4, 0x0054) |
9552 | }, |
9553 | { |
9554 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9555 | 0x1bd4, 0x006b) |
9556 | }, |
9557 | { |
9558 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9559 | 0x1bd4, 0x006c) |
9560 | }, |
9561 | { |
9562 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9563 | 0x1bd4, 0x006d) |
9564 | }, |
9565 | { |
9566 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9567 | 0x1bd4, 0x006f) |
9568 | }, |
9569 | { |
9570 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9571 | 0x1bd4, 0x0070) |
9572 | }, |
9573 | { |
9574 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9575 | 0x1bd4, 0x0071) |
9576 | }, |
9577 | { |
9578 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9579 | 0x1bd4, 0x0072) |
9580 | }, |
9581 | { |
9582 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9583 | 0x1bd4, 0x0086) |
9584 | }, |
9585 | { |
9586 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9587 | 0x1bd4, 0x0087) |
9588 | }, |
9589 | { |
9590 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9591 | 0x1bd4, 0x0088) |
9592 | }, |
9593 | { |
9594 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9595 | 0x1bd4, 0x0089) |
9596 | }, |
9597 | { |
9598 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9599 | 0x19e5, 0xd227) |
9600 | }, |
9601 | { |
9602 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9603 | 0x19e5, 0xd228) |
9604 | }, |
9605 | { |
9606 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9607 | 0x19e5, 0xd229) |
9608 | }, |
9609 | { |
9610 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9611 | 0x19e5, 0xd22a) |
9612 | }, |
9613 | { |
9614 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9615 | 0x19e5, 0xd22b) |
9616 | }, |
9617 | { |
9618 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9619 | 0x19e5, 0xd22c) |
9620 | }, |
9621 | { |
9622 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9623 | PCI_VENDOR_ID_ADAPTEC2, 0x0110) |
9624 | }, |
9625 | { |
9626 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9627 | PCI_VENDOR_ID_ADAPTEC2, 0x0608) |
9628 | }, |
9629 | { |
9630 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9631 | PCI_VENDOR_ID_ADAPTEC2, 0x0659) |
9632 | }, |
9633 | { |
9634 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9635 | PCI_VENDOR_ID_ADAPTEC2, 0x0800) |
9636 | }, |
9637 | { |
9638 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9639 | PCI_VENDOR_ID_ADAPTEC2, 0x0801) |
9640 | }, |
9641 | { |
9642 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9643 | PCI_VENDOR_ID_ADAPTEC2, 0x0802) |
9644 | }, |
9645 | { |
9646 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9647 | PCI_VENDOR_ID_ADAPTEC2, 0x0803) |
9648 | }, |
9649 | { |
9650 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9651 | PCI_VENDOR_ID_ADAPTEC2, 0x0804) |
9652 | }, |
9653 | { |
9654 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9655 | PCI_VENDOR_ID_ADAPTEC2, 0x0805) |
9656 | }, |
9657 | { |
9658 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9659 | PCI_VENDOR_ID_ADAPTEC2, 0x0806) |
9660 | }, |
9661 | { |
9662 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9663 | PCI_VENDOR_ID_ADAPTEC2, 0x0807) |
9664 | }, |
9665 | { |
9666 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9667 | PCI_VENDOR_ID_ADAPTEC2, 0x0808) |
9668 | }, |
9669 | { |
9670 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9671 | PCI_VENDOR_ID_ADAPTEC2, 0x0809) |
9672 | }, |
9673 | { |
9674 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9675 | PCI_VENDOR_ID_ADAPTEC2, 0x080a) |
9676 | }, |
9677 | { |
9678 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9679 | PCI_VENDOR_ID_ADAPTEC2, 0x0900) |
9680 | }, |
9681 | { |
9682 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9683 | PCI_VENDOR_ID_ADAPTEC2, 0x0901) |
9684 | }, |
9685 | { |
9686 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9687 | PCI_VENDOR_ID_ADAPTEC2, 0x0902) |
9688 | }, |
9689 | { |
9690 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9691 | PCI_VENDOR_ID_ADAPTEC2, 0x0903) |
9692 | }, |
9693 | { |
9694 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9695 | PCI_VENDOR_ID_ADAPTEC2, 0x0904) |
9696 | }, |
9697 | { |
9698 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9699 | PCI_VENDOR_ID_ADAPTEC2, 0x0905) |
9700 | }, |
9701 | { |
9702 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9703 | PCI_VENDOR_ID_ADAPTEC2, 0x0906) |
9704 | }, |
9705 | { |
9706 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9707 | PCI_VENDOR_ID_ADAPTEC2, 0x0907) |
9708 | }, |
9709 | { |
9710 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9711 | PCI_VENDOR_ID_ADAPTEC2, 0x0908) |
9712 | }, |
9713 | { |
9714 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9715 | PCI_VENDOR_ID_ADAPTEC2, 0x090a) |
9716 | }, |
9717 | { |
9718 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9719 | PCI_VENDOR_ID_ADAPTEC2, 0x1200) |
9720 | }, |
9721 | { |
9722 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9723 | PCI_VENDOR_ID_ADAPTEC2, 0x1201) |
9724 | }, |
9725 | { |
9726 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9727 | PCI_VENDOR_ID_ADAPTEC2, 0x1202) |
9728 | }, |
9729 | { |
9730 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9731 | PCI_VENDOR_ID_ADAPTEC2, 0x1280) |
9732 | }, |
9733 | { |
9734 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9735 | PCI_VENDOR_ID_ADAPTEC2, 0x1281) |
9736 | }, |
9737 | { |
9738 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9739 | PCI_VENDOR_ID_ADAPTEC2, 0x1282) |
9740 | }, |
9741 | { |
9742 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9743 | PCI_VENDOR_ID_ADAPTEC2, 0x1300) |
9744 | }, |
9745 | { |
9746 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9747 | PCI_VENDOR_ID_ADAPTEC2, 0x1301) |
9748 | }, |
9749 | { |
9750 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9751 | PCI_VENDOR_ID_ADAPTEC2, 0x1302) |
9752 | }, |
9753 | { |
9754 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9755 | PCI_VENDOR_ID_ADAPTEC2, 0x1303) |
9756 | }, |
9757 | { |
9758 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9759 | PCI_VENDOR_ID_ADAPTEC2, 0x1304) |
9760 | }, |
9761 | { |
9762 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9763 | PCI_VENDOR_ID_ADAPTEC2, 0x1380) |
9764 | }, |
9765 | { |
9766 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9767 | PCI_VENDOR_ID_ADAPTEC2, 0x1400) |
9768 | }, |
9769 | { |
9770 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9771 | PCI_VENDOR_ID_ADAPTEC2, 0x1402) |
9772 | }, |
9773 | { |
9774 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9775 | PCI_VENDOR_ID_ADAPTEC2, 0x1410) |
9776 | }, |
9777 | { |
9778 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9779 | PCI_VENDOR_ID_ADAPTEC2, 0x1411) |
9780 | }, |
9781 | { |
9782 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9783 | PCI_VENDOR_ID_ADAPTEC2, 0x1412) |
9784 | }, |
9785 | { |
9786 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9787 | PCI_VENDOR_ID_ADAPTEC2, 0x1420) |
9788 | }, |
9789 | { |
9790 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9791 | PCI_VENDOR_ID_ADAPTEC2, 0x1430) |
9792 | }, |
9793 | { |
9794 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9795 | PCI_VENDOR_ID_ADAPTEC2, 0x1440) |
9796 | }, |
9797 | { |
9798 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9799 | PCI_VENDOR_ID_ADAPTEC2, 0x1441) |
9800 | }, |
9801 | { |
9802 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9803 | PCI_VENDOR_ID_ADAPTEC2, 0x1450) |
9804 | }, |
9805 | { |
9806 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9807 | PCI_VENDOR_ID_ADAPTEC2, 0x1452) |
9808 | }, |
9809 | { |
9810 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9811 | PCI_VENDOR_ID_ADAPTEC2, 0x1460) |
9812 | }, |
9813 | { |
9814 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9815 | PCI_VENDOR_ID_ADAPTEC2, 0x1461) |
9816 | }, |
9817 | { |
9818 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9819 | PCI_VENDOR_ID_ADAPTEC2, 0x1462) |
9820 | }, |
9821 | { |
9822 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9823 | PCI_VENDOR_ID_ADAPTEC2, 0x1463) |
9824 | }, |
9825 | { |
9826 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9827 | PCI_VENDOR_ID_ADAPTEC2, 0x1470) |
9828 | }, |
9829 | { |
9830 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9831 | PCI_VENDOR_ID_ADAPTEC2, 0x1471) |
9832 | }, |
9833 | { |
9834 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9835 | PCI_VENDOR_ID_ADAPTEC2, 0x1472) |
9836 | }, |
9837 | { |
9838 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9839 | PCI_VENDOR_ID_ADAPTEC2, 0x1473) |
9840 | }, |
9841 | { |
9842 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9843 | PCI_VENDOR_ID_ADAPTEC2, 0x1474) |
9844 | }, |
9845 | { |
9846 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9847 | PCI_VENDOR_ID_ADAPTEC2, 0x1475) |
9848 | }, |
9849 | { |
9850 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9851 | PCI_VENDOR_ID_ADAPTEC2, 0x1480) |
9852 | }, |
9853 | { |
9854 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9855 | PCI_VENDOR_ID_ADAPTEC2, 0x1490) |
9856 | }, |
9857 | { |
9858 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9859 | PCI_VENDOR_ID_ADAPTEC2, 0x1491) |
9860 | }, |
9861 | { |
9862 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9863 | PCI_VENDOR_ID_ADAPTEC2, 0x14a0) |
9864 | }, |
9865 | { |
9866 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9867 | PCI_VENDOR_ID_ADAPTEC2, 0x14a1) |
9868 | }, |
9869 | { |
9870 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9871 | PCI_VENDOR_ID_ADAPTEC2, 0x14a2) |
9872 | }, |
9873 | { |
9874 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9875 | PCI_VENDOR_ID_ADAPTEC2, 0x14a4) |
9876 | }, |
9877 | { |
9878 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9879 | PCI_VENDOR_ID_ADAPTEC2, 0x14a5) |
9880 | }, |
9881 | { |
9882 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9883 | PCI_VENDOR_ID_ADAPTEC2, 0x14a6) |
9884 | }, |
9885 | { |
9886 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9887 | PCI_VENDOR_ID_ADAPTEC2, 0x14b0) |
9888 | }, |
9889 | { |
9890 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9891 | PCI_VENDOR_ID_ADAPTEC2, 0x14b1) |
9892 | }, |
9893 | { |
9894 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9895 | PCI_VENDOR_ID_ADAPTEC2, 0x14c0) |
9896 | }, |
9897 | { |
9898 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9899 | PCI_VENDOR_ID_ADAPTEC2, 0x14c1) |
9900 | }, |
9901 | { |
9902 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9903 | PCI_VENDOR_ID_ADAPTEC2, 0x14c2) |
9904 | }, |
9905 | { |
9906 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9907 | PCI_VENDOR_ID_ADAPTEC2, 0x14c3) |
9908 | }, |
9909 | { |
9910 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9911 | PCI_VENDOR_ID_ADAPTEC2, 0x14c4) |
9912 | }, |
9913 | { |
9914 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9915 | PCI_VENDOR_ID_ADAPTEC2, 0x14d0) |
9916 | }, |
9917 | { |
9918 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9919 | PCI_VENDOR_ID_ADAPTEC2, 0x14e0) |
9920 | }, |
9921 | { |
9922 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9923 | PCI_VENDOR_ID_ADAPTEC2, 0x14f0) |
9924 | }, |
9925 | { |
9926 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9927 | PCI_VENDOR_ID_ADVANTECH, 0x8312) |
9928 | }, |
9929 | { |
9930 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9931 | PCI_VENDOR_ID_DELL, 0x1fe0) |
9932 | }, |
9933 | { |
9934 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9935 | PCI_VENDOR_ID_HP, 0x0600) |
9936 | }, |
9937 | { |
9938 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9939 | PCI_VENDOR_ID_HP, 0x0601) |
9940 | }, |
9941 | { |
9942 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9943 | PCI_VENDOR_ID_HP, 0x0602) |
9944 | }, |
9945 | { |
9946 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9947 | PCI_VENDOR_ID_HP, 0x0603) |
9948 | }, |
9949 | { |
9950 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9951 | PCI_VENDOR_ID_HP, 0x0609) |
9952 | }, |
9953 | { |
9954 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9955 | PCI_VENDOR_ID_HP, 0x0650) |
9956 | }, |
9957 | { |
9958 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9959 | PCI_VENDOR_ID_HP, 0x0651) |
9960 | }, |
9961 | { |
9962 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9963 | PCI_VENDOR_ID_HP, 0x0652) |
9964 | }, |
9965 | { |
9966 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9967 | PCI_VENDOR_ID_HP, 0x0653) |
9968 | }, |
9969 | { |
9970 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9971 | PCI_VENDOR_ID_HP, 0x0654) |
9972 | }, |
9973 | { |
9974 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9975 | PCI_VENDOR_ID_HP, 0x0655) |
9976 | }, |
9977 | { |
9978 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9979 | PCI_VENDOR_ID_HP, 0x0700) |
9980 | }, |
9981 | { |
9982 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9983 | PCI_VENDOR_ID_HP, 0x0701) |
9984 | }, |
9985 | { |
9986 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9987 | PCI_VENDOR_ID_HP, 0x1001) |
9988 | }, |
9989 | { |
9990 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9991 | PCI_VENDOR_ID_HP, 0x1002) |
9992 | }, |
9993 | { |
9994 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9995 | PCI_VENDOR_ID_HP, 0x1100) |
9996 | }, |
9997 | { |
9998 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
9999 | PCI_VENDOR_ID_HP, 0x1101) |
10000 | }, |
10001 | { |
10002 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10003 | 0x1590, 0x0294) |
10004 | }, |
10005 | { |
10006 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10007 | 0x1590, 0x02db) |
10008 | }, |
10009 | { |
10010 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10011 | 0x1590, 0x02dc) |
10012 | }, |
10013 | { |
10014 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10015 | 0x1590, 0x032e) |
10016 | }, |
10017 | { |
10018 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10019 | 0x1590, 0x036f) |
10020 | }, |
10021 | { |
10022 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10023 | 0x1590, 0x0381) |
10024 | }, |
10025 | { |
10026 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10027 | 0x1590, 0x0382) |
10028 | }, |
10029 | { |
10030 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10031 | 0x1590, 0x0383) |
10032 | }, |
10033 | { |
10034 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10035 | 0x1d8d, 0x0800) |
10036 | }, |
10037 | { |
10038 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10039 | 0x1d8d, 0x0908) |
10040 | }, |
10041 | { |
10042 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10043 | 0x1d8d, 0x0806) |
10044 | }, |
10045 | { |
10046 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10047 | 0x1d8d, 0x0916) |
10048 | }, |
10049 | { |
10050 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10051 | PCI_VENDOR_ID_GIGABYTE, 0x1000) |
10052 | }, |
10053 | { |
10054 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10055 | 0x1dfc, 0x3161) |
10056 | }, |
10057 | { |
10058 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10059 | 0x1f0c, 0x3161) |
10060 | }, |
10061 | { |
10062 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10063 | 0x1cf2, 0x0804) |
10064 | }, |
10065 | { |
10066 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10067 | 0x1cf2, 0x0805) |
10068 | }, |
10069 | { |
10070 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10071 | 0x1cf2, 0x0806) |
10072 | }, |
10073 | { |
10074 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10075 | 0x1cf2, 0x5445) |
10076 | }, |
10077 | { |
10078 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10079 | 0x1cf2, 0x5446) |
10080 | }, |
10081 | { |
10082 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10083 | 0x1cf2, 0x5447) |
10084 | }, |
10085 | { |
10086 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10087 | 0x1cf2, 0x5449) |
10088 | }, |
10089 | { |
10090 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10091 | 0x1cf2, 0x544a) |
10092 | }, |
10093 | { |
10094 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10095 | 0x1cf2, 0x544b) |
10096 | }, |
10097 | { |
10098 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10099 | 0x1cf2, 0x544d) |
10100 | }, |
10101 | { |
10102 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10103 | 0x1cf2, 0x544e) |
10104 | }, |
10105 | { |
10106 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10107 | 0x1cf2, 0x544f) |
10108 | }, |
10109 | { |
10110 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10111 | 0x1cf2, 0x54da) |
10112 | }, |
10113 | { |
10114 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10115 | 0x1cf2, 0x54db) |
10116 | }, |
10117 | { |
10118 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10119 | 0x1cf2, 0x54dc) |
10120 | }, |
10121 | { |
10122 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10123 | 0x1cf2, 0x0b27) |
10124 | }, |
10125 | { |
10126 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10127 | 0x1cf2, 0x0b29) |
10128 | }, |
10129 | { |
10130 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10131 | 0x1cf2, 0x0b45) |
10132 | }, |
10133 | { |
10134 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10135 | 0x1cc4, 0x0101) |
10136 | }, |
10137 | { |
10138 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10139 | 0x1cc4, 0x0201) |
10140 | }, |
10141 | { |
10142 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10143 | PCI_VENDOR_ID_LENOVO, 0x0220) |
10144 | }, |
10145 | { |
10146 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10147 | PCI_VENDOR_ID_LENOVO, 0x0221) |
10148 | }, |
10149 | { |
10150 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10151 | PCI_VENDOR_ID_LENOVO, 0x0520) |
10152 | }, |
10153 | { |
10154 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10155 | PCI_VENDOR_ID_LENOVO, 0x0522) |
10156 | }, |
10157 | { |
10158 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10159 | PCI_VENDOR_ID_LENOVO, 0x0620) |
10160 | }, |
10161 | { |
10162 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10163 | PCI_VENDOR_ID_LENOVO, 0x0621) |
10164 | }, |
10165 | { |
10166 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10167 | PCI_VENDOR_ID_LENOVO, 0x0622) |
10168 | }, |
10169 | { |
10170 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10171 | PCI_VENDOR_ID_LENOVO, 0x0623) |
10172 | }, |
10173 | { |
10174 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10175 | 0x1014, 0x0718) |
10176 | }, |
10177 | { |
10178 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10179 | 0x1137, 0x02f8) |
10180 | }, |
10181 | { |
10182 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10183 | 0x1137, 0x02f9) |
10184 | }, |
10185 | { |
10186 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10187 | 0x1137, 0x02fa) |
10188 | }, |
10189 | { |
10190 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10191 | 0x1e93, 0x1000) |
10192 | }, |
10193 | { |
10194 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10195 | 0x1e93, 0x1001) |
10196 | }, |
10197 | { |
10198 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10199 | 0x1e93, 0x1002) |
10200 | }, |
10201 | { |
10202 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10203 | 0x1e93, 0x1005) |
10204 | }, |
10205 | { |
10206 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10207 | 0x1f51, 0x1001) |
10208 | }, |
10209 | { |
10210 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10211 | 0x1f51, 0x1002) |
10212 | }, |
10213 | { |
10214 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10215 | 0x1f51, 0x1003) |
10216 | }, |
10217 | { |
10218 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10219 | 0x1f51, 0x1004) |
10220 | }, |
10221 | { |
10222 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10223 | 0x1f51, 0x1005) |
10224 | }, |
10225 | { |
10226 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10227 | 0x1f51, 0x1006) |
10228 | }, |
10229 | { |
10230 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10231 | 0x1f51, 0x1007) |
10232 | }, |
10233 | { |
10234 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10235 | 0x1f51, 0x1008) |
10236 | }, |
10237 | { |
10238 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10239 | 0x1f51, 0x1009) |
10240 | }, |
10241 | { |
10242 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10243 | 0x1f51, 0x100a) |
10244 | }, |
10245 | { |
10246 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10247 | 0x1f51, 0x100e) |
10248 | }, |
10249 | { |
10250 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10251 | 0x1f51, 0x100f) |
10252 | }, |
10253 | { |
10254 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10255 | 0x1f51, 0x1010) |
10256 | }, |
10257 | { |
10258 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10259 | 0x1f51, 0x1011) |
10260 | }, |
10261 | { |
10262 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10263 | 0x1f51, 0x1043) |
10264 | }, |
10265 | { |
10266 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10267 | 0x1f51, 0x1044) |
10268 | }, |
10269 | { |
10270 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10271 | 0x1f51, 0x1045) |
10272 | }, |
10273 | { |
10274 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
10275 | PCI_ANY_ID, PCI_ANY_ID) |
10276 | }, |
10277 | { 0 } |
10278 | }; |
10279 | |
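/*
 * MODULE_DEVICE_TABLE() exports the ID table above so userspace (via the
 * modalias mechanism) can autoload this module when a matching controller
 * is discovered.  The PCI_ANY_ID/PCI_ANY_ID entry acts as a catch-all for
 * any 0x028f device regardless of subsystem IDs, and the zeroed entry
 * terminates the table.
 */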
10280 | MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); |
10281 | |
10282 | static struct pci_driver pqi_pci_driver = { |
10283 | .name = DRIVER_NAME_SHORT, |
10284 | .id_table = pqi_pci_id_table, |
10285 | .probe = pqi_pci_probe, |
10286 | .remove = pqi_pci_remove, |
10287 | .shutdown = pqi_shutdown, |
10288 | #if defined(CONFIG_PM) |
10289 | .driver = { |
10290 | .pm = &pqi_pm_ops |
10291 | }, |
10292 | #endif |
10293 | }; |
10294 | |
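/*
 * Module init order: print the driver banner, validate structure layouts,
 * attach the SAS transport template, apply module parameters, then register
 * the PCI driver.  On pci_register_driver() failure the SAS transport
 * reference is released so a failed load does not leak it.
 */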
10295 | static int __init pqi_init(void) |
10296 | { |
10297 | int rc; |
10298 | |
10299 | pr_info(DRIVER_NAME "\n"); |
10300 | pqi_verify_structures(); |
10301 | sis_verify_structures(); |
10302 | |
10303 | pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); |
10304 | if (!pqi_sas_transport_template) |
10305 | return -ENODEV; |
10306 | |
10307 | pqi_process_module_params(); |
10308 | |
10309 | rc = pci_register_driver(&pqi_pci_driver); |
10310 | if (rc) |
10311 | sas_release_transport(pqi_sas_transport_template); |
10312 | |
10313 | return rc; |
10314 | } |
10315 | |
10316 | static void __exit pqi_cleanup(void) |
10317 | { |
10318 | 	pci_unregister_driver(&pqi_pci_driver); |
10319 | sas_release_transport(pqi_sas_transport_template); |
10320 | } |
10321 | |
10322 | module_init(pqi_init); |
10323 | module_exit(pqi_cleanup); |
10324 | |
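/*
 * pqi_verify_structures() relies on BUILD_BUG_ON(), so every check below is
 * evaluated at compile time: the build fails if a driver structure drifts
 * from the field offsets and sizes required by the PQI/BMIC interface
 * (e.g. sis_host_to_ctrl_doorbell must sit at offset 0x20 of
 * struct pqi_ctrl_registers).
 */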
10325 | static void pqi_verify_structures(void) |
10326 | { |
10327 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10328 | sis_host_to_ctrl_doorbell) != 0x20); |
10329 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10330 | sis_interrupt_mask) != 0x34); |
10331 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10332 | sis_ctrl_to_host_doorbell) != 0x9c); |
10333 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10334 | sis_ctrl_to_host_doorbell_clear) != 0xa0); |
10335 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10336 | sis_driver_scratch) != 0xb0); |
10337 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10338 | sis_product_identifier) != 0xb4); |
10339 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10340 | sis_firmware_status) != 0xbc); |
10341 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10342 | sis_ctrl_shutdown_reason_code) != 0xcc); |
10343 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10344 | sis_mailbox) != 0x1000); |
10345 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
10346 | pqi_registers) != 0x4000); |
10347 | |
10348 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
10349 | iu_type) != 0x0); |
10350 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
10351 | iu_length) != 0x2); |
10352 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
10353 | response_queue_id) != 0x4); |
10354 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
10355 | driver_flags) != 0x6); |
10356 | BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); |
10357 | |
10358 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10359 | status) != 0x0); |
10360 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10361 | service_response) != 0x1); |
10362 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10363 | data_present) != 0x2); |
10364 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10365 | reserved) != 0x3); |
10366 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10367 | residual_count) != 0x4); |
10368 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10369 | data_length) != 0x8); |
10370 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10371 | reserved1) != 0xa); |
10372 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
10373 | data) != 0xc); |
10374 | BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); |
10375 | |
10376 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10377 | data_in_result) != 0x0); |
10378 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10379 | data_out_result) != 0x1); |
10380 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10381 | reserved) != 0x2); |
10382 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10383 | status) != 0x5); |
10384 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10385 | status_qualifier) != 0x6); |
10386 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10387 | sense_data_length) != 0x8); |
10388 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10389 | response_data_length) != 0xa); |
10390 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10391 | data_in_transferred) != 0xc); |
10392 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10393 | data_out_transferred) != 0x10); |
10394 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
10395 | data) != 0x14); |
10396 | BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); |
10397 | |
10398 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10399 | signature) != 0x0); |
10400 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10401 | function_and_status_code) != 0x8); |
10402 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10403 | max_admin_iq_elements) != 0x10); |
10404 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10405 | max_admin_oq_elements) != 0x11); |
10406 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10407 | admin_iq_element_length) != 0x12); |
10408 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10409 | admin_oq_element_length) != 0x13); |
10410 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10411 | max_reset_timeout) != 0x14); |
10412 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10413 | legacy_intx_status) != 0x18); |
10414 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10415 | legacy_intx_mask_set) != 0x1c); |
10416 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10417 | legacy_intx_mask_clear) != 0x20); |
10418 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10419 | device_status) != 0x40); |
10420 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10421 | admin_iq_pi_offset) != 0x48); |
10422 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10423 | admin_oq_ci_offset) != 0x50); |
10424 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10425 | admin_iq_element_array_addr) != 0x58); |
10426 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10427 | admin_oq_element_array_addr) != 0x60); |
10428 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10429 | admin_iq_ci_addr) != 0x68); |
10430 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10431 | admin_oq_pi_addr) != 0x70); |
10432 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10433 | admin_iq_num_elements) != 0x78); |
10434 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10435 | admin_oq_num_elements) != 0x79); |
10436 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10437 | admin_queue_int_msg_num) != 0x7a); |
10438 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10439 | device_error) != 0x80); |
10440 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10441 | error_details) != 0x88); |
10442 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10443 | device_reset) != 0x90); |
10444 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
10445 | power_action) != 0x94); |
10446 | BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); |
10447 | |
10448 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10449 | header.iu_type) != 0); |
10450 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10451 | header.iu_length) != 2); |
10452 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10453 | header.driver_flags) != 6); |
10454 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10455 | request_id) != 8); |
10456 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10457 | function_code) != 10); |
10458 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10459 | data.report_device_capability.buffer_length) != 44); |
10460 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10461 | data.report_device_capability.sg_descriptor) != 48); |
10462 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10463 | data.create_operational_iq.queue_id) != 12); |
10464 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10465 | data.create_operational_iq.element_array_addr) != 16); |
10466 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10467 | data.create_operational_iq.ci_addr) != 24); |
10468 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10469 | data.create_operational_iq.num_elements) != 32); |
10470 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10471 | data.create_operational_iq.element_length) != 34); |
10472 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10473 | data.create_operational_iq.queue_protocol) != 36); |
10474 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10475 | data.create_operational_oq.queue_id) != 12); |
10476 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10477 | data.create_operational_oq.element_array_addr) != 16); |
10478 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10479 | data.create_operational_oq.pi_addr) != 24); |
10480 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10481 | data.create_operational_oq.num_elements) != 32); |
10482 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10483 | data.create_operational_oq.element_length) != 34); |
10484 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10485 | data.create_operational_oq.queue_protocol) != 36); |
10486 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10487 | data.create_operational_oq.int_msg_num) != 40); |
10488 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10489 | data.create_operational_oq.coalescing_count) != 42); |
10490 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10491 | data.create_operational_oq.min_coalescing_time) != 44); |
10492 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10493 | data.create_operational_oq.max_coalescing_time) != 48); |
10494 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
10495 | data.delete_operational_queue.queue_id) != 12); |
10496 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); |
10497 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
10498 | data.create_operational_iq) != 64 - 11); |
10499 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
10500 | data.create_operational_oq) != 64 - 11); |
10501 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
10502 | data.delete_operational_queue) != 64 - 11); |
10503 | |
10504 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10505 | header.iu_type) != 0); |
10506 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10507 | header.iu_length) != 2); |
10508 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10509 | header.driver_flags) != 6); |
10510 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10511 | request_id) != 8); |
10512 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10513 | function_code) != 10); |
10514 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10515 | status) != 11); |
10516 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10517 | data.create_operational_iq.status_descriptor) != 12); |
10518 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10519 | data.create_operational_iq.iq_pi_offset) != 16); |
10520 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10521 | data.create_operational_oq.status_descriptor) != 12); |
10522 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
10523 | data.create_operational_oq.oq_ci_offset) != 16); |
10524 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); |
10525 | |
10526 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10527 | header.iu_type) != 0); |
10528 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10529 | header.iu_length) != 2); |
10530 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10531 | header.response_queue_id) != 4); |
10532 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10533 | header.driver_flags) != 6); |
10534 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10535 | request_id) != 8); |
10536 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10537 | nexus_id) != 10); |
10538 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10539 | buffer_length) != 12); |
10540 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10541 | lun_number) != 16); |
10542 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10543 | protocol_specific) != 24); |
10544 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10545 | error_index) != 27); |
10546 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10547 | cdb) != 32); |
10548 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10549 | timeout) != 60); |
10550 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
10551 | sg_descriptors) != 64); |
10552 | BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != |
10553 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
10554 | |
10555 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10556 | header.iu_type) != 0); |
10557 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10558 | header.iu_length) != 2); |
10559 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10560 | header.response_queue_id) != 4); |
10561 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10562 | header.driver_flags) != 6); |
10563 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10564 | request_id) != 8); |
10565 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10566 | nexus_id) != 12); |
10567 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10568 | buffer_length) != 16); |
10569 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10570 | data_encryption_key_index) != 22); |
10571 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10572 | encrypt_tweak_lower) != 24); |
10573 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10574 | encrypt_tweak_upper) != 28); |
10575 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10576 | cdb) != 32); |
10577 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10578 | error_index) != 48); |
10579 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10580 | num_sg_descriptors) != 50); |
10581 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10582 | cdb_length) != 51); |
10583 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10584 | lun_number) != 52); |
10585 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
10586 | sg_descriptors) != 64); |
10587 | BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != |
10588 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
10589 | |
10590 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
10591 | header.iu_type) != 0); |
10592 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
10593 | header.iu_length) != 2); |
10594 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
10595 | request_id) != 8); |
10596 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
10597 | error_index) != 10); |
10598 | |
10599 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10600 | header.iu_type) != 0); |
10601 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10602 | header.iu_length) != 2); |
10603 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10604 | header.response_queue_id) != 4); |
10605 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10606 | request_id) != 8); |
10607 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10608 | data.report_event_configuration.buffer_length) != 12); |
10609 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10610 | data.report_event_configuration.sg_descriptors) != 16); |
10611 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10612 | data.set_event_configuration.global_event_oq_id) != 10); |
10613 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10614 | data.set_event_configuration.buffer_length) != 12); |
10615 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
10616 | data.set_event_configuration.sg_descriptors) != 16); |
10617 | |
10618 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, |
10619 | max_inbound_iu_length) != 6); |
10620 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, |
10621 | max_outbound_iu_length) != 14); |
10622 | BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); |
10623 | |
10624 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10625 | data_length) != 0); |
10626 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10627 | iq_arbitration_priority_support_bitmask) != 8); |
10628 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10629 | maximum_aw_a) != 9); |
10630 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10631 | maximum_aw_b) != 10); |
10632 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10633 | maximum_aw_c) != 11); |
10634 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10635 | max_inbound_queues) != 16); |
10636 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10637 | max_elements_per_iq) != 18); |
10638 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10639 | max_iq_element_length) != 24); |
10640 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10641 | min_iq_element_length) != 26); |
10642 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10643 | max_outbound_queues) != 30); |
10644 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10645 | max_elements_per_oq) != 32); |
10646 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10647 | intr_coalescing_time_granularity) != 34); |
10648 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10649 | max_oq_element_length) != 36); |
10650 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10651 | min_oq_element_length) != 38); |
10652 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
10653 | iu_layer_descriptors) != 64); |
10654 | BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); |
10655 | |
10656 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, |
10657 | event_type) != 0); |
10658 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, |
10659 | oq_id) != 2); |
10660 | BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); |
10661 | |
10662 | BUILD_BUG_ON(offsetof(struct pqi_event_config, |
10663 | num_event_descriptors) != 2); |
10664 | BUILD_BUG_ON(offsetof(struct pqi_event_config, |
10665 | descriptors) != 4); |
10666 | |
10667 | BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != |
10668 | ARRAY_SIZE(pqi_supported_event_types)); |
10669 | |
10670 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
10671 | header.iu_type) != 0); |
10672 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
10673 | header.iu_length) != 2); |
10674 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
10675 | event_type) != 8); |
10676 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
10677 | event_id) != 10); |
10678 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
10679 | additional_event_id) != 12); |
10680 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
10681 | data) != 16); |
10682 | BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); |
10683 | |
10684 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
10685 | header.iu_type) != 0); |
10686 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
10687 | header.iu_length) != 2); |
10688 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
10689 | event_type) != 8); |
10690 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
10691 | event_id) != 10); |
10692 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
10693 | additional_event_id) != 12); |
10694 | BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); |
10695 | |
10696 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10697 | header.iu_type) != 0); |
10698 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10699 | header.iu_length) != 2); |
10700 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10701 | request_id) != 8); |
10702 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10703 | nexus_id) != 10); |
10704 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10705 | timeout) != 14); |
10706 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10707 | lun_number) != 16); |
10708 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10709 | protocol_specific) != 24); |
10710 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10711 | outbound_queue_id_to_manage) != 26); |
10712 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10713 | request_id_to_manage) != 28); |
10714 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
10715 | task_management_function) != 30); |
10716 | BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); |
10717 | |
10718 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
10719 | header.iu_type) != 0); |
10720 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
10721 | header.iu_length) != 2); |
10722 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
10723 | request_id) != 8); |
10724 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
10725 | nexus_id) != 10); |
10726 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
10727 | additional_response_info) != 12); |
10728 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
10729 | response_code) != 15); |
10730 | BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); |
10731 | |
10732 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10733 | configured_logical_drive_count) != 0); |
10734 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10735 | configuration_signature) != 1); |
10736 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10737 | firmware_version_short) != 5); |
10738 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10739 | extended_logical_unit_count) != 154); |
10740 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10741 | firmware_build_number) != 190); |
10742 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10743 | vendor_id) != 200); |
10744 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10745 | product_id) != 208); |
10746 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10747 | extra_controller_flags) != 286); |
10748 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10749 | controller_mode) != 292); |
10750 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10751 | spare_part_number) != 293); |
10752 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
10753 | firmware_version_long) != 325); |
10754 | |
10755 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10756 | phys_bay_in_box) != 115); |
10757 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10758 | device_type) != 120); |
10759 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10760 | redundant_path_present_map) != 1736); |
10761 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10762 | active_path_number) != 1738); |
10763 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10764 | alternate_paths_phys_connector) != 1739); |
10765 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10766 | alternate_paths_phys_box_on_port) != 1755); |
10767 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
10768 | current_queue_depth_limit) != 1796); |
10769 | BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); |
10770 | |
10771 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); |
10772 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, |
10773 | page_code) != 0); |
10774 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, |
10775 | subpage_code) != 1); |
10776 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, |
10777 | buffer_length) != 2); |
10778 | |
10779 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); |
10780 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, |
10781 | page_code) != 0); |
10782 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, |
10783 | subpage_code) != 1); |
10784 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, |
10785 | page_length) != 2); |
10786 | |
10787 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) |
10788 | != 18); |
10789 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10790 | header) != 0); |
10791 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10792 | firmware_read_support) != 4); |
10793 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10794 | driver_read_support) != 5); |
10795 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10796 | firmware_write_support) != 6); |
10797 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10798 | driver_write_support) != 7); |
10799 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10800 | max_transfer_encrypted_sas_sata) != 8); |
10801 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10802 | max_transfer_encrypted_nvme) != 10); |
10803 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10804 | max_write_raid_5_6) != 12); |
10805 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10806 | max_write_raid_1_10_2drive) != 14); |
10807 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
10808 | max_write_raid_1_10_3drive) != 16); |
10809 | |
10810 | BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); |
10811 | BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); |
10812 | BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % |
10813 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
10814 | BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % |
10815 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
10816 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); |
10817 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % |
10818 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
10819 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); |
10820 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % |
10821 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
10822 | |
10823 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); |
10824 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= |
10825 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); |
10826 | } |
10827 |