/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014 LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */


#include "mpt3sas_base.h"

static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000	/* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, "max controller queue depth");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, "max sg entries");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	"max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	"enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1, and\n\t\t"
	"when poll_queues are enabled, perf_mode is set to latency mode.\n\t\t"
	);

enum mpt3sas_perf_mode {
	MPT_PERF_MODE_DEFAULT	= -1,
	MPT_PERF_MODE_BALANCED	= 0,
	MPT_PERF_MODE_IOPS	= 1,
	MPT_PERF_MODE_LATENCY	= 2,
};

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
		u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

static u32
_base_readl_ext_retry(const void __iomem *addr);

/**
 * mpt3sas_base_check_cmd_timeout - check for a command timeout or
 *	for command termination due to a host reset.
 *
 * @ioc: per adapter object.
 * @status: status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
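 * Note: callers normally reach this through the mpt3sas_check_cmd_timeout()
 * wrapper (see _base_sync_drv_fw_timestamp() below for an example) and
 * issue a hard reset when this reports 1.
 *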
 * Return: 1 if a reset should be issued, else 0.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	u8 status, void *mpi_request, int sz)
{
	u8 issue_reset = 0;

	if (!(status & MPT3_CMD_RESET))
		issue_reset = 1;

	ioc_err(ioc, "Command %s\n",
	    issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: parameter value string
 * @kp: pointer to the kernel_param structure
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry readl() up to three times if it returns zero while
 * reading the system interface register.
 */
static inline u32
_base_readl_aero(const void __iomem *addr)
{
	u32 i = 0, ret_val;

	do {
		ret_val = readl(addr);
		i++;
	} while (ret_val == 0 && i < 3);

	return ret_val;
}

static u32
_base_readl_ext_retry(const void __iomem *addr)
{
	u32 i, ret_val;

	for (i = 0 ; i < 30 ; i++) {
		ret_val = readl(addr);
		if (ret_val != 0)
			break;
	}

	return ret_val;
}

static inline u32
_base_readl(const void __iomem *addr)
{
	return readl(addr);
}

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *	in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32bit addr)
 * @index: system request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * The first 256 bytes of BAR0 hold system registers; MPI frames
	 * start at offset 256.  A maximum of 32 MPI frames of 128 bytes
	 * each is supported (32 * 128 = 4K), and the clone of the reply
	 * free pool for the mCPU starts right after them.
	 */
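	/*
	 * Worked example, using the BAR0 map in _clone_sg_entries()
	 * (maxCredit 32, request_sz 128): the cloned reply free pool
	 * starts at offset 256 + 32 * 128 = 4352, and entry 'index'
	 * sits index * sizeof(u32) bytes into it.
	 */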
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *	to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and returns the virtual chain address
 *	for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
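 * The chain region begins right after the request frames and the reply
 * free pool; with the example map in _clone_sg_entries() (maxCredit 32,
 * request_sz 128, 512 byte reply free pool) that is offset
 * 256 + 32 * 128 + 512 = 4864.  Each smid then owns MaxChainDepth
 * chain frames of request_sz bytes.
 *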
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and returns the physical address
 *	in BAR0 for scatter gather chains, for the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and returns the BAR0 mapped host
 *	buffer address for the provided smid.
 *	(Each smid can have a 64K buffer, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	/* Added extra 1 to reach end of chain. */
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and returns the BAR0 mapped
 *	host buffer physical address for the provided smid.
 *	(Each smid can have a 64K buffer, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates over the chain
 *	lookup list and returns the virtual chain_buffer address for
 *	the matching dma address.
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
	dma_addr_t chain_buffer_dma)
{
	u16 index, j;
	struct chain_tracker *ct;

	for (index = 0; index < ioc->scsiio_depth; index++) {
		for (j = 0; j < ioc->chains_needed_per_io; j++) {
			ct = &ioc->chain_lookup[index].chains_per_smid[j];
			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
				return ct->chain_buffer;
		}
	}
	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
	return NULL;
}

/**
 * _clone_sg_entries - MPI EP's SCSI IO and config requests are handled
 *	here. This is the base function for double buffering, before the
 *	requests are submitted.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
	void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32 sgl_flags, sge_chain_count = 0;
	bool is_write = false;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/* From smid we can get the scsi_cmd; once we have sg_scmd, we
	 * just need sg_virt and sg_next to get the virtual address
	 * associated with sgel->Address.
	 */

	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Reply need extra
	 *		room, for mCPU case kept four times of
	 *		maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K	MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */

	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which, given chain_buffer_dma,
			 * returns chain_buffer - the virtual address for
			 * sgel->Address.
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This copies a 128 byte chain
			 * frame (not a host buffer).
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * FIXME: this relies on a zero
					 * PCI mem_offset.
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
			    << MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have an
				 * associated sg_next. It is better to sanity-
				 * check that sg_next is not NULL; it would be
				 * a bug if it were.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
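 * Removal runs in its own kthread because the remove path stops the
 * watchdog workqueue (see mpt3sas_base_stop_watchdog()); calling
 * pci_stop_and_remove_bus_device_locked() directly from the fault-polling
 * work item could therefore deadlock.
 *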
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 otherwise.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}

/**
 * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
 * @ioc: Per Adapter Object
 *
 * Return: nothing.
 */
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;
	ktime_t current_time;
	u64 TimeStamp = 0;
	u8 issue_reset = 0;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
	current_time = ktime_get_real();
	TimeStamp = ktime_to_ms(current_time);
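	/*
	 * The 64-bit millisecond timestamp is split across two 32-bit
	 * request fields: the upper half goes in Reserved7 and the
	 * lower half in IOCParameterValue.
	 */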
	mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	dinitprintk(ioc, ioc_info(ioc,
	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
	    TimeStamp));
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		dinitprintk(ioc, ioc_info(ioc,
		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo)));
	}
issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
out:
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
	    ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		ioc_err(ioc, "SAS host is non-operational !!!!\n");

		/* It may be possible that EEH recovery can resolve some
		 * pci bus failure issues, rather than removing the dead
		 * ioc function on the assumption that the controller is
		 * in a non-operational state. So here priority is given
		 * to EEH recovery. If it does not resolve the issue, the
		 * mpt3sas driver will consider this controller to be in a
		 * non-operational state and remove the dead ioc function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		mpt3sas_base_pause_mq_polling(ioc);
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
				__func__);
		else
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
				__func__);
		return; /* don't rearm timer */
	}

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
		    ioc->manu_pg11.CoreDumpTOSec :
		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

		timeout /= (FAULT_POLLING_INTERVAL/1000);

		if (ioc->ioc_coredump_loop == 0) {
			mpt3sas_print_coredump_info(ioc,
			    doorbell & MPI2_DOORBELL_DATA_MASK);
			/* do not accept any IOs and disable the interrupts */
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			ioc->shost_recovery = 1;
			spin_unlock_irqrestore(
			    &ioc->ioc_reset_in_progress_lock, flags);
			mpt3sas_base_mask_interrupts(ioc);
			mpt3sas_base_pause_mq_polling(ioc);
			_base_clear_outstanding_commands(ioc);
		}

		ioc_info(ioc, "%s: CoreDump loop %d.",
		    __func__, ioc->ioc_coredump_loop);

		/* Wait until CoreDump completes or times out */
		if (ioc->ioc_coredump_loop++ < timeout) {
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			goto rearm_timer;
		}
	}

	if (ioc->ioc_coredump_loop) {
		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		else
			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
	}
	ioc->non_operational_loop = 0;
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		ioc_warn(ioc, "%s: hard reset: %s\n",
			 __func__, rc == 0 ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP)
			mpt3sas_print_coredump_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}
	ioc->ioc_coredump_loop = 0;
	if (ioc->time_sync_interval &&
	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
		ioc->timestamp_update_count = 0;
		_base_sync_drv_fw_timestamp(ioc);
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	if (ioc->fault_reset_work_q)
		return;

	ioc->timestamp_update_count = 0;
	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return: nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 *	completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
		const char *caller)
{
	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
			ioc->manu_pg11.CoreDumpTOSec :
			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
					timeout);

	if (ioc_state)
		ioc_err(ioc,
		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
		    caller, ioc_state);
	else
		ioc_info(ioc,
		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
		    caller, ioc_state);

	return ioc_state;
}

/**
 * mpt3sas_halt_firmware - halt the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00 to the
 * doorbell register will halt the controller firmware. The purpose of
 * stopping both the driver and the firmware is to let the end user
 * obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		ioc_err(ioc, "Firmware is halted due to command timeout\n");
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;
	/*
	 * Older firmware versions don't support driver trigger pages, so
	 * skip displaying the 'config invalid type' error message for
	 * those requests.
	 */
	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;

		if ((rqst->ExtPageType ==
		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
			return;
		}
	}

	switch (ioc_status) {

/****************************************************************************
 *  Common IOCStatus values for all replies
 ****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
 *  Config IOCStatus values
 ****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config can't commit";
		break;

/****************************************************************************
 *  SCSI IO Reply
 ****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
 *  For use by SCSI Initiator and SCSI Target end-to-end data protection
 ****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
 *  SCSI Target values
 ****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
 *  Serial Attached SCSI values
 ****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
 *  Diagnostic Buffer Post / Diagnostic Release values
 ****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		 desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		ioc_info(ioc, "Discovery: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		ioc_info(ioc, "PCIE Enumeration: (%s)",
			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
			 "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont(" enumeration_status(0x%08x)",
				le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32 loginfo;
		struct {
			u32 subcode:16;
			u32 code:8;
			u32 originator:4;
			u32 bus_type:4;
		} dw;
	};
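	/*
	 * Example decode (bitfield layout as on a little-endian host):
	 * log_info 0x31170000 has bus_type 0x3 (SAS), originator 0x1
	 * (PL), code 0x17 and subcode 0x0000 - the nexus loss loginfo
	 * filtered out below.
	 */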
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		 log_info,
		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}

/**
 * _base_display_reply_info - handle reply descriptors depending on IOC Status
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
				   mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					    GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
			      &ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
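 * The smid range identifies the owner: smids below ctl_smid are SCSI IO
 * commands (their cb_idx lives in the per-command scsiio_tracker),
 * ctl_smid itself belongs to the ctl module, smids from hi_priority_smid
 * up to internal_smid are high-priority commands, and the remainder up
 * to hba_queue_depth are internal commands.
 *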
 * Return: callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}
1575 | |
1576 | /** |
1577 | * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues |
1578 | * when driver is flushing out the IOs. |
1579 | * @ioc: per adapter object |
1580 | * |
1581 | * Pause polling on the mq poll (io uring) queues when driver is flushing |
1582 | * out the IOs. Otherwise we may see the race condition of completing the same |
1583 | * IO from two paths. |
1584 | * |
1585 | * Returns nothing. |
1586 | */ |
1587 | void |
1588 | mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc) |
1589 | { |
1590 | int iopoll_q_count = |
1591 | ioc->reply_queue_count - ioc->iopoll_q_start_index; |
1592 | int qid; |
1593 | |
1594 | for (qid = 0; qid < iopoll_q_count; qid++) |
		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
1596 | |
1597 | /* |
1598 | * wait for current poll to complete. |
1599 | */ |
1600 | for (qid = 0; qid < iopoll_q_count; qid++) { |
		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
1602 | cpu_relax(); |
1603 | udelay(500); |
1604 | } |
1605 | } |
1606 | } |
1607 | |
1608 | /** |
1609 | * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues. |
1610 | * @ioc: per adapter object |
1611 | * |
1612 | * Returns nothing. |
1613 | */ |
1614 | void |
1615 | mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc) |
1616 | { |
1617 | int iopoll_q_count = |
1618 | ioc->reply_queue_count - ioc->iopoll_q_start_index; |
1619 | int qid; |
1620 | |
1621 | for (qid = 0; qid < iopoll_q_count; qid++) |
		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
1623 | } |
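
/*
 * Usage sketch (hypothetical caller, for illustration only): per the
 * kernel-doc above, flush paths are expected to bracket their work with the
 * pause/resume pair so an IO cannot be completed from both the poll path and
 * the flush path:
 *
 *	mpt3sas_base_pause_mq_polling(ioc);
 *	... flush or complete outstanding IOs ...
 *	mpt3sas_base_resume_mq_polling(ioc);
 */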
1624 | |
1625 | /** |
1626 | * mpt3sas_base_mask_interrupts - disable interrupts |
1627 | * @ioc: per adapter object |
1628 | * |
1629 | * Disabling ResetIRQ, Reply and Doorbell Interrupts |
1630 | */ |
1631 | void |
1632 | mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) |
1633 | { |
1634 | u32 him_register; |
1635 | |
1636 | ioc->mask_interrupts = 1; |
1637 | him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); |
1638 | him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; |
	writel(him_register, &ioc->chip->HostInterruptMask);
1640 | ioc->base_readl(&ioc->chip->HostInterruptMask); |
1641 | } |
1642 | |
1643 | /** |
1644 | * mpt3sas_base_unmask_interrupts - enable interrupts |
1645 | * @ioc: per adapter object |
1646 | * |
1647 | * Enabling only Reply Interrupts |
1648 | */ |
1649 | void |
1650 | mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) |
1651 | { |
1652 | u32 him_register; |
1653 | |
1654 | him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); |
1655 | him_register &= ~MPI2_HIM_RIM; |
	writel(him_register, &ioc->chip->HostInterruptMask);
1657 | ioc->mask_interrupts = 0; |
1658 | } |
1659 | |
1660 | union reply_descriptor { |
1661 | u64 word; |
1662 | struct { |
1663 | u32 low; |
1664 | u32 high; |
1665 | } u; |
1666 | }; |
1667 | |
1668 | static u32 base_mod64(u64 dividend, u32 divisor) |
1669 | { |
1670 | u32 remainder; |
1671 | |
1672 | if (!divisor) |
1673 | pr_err("mpt3sas: DIVISOR is zero, in div fn\n"); |
1674 | remainder = do_div(dividend, divisor); |
1675 | return remainder; |
1676 | } |
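
/*
 * Worked example (illustration): base_mod64(1000000007ULL, 10) returns 7,
 * the remainder of the 64-bit division. do_div() leaves the quotient in the
 * local copy of the dividend, which is simply discarded here.
 */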
1677 | |
1678 | /** |
1679 | * _base_process_reply_queue - Process reply descriptors from reply |
1680 | * descriptor post queue. |
1681 | * @reply_q: per IRQ's reply queue object. |
1682 | * |
1683 | * Return: number of reply descriptors processed from reply |
1684 | * descriptor queue. |
1685 | */ |
1686 | static int |
1687 | _base_process_reply_queue(struct adapter_reply_queue *reply_q) |
1688 | { |
1689 | union reply_descriptor rd; |
1690 | u64 completed_cmds; |
1691 | u8 request_descript_type; |
1692 | u16 smid; |
1693 | u8 cb_idx; |
1694 | u32 reply; |
1695 | u8 msix_index = reply_q->msix_index; |
1696 | struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; |
1697 | Mpi2ReplyDescriptorsUnion_t *rpf; |
1698 | u8 rc; |
1699 | |
1700 | completed_cmds = 0; |
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
1702 | return completed_cmds; |
1703 | |
1704 | rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; |
1705 | request_descript_type = rpf->Default.ReplyFlags |
1706 | & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; |
1707 | if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { |
		atomic_dec(&reply_q->busy);
1709 | return completed_cmds; |
1710 | } |
1711 | |
1712 | cb_idx = 0xFF; |
1713 | do { |
1714 | rd.word = le64_to_cpu(rpf->Words); |
1715 | if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) |
1716 | goto out; |
1717 | reply = 0; |
1718 | smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); |
1719 | if (request_descript_type == |
1720 | MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || |
1721 | request_descript_type == |
1722 | MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS || |
1723 | request_descript_type == |
1724 | MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) { |
1725 | cb_idx = _base_get_cb_idx(ioc, smid); |
1726 | if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && |
1727 | (likely(mpt_callbacks[cb_idx] != NULL))) { |
1728 | rc = mpt_callbacks[cb_idx](ioc, smid, |
1729 | msix_index, 0); |
1730 | if (rc) |
1731 | mpt3sas_base_free_smid(ioc, smid); |
1732 | } |
1733 | } else if (request_descript_type == |
1734 | MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { |
1735 | reply = le32_to_cpu( |
1736 | rpf->AddressReply.ReplyFrameAddress); |
1737 | if (reply > ioc->reply_dma_max_address || |
1738 | reply < ioc->reply_dma_min_address) |
1739 | reply = 0; |
1740 | if (smid) { |
1741 | cb_idx = _base_get_cb_idx(ioc, smid); |
1742 | if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && |
1743 | (likely(mpt_callbacks[cb_idx] != NULL))) { |
1744 | rc = mpt_callbacks[cb_idx](ioc, smid, |
1745 | msix_index, reply); |
1746 | if (reply) |
1747 | _base_display_reply_info(ioc, |
1748 | smid, msix_index, reply); |
1749 | if (rc) |
1750 | mpt3sas_base_free_smid(ioc, |
1751 | smid); |
1752 | } |
1753 | } else { |
1754 | _base_async_event(ioc, msix_index, reply); |
1755 | } |
1756 | |
1757 | /* reply free queue handling */ |
1758 | if (reply) { |
1759 | ioc->reply_free_host_index = |
1760 | (ioc->reply_free_host_index == |
1761 | (ioc->reply_free_queue_depth - 1)) ? |
1762 | 0 : ioc->reply_free_host_index + 1; |
1763 | ioc->reply_free[ioc->reply_free_host_index] = |
1764 | cpu_to_le32(reply); |
1765 | if (ioc->is_mcpu_endpoint) |
1766 | _base_clone_reply_to_sys_mem(ioc, |
1767 | reply, |
					    ioc->reply_free_host_index);
			writel(ioc->reply_free_host_index,
			    &ioc->chip->ReplyFreeHostIndex);
1771 | } |
1772 | } |
1773 | |
1774 | rpf->Words = cpu_to_le64(ULLONG_MAX); |
1775 | reply_q->reply_post_host_index = |
1776 | (reply_q->reply_post_host_index == |
1777 | (ioc->reply_post_queue_depth - 1)) ? 0 : |
1778 | reply_q->reply_post_host_index + 1; |
1779 | request_descript_type = |
1780 | reply_q->reply_post_free[reply_q->reply_post_host_index]. |
1781 | Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; |
1782 | completed_cmds++; |
1783 | /* Update the reply post host index after continuously |
1784 | * processing the threshold number of Reply Descriptors. |
1785 | * So that FW can find enough entries to post the Reply |
1786 | * Descriptors in the reply descriptor post queue. |
1787 | */ |
1788 | if (completed_cmds >= ioc->thresh_hold) { |
1789 | if (ioc->combined_reply_queue) { |
				writel(reply_q->reply_post_host_index |
				    ((msix_index & 7) <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
				    (msix_index <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    &ioc->chip->ReplyPostHostIndex);
			}
			if (!reply_q->is_iouring_poll_q &&
			    !reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = true;
				irq_poll_sched(&reply_q->irqpoll);
			}
			atomic_dec(&reply_q->busy);
			return completed_cmds;
1807 | } |
1808 | if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) |
1809 | goto out; |
1810 | if (!reply_q->reply_post_host_index) |
1811 | rpf = reply_q->reply_post_free; |
1812 | else |
1813 | rpf++; |
1814 | } while (1); |
1815 | |
1816 | out: |
1817 | |
1818 | if (!completed_cmds) { |
		atomic_dec(&reply_q->busy);
1820 | return completed_cmds; |
1821 | } |
1822 | |
1823 | if (ioc->is_warpdrive) { |
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
1827 | return completed_cmds; |
1828 | } |
1829 | |
1830 | /* Update Reply Post Host Index. |
1831 | * For those HBA's which support combined reply queue feature |
1832 | * 1. Get the correct Supplemental Reply Post Host Index Register. |
1833 | * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host |
1834 | * Index Register address bank i.e replyPostRegisterIndex[], |
1835 | * 2. Then update this register with new reply host index value |
1836 | * in ReplyPostIndex field and the MSIxIndex field with |
1837 | * msix_index value reduced to a value between 0 and 7, |
1838 | * using a modulo 8 operation. Since each Supplemental Reply Post |
1839 | * Host Index Register supports 8 MSI-X vectors. |
1840 | * |
1841 | * For other HBA's just update the Reply Post Host Index register with |
1842 | * new reply host index value in ReplyPostIndex Field and msix_index |
1843 | * value in MSIxIndex field. |
1844 | */ |
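	/*
	 * Worked example (illustration): with msix_index == 13 the
	 * combined-reply-queue path below writes to
	 * replyPostRegisterIndex[13 / 8] == replyPostRegisterIndex[1] and
	 * encodes MSIxIndex = 13 & 7 = 5 into the register value.
	 */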
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
		    MPI2_RPHI_MSIX_INDEX_SHIFT),
		    ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
		    MPI2_RPHI_MSIX_INDEX_SHIFT),
		    &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
1854 | return completed_cmds; |
1855 | } |
1856 | |
1857 | /** |
1858 | * mpt3sas_blk_mq_poll - poll the blk mq poll queue |
1859 | * @shost: Scsi_Host object |
1860 | * @queue_num: hw ctx queue number |
1861 | * |
 * Return: number of entries processed from the poll queue.
1863 | */ |
1864 | int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) |
1865 | { |
1866 | struct MPT3SAS_ADAPTER *ioc = |
1867 | (struct MPT3SAS_ADAPTER *)shost->hostdata; |
1868 | struct adapter_reply_queue *reply_q; |
1869 | int num_entries = 0; |
1870 | int qid = queue_num - ioc->iopoll_q_start_index; |
1871 | |
	if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
	    !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
1874 | return 0; |
1875 | |
1876 | reply_q = ioc->io_uring_poll_queues[qid].reply_q; |
1877 | |
1878 | num_entries = _base_process_reply_queue(reply_q); |
	atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
1880 | |
1881 | return num_entries; |
1882 | } |
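
/*
 * Worked example (illustration): if the HBA exposes its io_uring poll queues
 * starting at iopoll_q_start_index == 8, a poll on hw queue number 10 maps to
 * qid = 10 - 8 = 2, i.e. ioc->io_uring_poll_queues[2].reply_q is processed.
 */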
1883 | |
1884 | /** |
1885 | * _base_interrupt - MPT adapter (IOC) specific interrupt handler. |
1886 | * @irq: irq number (not used) |
1887 | * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure |
1888 | * |
1889 | * Return: IRQ_HANDLED if processed, else IRQ_NONE. |
1890 | */ |
1891 | static irqreturn_t |
1892 | _base_interrupt(int irq, void *bus_id) |
1893 | { |
1894 | struct adapter_reply_queue *reply_q = bus_id; |
1895 | struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; |
1896 | |
1897 | if (ioc->mask_interrupts) |
1898 | return IRQ_NONE; |
1899 | if (reply_q->irq_poll_scheduled) |
1900 | return IRQ_HANDLED; |
1901 | return ((_base_process_reply_queue(reply_q) > 0) ? |
1902 | IRQ_HANDLED : IRQ_NONE); |
1903 | } |
1904 | |
1905 | /** |
1906 | * _base_irqpoll - IRQ poll callback handler |
1907 | * @irqpoll: irq_poll object |
1908 | * @budget: irq poll weight |
1909 | * |
1910 | * Return: number of reply descriptors processed |
1911 | */ |
1912 | static int |
1913 | _base_irqpoll(struct irq_poll *irqpoll, int budget) |
1914 | { |
1915 | struct adapter_reply_queue *reply_q; |
1916 | int num_entries = 0; |
1917 | |
1918 | reply_q = container_of(irqpoll, struct adapter_reply_queue, |
1919 | irqpoll); |
1920 | if (reply_q->irq_line_enable) { |
		disable_irq_nosync(reply_q->os_irq);
1922 | reply_q->irq_line_enable = false; |
1923 | } |
1924 | num_entries = _base_process_reply_queue(reply_q); |
1925 | if (num_entries < budget) { |
1926 | irq_poll_complete(irqpoll); |
1927 | reply_q->irq_poll_scheduled = false; |
1928 | reply_q->irq_line_enable = true; |
		enable_irq(reply_q->os_irq);
1930 | /* |
1931 | * Go for one more round of processing the |
1932 | * reply descriptor post queue in case the HBA |
1933 | * Firmware has posted some reply descriptors |
1934 | * while reenabling the IRQ. |
1935 | */ |
1936 | _base_process_reply_queue(reply_q); |
1937 | } |
1938 | |
1939 | return num_entries; |
1940 | } |
1941 | |
1942 | /** |
 * _base_init_irqpolls - initialize IRQ polls
1944 | * @ioc: per adapter object |
1945 | * |
1946 | * Return: nothing |
1947 | */ |
1948 | static void |
1949 | _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc) |
1950 | { |
1951 | struct adapter_reply_queue *reply_q, *next; |
1952 | |
	if (list_empty(&ioc->reply_queue_list))
1954 | return; |
1955 | |
1956 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { |
1957 | if (reply_q->is_iouring_poll_q) |
1958 | continue; |
1959 | irq_poll_init(&reply_q->irqpoll, |
1960 | ioc->hba_queue_depth/4, _base_irqpoll); |
1961 | reply_q->irq_poll_scheduled = false; |
1962 | reply_q->irq_line_enable = true; |
		reply_q->os_irq = pci_irq_vector(ioc->pdev,
		    reply_q->msix_index);
1965 | } |
1966 | } |
1967 | |
1968 | /** |
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1970 | * @ioc: per adapter object |
1971 | * |
1972 | * Return: Whether or not MSI/X is enabled. |
1973 | */ |
1974 | static inline int |
1975 | _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) |
1976 | { |
1977 | return (ioc->facts.IOCCapabilities & |
1978 | MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; |
1979 | } |
1980 | |
1981 | /** |
1982 | * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts |
1983 | * @ioc: per adapter object |
 * @poll: poll over reply descriptor pools in case the interrupt for a
 *	timed-out SCSI command got delayed
1986 | * Context: non-ISR context |
1987 | * |
1988 | * Called when a Task Management request has completed. |
1989 | */ |
1990 | void |
1991 | mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) |
1992 | { |
1993 | struct adapter_reply_queue *reply_q; |
1994 | |
1995 | /* If MSIX capability is turned off |
1996 | * then multi-queues are not enabled |
1997 | */ |
1998 | if (!_base_is_controller_msix_enabled(ioc)) |
1999 | return; |
2000 | |
2001 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { |
2002 | if (ioc->shost_recovery || ioc->remove_host || |
2003 | ioc->pci_error_recovery) |
2004 | return; |
2005 | /* TMs are on msix_index == 0 */ |
2006 | if (reply_q->msix_index == 0) |
2007 | continue; |
2008 | |
2009 | if (reply_q->is_iouring_poll_q) { |
2010 | _base_process_reply_queue(reply_q); |
2011 | continue; |
2012 | } |
2013 | |
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
2015 | if (reply_q->irq_poll_scheduled) { |
2016 | /* Calling irq_poll_disable will wait for any pending |
2017 | * callbacks to have completed. |
2018 | */ |
2019 | irq_poll_disable(&reply_q->irqpoll); |
2020 | irq_poll_enable(&reply_q->irqpoll); |
2021 | /* check how the scheduled poll has ended, |
2022 | * clean up only if necessary |
2023 | */ |
2024 | if (reply_q->irq_poll_scheduled) { |
2025 | reply_q->irq_poll_scheduled = false; |
2026 | reply_q->irq_line_enable = true; |
				enable_irq(reply_q->os_irq);
2028 | } |
2029 | } |
2030 | |
2031 | if (poll) |
2032 | _base_process_reply_queue(reply_q); |
2033 | } |
2034 | } |
2035 | |
2036 | /** |
2037 | * mpt3sas_base_release_callback_handler - clear interrupt callback handler |
2038 | * @cb_idx: callback index |
2039 | */ |
2040 | void |
2041 | mpt3sas_base_release_callback_handler(u8 cb_idx) |
2042 | { |
2043 | mpt_callbacks[cb_idx] = NULL; |
2044 | } |
2045 | |
2046 | /** |
2047 | * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler |
2048 | * @cb_func: callback function |
2049 | * |
2050 | * Return: Index of @cb_func. |
2051 | */ |
2052 | u8 |
2053 | mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) |
2054 | { |
2055 | u8 cb_idx; |
2056 | |
2057 | for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) |
2058 | if (mpt_callbacks[cb_idx] == NULL) |
2059 | break; |
2060 | |
2061 | mpt_callbacks[cb_idx] = cb_func; |
2062 | return cb_idx; |
2063 | } |
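
/*
 * Usage sketch (hypothetical, for illustration only): a sub-module registers
 * a handler at init time and releases the slot on teardown. '_example_done',
 * '_example_init' and '_example_exit' are made-up names; the handler merely
 * follows the MPT_CALLBACK convention used by _base_process_reply_queue(),
 * where a non-zero return asks the caller to free the smid.
 */
#ifdef MPT3SAS_EXAMPLE_SNIPPETS
static u8 example_cb_idx;

static u8
_example_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	/* look up and complete the request tracked by smid here */
	return 1;	/* 1 => mpt3sas_base_free_smid() is called for us */
}

static void
_example_init(void)
{
	example_cb_idx = mpt3sas_base_register_callback_handler(_example_done);
}

static void
_example_exit(void)
{
	mpt3sas_base_release_callback_handler(example_cb_idx);
}
#endif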
2064 | |
2065 | /** |
2066 | * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler |
2067 | */ |
2068 | void |
2069 | mpt3sas_base_initialize_callback_handler(void) |
2070 | { |
2071 | u8 cb_idx; |
2072 | |
2073 | for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) |
2074 | mpt3sas_base_release_callback_handler(cb_idx); |
2075 | } |
2076 | |
2077 | |
2078 | /** |
2079 | * _base_build_zero_len_sge - build zero length sg entry |
2080 | * @ioc: per adapter object |
2081 | * @paddr: virtual address for SGE |
2082 | * |
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2084 | * something to use if the target device goes brain dead and tries |
2085 | * to send data even when none is asked for. |
2086 | */ |
2087 | static void |
2088 | _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) |
2089 | { |
2090 | u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | |
2091 | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | |
2092 | MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << |
2093 | MPI2_SGE_FLAGS_SHIFT); |
2094 | ioc->base_add_sg_single(paddr, flags_length, -1); |
2095 | } |
2096 | |
2097 | /** |
2098 | * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. |
2099 | * @paddr: virtual address for SGE |
2100 | * @flags_length: SGE flags and data transfer length |
2101 | * @dma_addr: Physical address |
2102 | */ |
2103 | static void |
2104 | _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) |
2105 | { |
2106 | Mpi2SGESimple32_t *sgel = paddr; |
2107 | |
2108 | flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | |
2109 | MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; |
2110 | sgel->FlagsLength = cpu_to_le32(flags_length); |
2111 | sgel->Address = cpu_to_le32(dma_addr); |
2112 | } |
2113 | |
2114 | |
2115 | /** |
2116 | * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. |
2117 | * @paddr: virtual address for SGE |
2118 | * @flags_length: SGE flags and data transfer length |
2119 | * @dma_addr: Physical address |
2120 | */ |
2121 | static void |
2122 | _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) |
2123 | { |
2124 | Mpi2SGESimple64_t *sgel = paddr; |
2125 | |
2126 | flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | |
2127 | MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; |
2128 | sgel->FlagsLength = cpu_to_le32(flags_length); |
2129 | sgel->Address = cpu_to_le64(dma_addr); |
2130 | } |
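
/*
 * Layout note (illustration, assuming the standard MPI2 SGE layout in which
 * MPI2_SGE_FLAGS_SHIFT is 24): FlagsLength packs the SGE flags into the top
 * byte and the transfer length into the low 24 bits, which is why callers
 * pass "sgl_flags | length" as a single u32 to these helpers.
 */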
2131 | |
2132 | /** |
2133 | * _base_get_chain_buffer_tracker - obtain chain tracker |
2134 | * @ioc: per adapter object |
2135 | * @scmd: SCSI commands of the IO request |
2136 | * |
2137 | * Return: chain tracker from chain_lookup table using key as |
2138 | * smid and smid's chain_offset. |
2139 | */ |
2140 | static struct chain_tracker * |
2141 | _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, |
2142 | struct scsi_cmnd *scmd) |
2143 | { |
2144 | struct chain_tracker *chain_req; |
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2146 | u16 smid = st->smid; |
2147 | u8 chain_offset = |
	    atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
2149 | |
2150 | if (chain_offset == ioc->chains_needed_per_io) |
2151 | return NULL; |
2152 | |
2153 | chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; |
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
2155 | return chain_req; |
2156 | } |
2157 | |
2158 | |
2159 | /** |
2160 | * _base_build_sg - build generic sg |
2161 | * @ioc: per adapter object |
2162 | * @psge: virtual address for SGE |
2163 | * @data_out_dma: physical address for WRITES |
2164 | * @data_out_sz: data xfer size for WRITES |
2165 | * @data_in_dma: physical address for READS |
2166 | * @data_in_sz: data xfer size for READS |
2167 | */ |
2168 | static void |
2169 | _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, |
2170 | dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, |
2171 | size_t data_in_sz) |
2172 | { |
2173 | u32 sgl_flags; |
2174 | |
2175 | if (!data_out_sz && !data_in_sz) { |
		_base_build_zero_len_sge(ioc, psge);
2177 | return; |
2178 | } |
2179 | |
2180 | if (data_out_sz && data_in_sz) { |
2181 | /* WRITE sgel first */ |
2182 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | |
2183 | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); |
2184 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; |
2185 | ioc->base_add_sg_single(psge, sgl_flags | |
2186 | data_out_sz, data_out_dma); |
2187 | |
2188 | /* incr sgel */ |
2189 | psge += ioc->sge_size; |
2190 | |
2191 | /* READ sgel last */ |
2192 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | |
2193 | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | |
2194 | MPI2_SGE_FLAGS_END_OF_LIST); |
2195 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; |
2196 | ioc->base_add_sg_single(psge, sgl_flags | |
2197 | data_in_sz, data_in_dma); |
2198 | } else if (data_out_sz) /* WRITE */ { |
2199 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | |
2200 | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | |
2201 | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); |
2202 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; |
2203 | ioc->base_add_sg_single(psge, sgl_flags | |
2204 | data_out_sz, data_out_dma); |
2205 | } else if (data_in_sz) /* READ */ { |
2206 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | |
2207 | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | |
2208 | MPI2_SGE_FLAGS_END_OF_LIST); |
2209 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; |
2210 | ioc->base_add_sg_single(psge, sgl_flags | |
2211 | data_in_sz, data_in_dma); |
2212 | } |
2213 | } |
2214 | |
2215 | /* IEEE format sgls */ |
2216 | |
2217 | /** |
2218 | * _base_build_nvme_prp - This function is called for NVMe end devices to build |
2219 | * a native SGL (NVMe PRP). |
2220 | * @ioc: per adapter object |
 * @smid: system request message index for getting the associated SGL
2222 | * @nvme_encap_request: the NVMe request msg frame pointer |
2223 | * @data_out_dma: physical address for WRITES |
2224 | * @data_out_sz: data xfer size for WRITES |
2225 | * @data_in_dma: physical address for READS |
2226 | * @data_in_sz: data xfer size for READS |
2227 | * |
2228 | * The native SGL is built starting in the first PRP |
2229 | * entry of the NVMe message (PRP1). If the data buffer is small enough to be |
2230 | * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is |
2231 | * used to describe a larger data buffer. If the data buffer is too large to |
 * describe using the two PRP entries inside the NVMe message, then PRP1
2233 | * describes the first data memory segment, and PRP2 contains a pointer to a PRP |
2234 | * list located elsewhere in memory to describe the remaining data memory |
2235 | * segments. The PRP list will be contiguous. |
2236 | * |
2237 | * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP |
 * consists of a list of PRP entries to describe a number of noncontiguous
2239 | * physical memory segments as a single memory buffer, just as a SGL does. Note |
2240 | * however, that this function is only used by the IOCTL call, so the memory |
2241 | * given will be guaranteed to be contiguous. There is no need to translate |
2242 | * non-contiguous SGL into a PRP in this case. All PRPs will describe |
2243 | * contiguous space that is one page size each. |
2244 | * |
2245 | * Each NVMe message contains two PRP entries. The first (PRP1) either contains |
2246 | * a PRP list pointer or a PRP element, depending upon the command. PRP2 |
2247 | * contains the second PRP element if the memory being described fits within 2 |
2248 | * PRP entries, or a PRP list pointer if the PRP spans more than two entries. |
2249 | * |
2250 | * A PRP list pointer contains the address of a PRP list, structured as a linear |
2251 | * array of PRP entries. Each PRP entry in this list describes a segment of |
2252 | * physical memory. |
2253 | * |
2254 | * Each 64-bit PRP entry comprises an address and an offset field. The address |
2255 | * always points at the beginning of a 4KB physical memory page, and the offset |
2256 | * describes where within that 4KB page the memory segment begins. Only the |
2257 | * first element in a PRP list may contain a non-zero offset, implying that all |
2258 | * memory segments following the first begin at the start of a 4KB page. |
2259 | * |
2260 | * Each PRP element normally describes 4KB of physical memory, with exceptions |
2261 | * for the first and last elements in the list. If the memory being described |
2262 | * by the list begins at a non-zero offset within the first 4KB page, then the |
2263 | * first PRP element will contain a non-zero offset indicating where the region |
2264 | * begins within the 4KB page. The last memory segment may end before the end |
2265 | * of the 4KB segment, depending upon the overall size of the memory being |
2266 | * described by the PRP list. |
2267 | * |
2268 | * Since PRP entries lack any indication of size, the overall data buffer length |
2269 | * is used to determine where the end of the data memory buffer is located, and |
2270 | * how many PRP entries are required to describe it. |
2271 | */ |
2272 | static void |
2273 | _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
2274 | Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, |
2275 | dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, |
2276 | size_t data_in_sz) |
2277 | { |
2278 | int prp_size = NVME_PRP_SIZE; |
2279 | __le64 *prp_entry, *prp1_entry, *prp2_entry; |
2280 | __le64 *prp_page; |
2281 | dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; |
2282 | u32 offset, entry_len; |
2283 | u32 page_mask_result, page_mask; |
2284 | size_t length; |
2285 | struct mpt3sas_nvme_cmd *nvme_cmd = |
2286 | (void *)nvme_encap_request->NVMe_Command; |
2287 | |
2288 | /* |
2289 | * Not all commands require a data transfer. If no data, just return |
2290 | * without constructing any PRP. |
2291 | */ |
2292 | if (!data_in_sz && !data_out_sz) |
2293 | return; |
2294 | prp1_entry = &nvme_cmd->prp1; |
2295 | prp2_entry = &nvme_cmd->prp2; |
2296 | prp_entry = prp1_entry; |
2297 | /* |
2298 | * For the PRP entries, use the specially allocated buffer of |
2299 | * contiguous memory. |
2300 | */ |
2301 | prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid); |
2302 | prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); |
2303 | |
2304 | /* |
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
2307 | */ |
2308 | page_mask = ioc->page_size - 1; |
2309 | page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; |
2310 | if (!page_mask_result) { |
2311 | /* Bump up to next page boundary. */ |
2312 | prp_page = (__le64 *)((u8 *)prp_page + prp_size); |
2313 | prp_page_dma = prp_page_dma + prp_size; |
2314 | } |
2315 | |
2316 | /* |
2317 | * Set PRP physical pointer, which initially points to the current PRP |
2318 | * DMA memory page. |
2319 | */ |
2320 | prp_entry_dma = prp_page_dma; |
2321 | |
2322 | /* Get physical address and length of the data buffer. */ |
2323 | if (data_in_sz) { |
2324 | dma_addr = data_in_dma; |
2325 | length = data_in_sz; |
2326 | } else { |
2327 | dma_addr = data_out_dma; |
2328 | length = data_out_sz; |
2329 | } |
2330 | |
2331 | /* Loop while the length is not zero. */ |
2332 | while (length) { |
2333 | /* |
2334 | * Check if we need to put a list pointer here if we are at |
2335 | * page boundary - prp_size (8 bytes). |
2336 | */ |
2337 | page_mask_result = (prp_entry_dma + prp_size) & page_mask; |
2338 | if (!page_mask_result) { |
2339 | /* |
2340 | * This is the last entry in a PRP List, so we need to |
2341 | * put a PRP list pointer here. What this does is: |
2342 | * - bump the current memory pointer to the next |
2343 | * address, which will be the next full page. |
2344 | * - set the PRP Entry to point to that page. This |
2345 | * is now the PRP List pointer. |
			 *   - bump the PRP Entry pointer to the start of the
2347 | * next page. Since all of this PRP memory is |
2348 | * contiguous, no need to get a new page - it's |
2349 | * just the next address. |
2350 | */ |
2351 | prp_entry_dma++; |
2352 | *prp_entry = cpu_to_le64(prp_entry_dma); |
2353 | prp_entry++; |
2354 | } |
2355 | |
2356 | /* Need to handle if entry will be part of a page. */ |
2357 | offset = dma_addr & page_mask; |
2358 | entry_len = ioc->page_size - offset; |
2359 | |
2360 | if (prp_entry == prp1_entry) { |
2361 | /* |
2362 | * Must fill in the first PRP pointer (PRP1) before |
2363 | * moving on. |
2364 | */ |
2365 | *prp1_entry = cpu_to_le64(dma_addr); |
2366 | |
2367 | /* |
2368 | * Now point to the second PRP entry within the |
2369 | * command (PRP2). |
2370 | */ |
2371 | prp_entry = prp2_entry; |
2372 | } else if (prp_entry == prp2_entry) { |
2373 | /* |
2374 | * Should the PRP2 entry be a PRP List pointer or just |
2375 | * a regular PRP pointer? If there is more than one |
2376 | * more page of data, must use a PRP List pointer. |
2377 | */ |
2378 | if (length > ioc->page_size) { |
2379 | /* |
2380 | * PRP2 will contain a PRP List pointer because |
2381 | * more PRP's are needed with this command. The |
2382 | * list will start at the beginning of the |
2383 | * contiguous buffer. |
2384 | */ |
2385 | *prp2_entry = cpu_to_le64(prp_entry_dma); |
2386 | |
2387 | /* |
2388 | * The next PRP Entry will be the start of the |
2389 | * first PRP List. |
2390 | */ |
2391 | prp_entry = prp_page; |
2392 | } else { |
2393 | /* |
2394 | * After this, the PRP Entries are complete. |
2395 | * This command uses 2 PRP's and no PRP list. |
2396 | */ |
2397 | *prp2_entry = cpu_to_le64(dma_addr); |
2398 | } |
2399 | } else { |
2400 | /* |
2401 | * Put entry in list and bump the addresses. |
2402 | * |
2403 | * After PRP1 and PRP2 are filled in, this will fill in |
2404 | * all remaining PRP entries in a PRP List, one per |
2405 | * each time through the loop. |
2406 | */ |
2407 | *prp_entry = cpu_to_le64(dma_addr); |
2408 | prp_entry++; |
2409 | prp_entry_dma++; |
2410 | } |
2411 | |
2412 | /* |
2413 | * Bump the phys address of the command's data buffer by the |
2414 | * entry_len. |
2415 | */ |
2416 | dma_addr += entry_len; |
2417 | |
2418 | /* Decrement length accounting for last partial page. */ |
2419 | if (entry_len > length) |
2420 | length = 0; |
2421 | else |
2422 | length -= entry_len; |
2423 | } |
2424 | } |
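
/*
 * Worked example (illustration, assuming a 4 KB IOC page size): for a
 * 6000-byte buffer at DMA address 0x10000200, the first pass stores
 * PRP1 = 0x10000200 and consumes 4096 - 0x200 = 3584 bytes; the remaining
 * 2416 bytes fit in one page, so PRP2 = 0x10001000 is a plain PRP entry and
 * no PRP list is needed.
 */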
2425 | |
2426 | /** |
2427 | * base_make_prp_nvme - Prepare PRPs (Physical Region Page) - |
2428 | * SGLs specific to NVMe drives only |
2429 | * |
2430 | * @ioc: per adapter object |
2431 | * @scmd: SCSI command from the mid-layer |
2432 | * @mpi_request: mpi request |
2433 | * @smid: msg Index |
2434 | * @sge_count: scatter gather element count. |
2435 | * |
 * Return: nothing; the native PRP SGL is built in place in the request.
2438 | */ |
2439 | static void |
2440 | base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc, |
2441 | struct scsi_cmnd *scmd, |
2442 | Mpi25SCSIIORequest_t *mpi_request, |
2443 | u16 smid, int sge_count) |
2444 | { |
2445 | int sge_len, num_prp_in_chain = 0; |
2446 | Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl; |
2447 | __le64 *curr_buff; |
2448 | dma_addr_t msg_dma, sge_addr, offset; |
2449 | u32 page_mask, page_mask_result; |
2450 | struct scatterlist *sg_scmd; |
2451 | u32 first_prp_len; |
	int data_len = scsi_bufflen(scmd);
2453 | u32 nvme_pg_size; |
2454 | |
2455 | nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE); |
2456 | /* |
	 * NVMe has a very convoluted PRP format. One PRP is required
	 * for each page or partial page. The driver needs to split up
	 * the OS sg_list entries if they are longer than one page or
	 * cross a page boundary. The driver also has to insert a PRP
	 * list pointer entry as the last entry in each physical page
	 * of the PRP list.
2462 | * |
2463 | * NOTE: The first PRP "entry" is actually placed in the first |
2464 | * SGL entry in the main message as IEEE 64 format. The 2nd |
2465 | * entry in the main message is the chain element, and the rest |
2466 | * of the PRP entries are built in the contiguous pcie buffer. |
2467 | */ |
2468 | page_mask = nvme_pg_size - 1; |
2469 | |
2470 | /* |
2471 | * Native SGL is needed. |
2472 | * Put a chain element in main message frame that points to the first |
2473 | * chain buffer. |
2474 | * |
2475 | * NOTE: The ChainOffset field must be 0 when using a chain pointer to |
2476 | * a native SGL. |
2477 | */ |
2478 | |
2479 | /* Set main message chain element pointer */ |
2480 | main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; |
2481 | /* |
2482 | * For NVMe the chain element needs to be the 2nd SG entry in the main |
2483 | * message. |
2484 | */ |
2485 | main_chain_element = (Mpi25IeeeSgeChain64_t *) |
2486 | ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); |
2487 | |
2488 | /* |
2489 | * For the PRP entries, use the specially allocated buffer of |
2490 | * contiguous memory. Normal chain buffers can't be used |
2491 | * because each chain buffer would need to be the size of an OS |
2492 | * page (4k). |
2493 | */ |
2494 | curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid); |
2495 | msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); |
2496 | |
2497 | main_chain_element->Address = cpu_to_le64(msg_dma); |
2498 | main_chain_element->NextChainOffset = 0; |
2499 | main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
2500 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | |
2501 | MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; |
2502 | |
	/* Build the first PRP; the SGE need not be page aligned */
2504 | ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; |
	sg_scmd = scsi_sglist(scmd);
2506 | sge_addr = sg_dma_address(sg_scmd); |
2507 | sge_len = sg_dma_len(sg_scmd); |
2508 | |
2509 | offset = sge_addr & page_mask; |
2510 | first_prp_len = nvme_pg_size - offset; |
2511 | |
2512 | ptr_first_sgl->Address = cpu_to_le64(sge_addr); |
2513 | ptr_first_sgl->Length = cpu_to_le32(first_prp_len); |
2514 | |
2515 | data_len -= first_prp_len; |
2516 | |
2517 | if (sge_len > first_prp_len) { |
2518 | sge_addr += first_prp_len; |
2519 | sge_len -= first_prp_len; |
2520 | } else if (data_len && (sge_len == first_prp_len)) { |
2521 | sg_scmd = sg_next(sg_scmd); |
2522 | sge_addr = sg_dma_address(sg_scmd); |
2523 | sge_len = sg_dma_len(sg_scmd); |
2524 | } |
2525 | |
2526 | for (;;) { |
2527 | offset = sge_addr & page_mask; |
2528 | |
2529 | /* Put PRP pointer due to page boundary*/ |
2530 | page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask; |
2531 | if (unlikely(!page_mask_result)) { |
2532 | scmd_printk(KERN_NOTICE, |
2533 | scmd, "page boundary curr_buff: 0x%p\n", |
2534 | curr_buff); |
2535 | msg_dma += 8; |
2536 | *curr_buff = cpu_to_le64(msg_dma); |
2537 | curr_buff++; |
2538 | num_prp_in_chain++; |
2539 | } |
2540 | |
2541 | *curr_buff = cpu_to_le64(sge_addr); |
2542 | curr_buff++; |
2543 | msg_dma += 8; |
2544 | num_prp_in_chain++; |
2545 | |
2546 | sge_addr += nvme_pg_size; |
2547 | sge_len -= nvme_pg_size; |
2548 | data_len -= nvme_pg_size; |
2549 | |
2550 | if (data_len <= 0) |
2551 | break; |
2552 | |
2553 | if (sge_len > 0) |
2554 | continue; |
2555 | |
2556 | sg_scmd = sg_next(sg_scmd); |
2557 | sge_addr = sg_dma_address(sg_scmd); |
2558 | sge_len = sg_dma_len(sg_scmd); |
2559 | } |
2560 | |
2561 | main_chain_element->Length = |
2562 | cpu_to_le32(num_prp_in_chain * sizeof(u64)); |
2563 | return; |
2564 | } |
2565 | |
2566 | static bool |
2567 | base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, |
2568 | struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) |
2569 | { |
2570 | u32 data_length = 0; |
2571 | bool build_prp = true; |
2572 | |
	data_length = scsi_bufflen(scmd);
	if (pcie_device &&
	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2576 | build_prp = false; |
2577 | return build_prp; |
2578 | } |
2579 | |
	/* If the data length is <= 16K and the number of SGE entries is <= 2,
	 * we build an IEEE SGL
2582 | */ |
2583 | if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2)) |
2584 | build_prp = false; |
2585 | |
2586 | return build_prp; |
2587 | } |
2588 | |
2589 | /** |
2590 | * _base_check_pcie_native_sgl - This function is called for PCIe end devices to |
2591 | * determine if the driver needs to build a native SGL. If so, that native |
2592 | * SGL is built in the special contiguous buffers allocated especially for |
 * PCIe SGL creation. If the driver will not build a native SGL, return
 * 1 and a normal IEEE SGL will be built. Currently this routine
2595 | * supports NVMe. |
2596 | * @ioc: per adapter object |
2597 | * @mpi_request: mf request pointer |
2598 | * @smid: system request message index |
2599 | * @scmd: scsi command |
2600 | * @pcie_device: points to the PCIe device's info |
2601 | * |
2602 | * Return: 0 if native SGL was built, 1 if no SGL was built |
2603 | */ |
2604 | static int |
2605 | _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, |
2606 | Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, |
2607 | struct _pcie_device *pcie_device) |
2608 | { |
2609 | int sges_left; |
2610 | |
2611 | /* Get the SG list pointer and info. */ |
	sges_left = scsi_dma_map(scmd);
2613 | if (sges_left < 0) |
2614 | return 1; |
2615 | |
2616 | /* Check if we need to build a native SG list. */ |
	if (!base_is_prp_possible(ioc, pcie_device,
	    scmd, sges_left)) {
		/* PRPs are not possible for this IO; build an IEEE SGL. */
		goto out;
	}
2622 | |
2623 | /* |
2624 | * Build native NVMe PRP. |
2625 | */ |
	base_make_prp_nvme(ioc, scmd, mpi_request,
	    smid, sges_left);
2628 | |
2629 | return 0; |
2630 | out: |
	scsi_dma_unmap(scmd);
2632 | return 1; |
2633 | } |
2634 | |
2635 | /** |
2636 | * _base_add_sg_single_ieee - add sg element for IEEE format |
2637 | * @paddr: virtual address for SGE |
2638 | * @flags: SGE flags |
2639 | * @chain_offset: number of 128 byte elements from start of segment |
2640 | * @length: data transfer length |
2641 | * @dma_addr: Physical address |
2642 | */ |
2643 | static void |
2644 | _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, |
2645 | dma_addr_t dma_addr) |
2646 | { |
2647 | Mpi25IeeeSgeChain64_t *sgel = paddr; |
2648 | |
2649 | sgel->Flags = flags; |
2650 | sgel->NextChainOffset = chain_offset; |
2651 | sgel->Length = cpu_to_le32(length); |
2652 | sgel->Address = cpu_to_le64(dma_addr); |
2653 | } |
2654 | |
2655 | /** |
2656 | * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format |
2657 | * @ioc: per adapter object |
2658 | * @paddr: virtual address for SGE |
2659 | * |
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2661 | * something to use if the target device goes brain dead and tries |
2662 | * to send data even when none is asked for. |
2663 | */ |
2664 | static void |
2665 | _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) |
2666 | { |
2667 | u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | |
2668 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | |
2669 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST); |
2670 | |
	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2672 | } |
2673 | |
2674 | /** |
2675 | * _base_build_sg_scmd - main sg creation routine |
 * pcie_device is unused here!
2677 | * @ioc: per adapter object |
2678 | * @scmd: scsi command |
2679 | * @smid: system request message index |
2680 | * @unused: unused pcie_device pointer |
2681 | * Context: none. |
2682 | * |
2683 | * The main routine that builds scatter gather table from a given |
2684 | * scsi request sent via the .queuecommand main handler. |
2685 | * |
2686 | * Return: 0 success, anything else error |
2687 | */ |
2688 | static int |
2689 | _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, |
2690 | struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) |
2691 | { |
2692 | Mpi2SCSIIORequest_t *mpi_request; |
2693 | dma_addr_t chain_dma; |
2694 | struct scatterlist *sg_scmd; |
2695 | void *sg_local, *chain; |
2696 | u32 chain_offset; |
2697 | u32 chain_length; |
2698 | u32 chain_flags; |
2699 | int sges_left; |
2700 | u32 sges_in_segment; |
2701 | u32 sgl_flags; |
2702 | u32 sgl_flags_last_element; |
2703 | u32 sgl_flags_end_buffer; |
2704 | struct chain_tracker *chain_req; |
2705 | |
2706 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
2707 | |
2708 | /* init scatter gather flags */ |
2709 | sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; |
2710 | if (scmd->sc_data_direction == DMA_TO_DEVICE) |
2711 | sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; |
2712 | sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) |
2713 | << MPI2_SGE_FLAGS_SHIFT; |
2714 | sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | |
2715 | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) |
2716 | << MPI2_SGE_FLAGS_SHIFT; |
2717 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; |
2718 | |
	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
2721 | if (sges_left < 0) |
2722 | return -ENOMEM; |
2723 | |
2724 | sg_local = &mpi_request->SGL; |
2725 | sges_in_segment = ioc->max_sges_in_main_message; |
2726 | if (sges_left <= sges_in_segment) |
2727 | goto fill_in_last_segment; |
2728 | |
2729 | mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + |
2730 | (sges_in_segment * ioc->sge_size))/4; |
2731 | |
2732 | /* fill in main message segment when there is a chain following */ |
2733 | while (sges_in_segment) { |
2734 | if (sges_in_segment == 1) |
2735 | ioc->base_add_sg_single(sg_local, |
2736 | sgl_flags_last_element | sg_dma_len(sg_scmd), |
2737 | sg_dma_address(sg_scmd)); |
2738 | else |
2739 | ioc->base_add_sg_single(sg_local, sgl_flags | |
2740 | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); |
2741 | sg_scmd = sg_next(sg_scmd); |
2742 | sg_local += ioc->sge_size; |
2743 | sges_left--; |
2744 | sges_in_segment--; |
2745 | } |
2746 | |
2747 | /* initializing the chain flags and pointers */ |
2748 | chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; |
2749 | chain_req = _base_get_chain_buffer_tracker(ioc, scmd); |
2750 | if (!chain_req) |
2751 | return -1; |
2752 | chain = chain_req->chain_buffer; |
2753 | chain_dma = chain_req->chain_buffer_dma; |
2754 | do { |
2755 | sges_in_segment = (sges_left <= |
2756 | ioc->max_sges_in_chain_message) ? sges_left : |
2757 | ioc->max_sges_in_chain_message; |
2758 | chain_offset = (sges_left == sges_in_segment) ? |
2759 | 0 : (sges_in_segment * ioc->sge_size)/4; |
2760 | chain_length = sges_in_segment * ioc->sge_size; |
2761 | if (chain_offset) { |
2762 | chain_offset = chain_offset << |
2763 | MPI2_SGE_CHAIN_OFFSET_SHIFT; |
2764 | chain_length += ioc->sge_size; |
2765 | } |
2766 | ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | |
2767 | chain_length, chain_dma); |
2768 | sg_local = chain; |
2769 | if (!chain_offset) |
2770 | goto fill_in_last_segment; |
2771 | |
2772 | /* fill in chain segments */ |
2773 | while (sges_in_segment) { |
2774 | if (sges_in_segment == 1) |
2775 | ioc->base_add_sg_single(sg_local, |
2776 | sgl_flags_last_element | |
2777 | sg_dma_len(sg_scmd), |
2778 | sg_dma_address(sg_scmd)); |
2779 | else |
2780 | ioc->base_add_sg_single(sg_local, sgl_flags | |
2781 | sg_dma_len(sg_scmd), |
2782 | sg_dma_address(sg_scmd)); |
2783 | sg_scmd = sg_next(sg_scmd); |
2784 | sg_local += ioc->sge_size; |
2785 | sges_left--; |
2786 | sges_in_segment--; |
2787 | } |
2788 | |
2789 | chain_req = _base_get_chain_buffer_tracker(ioc, scmd); |
2790 | if (!chain_req) |
2791 | return -1; |
2792 | chain = chain_req->chain_buffer; |
2793 | chain_dma = chain_req->chain_buffer_dma; |
2794 | } while (1); |
2795 | |
2796 | |
2797 | fill_in_last_segment: |
2798 | |
2799 | /* fill the last segment */ |
2800 | while (sges_left) { |
2801 | if (sges_left == 1) |
2802 | ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | |
2803 | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); |
2804 | else |
2805 | ioc->base_add_sg_single(sg_local, sgl_flags | |
2806 | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); |
2807 | sg_scmd = sg_next(sg_scmd); |
2808 | sg_local += ioc->sge_size; |
2809 | sges_left--; |
2810 | } |
2811 | |
2812 | return 0; |
2813 | } |
2814 | |
2815 | /** |
2816 | * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format |
2817 | * @ioc: per adapter object |
2818 | * @scmd: scsi command |
2819 | * @smid: system request message index |
2820 | * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be |
2821 | * constructed on need. |
2822 | * Context: none. |
2823 | * |
2824 | * The main routine that builds scatter gather table from a given |
2825 | * scsi request sent via the .queuecommand main handler. |
2826 | * |
2827 | * Return: 0 success, anything else error |
2828 | */ |
2829 | static int |
2830 | _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, |
2831 | struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) |
2832 | { |
2833 | Mpi25SCSIIORequest_t *mpi_request; |
2834 | dma_addr_t chain_dma; |
2835 | struct scatterlist *sg_scmd; |
2836 | void *sg_local, *chain; |
2837 | u32 chain_offset; |
2838 | u32 chain_length; |
2839 | int sges_left; |
2840 | u32 sges_in_segment; |
2841 | u8 simple_sgl_flags; |
2842 | u8 simple_sgl_flags_last; |
2843 | u8 chain_sgl_flags; |
2844 | struct chain_tracker *chain_req; |
2845 | |
2846 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
2847 | |
2848 | /* init scatter gather flags */ |
2849 | simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | |
2850 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; |
2851 | simple_sgl_flags_last = simple_sgl_flags | |
2852 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST; |
2853 | chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
2854 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; |
2855 | |
2856 | /* Check if we need to build a native SG list. */ |
2857 | if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, |
2858 | smid, scmd, pcie_device) == 0)) { |
2859 | /* We built a native SG list, just return. */ |
2860 | return 0; |
2861 | } |
2862 | |
	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
2865 | if (sges_left < 0) |
2866 | return -ENOMEM; |
2867 | |
2868 | sg_local = &mpi_request->SGL; |
2869 | sges_in_segment = (ioc->request_sz - |
2870 | offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; |
2871 | if (sges_left <= sges_in_segment) |
2872 | goto fill_in_last_segment; |
2873 | |
2874 | mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + |
2875 | (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); |
2876 | |
2877 | /* fill in main message segment when there is a chain following */ |
2878 | while (sges_in_segment > 1) { |
		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2881 | sg_scmd = sg_next(sg_scmd); |
2882 | sg_local += ioc->sge_size_ieee; |
2883 | sges_left--; |
2884 | sges_in_segment--; |
2885 | } |
2886 | |
2887 | /* initializing the pointers */ |
2888 | chain_req = _base_get_chain_buffer_tracker(ioc, scmd); |
2889 | if (!chain_req) |
2890 | return -1; |
2891 | chain = chain_req->chain_buffer; |
2892 | chain_dma = chain_req->chain_buffer_dma; |
2893 | do { |
2894 | sges_in_segment = (sges_left <= |
2895 | ioc->max_sges_in_chain_message) ? sges_left : |
2896 | ioc->max_sges_in_chain_message; |
2897 | chain_offset = (sges_left == sges_in_segment) ? |
2898 | 0 : sges_in_segment; |
2899 | chain_length = sges_in_segment * ioc->sge_size_ieee; |
2900 | if (chain_offset) |
2901 | chain_length += ioc->sge_size_ieee; |
		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
		    chain_offset, chain_length, chain_dma);
2904 | |
2905 | sg_local = chain; |
2906 | if (!chain_offset) |
2907 | goto fill_in_last_segment; |
2908 | |
2909 | /* fill in chain segments */ |
2910 | while (sges_in_segment) { |
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2913 | sg_scmd = sg_next(sg_scmd); |
2914 | sg_local += ioc->sge_size_ieee; |
2915 | sges_left--; |
2916 | sges_in_segment--; |
2917 | } |
2918 | |
2919 | chain_req = _base_get_chain_buffer_tracker(ioc, scmd); |
2920 | if (!chain_req) |
2921 | return -1; |
2922 | chain = chain_req->chain_buffer; |
2923 | chain_dma = chain_req->chain_buffer_dma; |
2924 | } while (1); |
2925 | |
2926 | |
2927 | fill_in_last_segment: |
2928 | |
2929 | /* fill the last segment */ |
2930 | while (sges_left > 0) { |
2931 | if (sges_left == 1) |
			_base_add_sg_single_ieee(sg_local,
			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2938 | sg_scmd = sg_next(sg_scmd); |
2939 | sg_local += ioc->sge_size_ieee; |
2940 | sges_left--; |
2941 | } |
2942 | |
2943 | return 0; |
2944 | } |
2945 | |
2946 | /** |
2947 | * _base_build_sg_ieee - build generic sg for IEEE format |
2948 | * @ioc: per adapter object |
2949 | * @psge: virtual address for SGE |
2950 | * @data_out_dma: physical address for WRITES |
2951 | * @data_out_sz: data xfer size for WRITES |
2952 | * @data_in_dma: physical address for READS |
2953 | * @data_in_sz: data xfer size for READS |
2954 | */ |
2955 | static void |
2956 | _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, |
2957 | dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, |
2958 | size_t data_in_sz) |
2959 | { |
2960 | u8 sgl_flags; |
2961 | |
2962 | if (!data_out_sz && !data_in_sz) { |
		_base_build_zero_len_sge_ieee(ioc, psge);
2964 | return; |
2965 | } |
2966 | |
2967 | if (data_out_sz && data_in_sz) { |
2968 | /* WRITE sgel first */ |
2969 | sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | |
2970 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; |
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
2973 | |
2974 | /* incr sgel */ |
2975 | psge += ioc->sge_size_ieee; |
2976 | |
2977 | /* READ sgel last */ |
2978 | sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; |
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
2981 | } else if (data_out_sz) /* WRITE */ { |
2982 | sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | |
2983 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST | |
2984 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; |
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
2987 | } else if (data_in_sz) /* READ */ { |
2988 | sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | |
2989 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST | |
2990 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; |
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
2993 | } |
2994 | } |
2995 | |
2996 | #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) |
2997 | |
2998 | /** |
2999 | * _base_config_dma_addressing - set dma addressing |
3000 | * @ioc: per adapter object |
3001 | * @pdev: PCI device struct |
3002 | * |
3003 | * Return: 0 for success, non-zero for failure. |
3004 | */ |
3005 | static int |
3006 | _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) |
3007 | { |
3008 | struct sysinfo s; |
3009 | u64 coherent_dma_mask, dma_mask; |
3010 | |
3011 | if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) { |
3012 | ioc->dma_mask = 32; |
3013 | coherent_dma_mask = dma_mask = DMA_BIT_MASK(32); |
3014 | /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ |
3015 | } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { |
3016 | ioc->dma_mask = 63; |
3017 | coherent_dma_mask = dma_mask = DMA_BIT_MASK(63); |
3018 | } else { |
3019 | ioc->dma_mask = 64; |
3020 | coherent_dma_mask = dma_mask = DMA_BIT_MASK(64); |
3021 | } |
3022 | |
3023 | if (ioc->use_32bit_dma) |
3024 | coherent_dma_mask = DMA_BIT_MASK(32); |
3025 | |
	if (dma_set_mask(&pdev->dev, dma_mask) ||
	    dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
3028 | return -ENODEV; |
3029 | |
3030 | if (ioc->dma_mask > 32) { |
3031 | ioc->base_add_sg_single = &_base_add_sg_single_64; |
3032 | ioc->sge_size = sizeof(Mpi2SGESimple64_t); |
3033 | } else { |
3034 | ioc->base_add_sg_single = &_base_add_sg_single_32; |
3035 | ioc->sge_size = sizeof(Mpi2SGESimple32_t); |
3036 | } |
3037 | |
	si_meminfo(&s);
3039 | ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", |
3040 | ioc->dma_mask, convert_to_kb(s.totalram)); |
3041 | |
3042 | return 0; |
3043 | } |
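
/*
 * Worked example (illustration): DMA_BIT_MASK(n) expands to 2^n - 1, so the
 * SAS3/SAS35 path above sets a 63-bit mask of 0x7fffffffffffffff, while the
 * mCPU-endpoint/32-bit path limits both the streaming and the coherent mask
 * to 0xffffffff.
 */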
3044 | |
3045 | /** |
 * _base_check_enable_msix - check whether the controller is MSI-X capable
3047 | * @ioc: per adapter object |
3048 | * |
 * Check to see if the card is capable of MSI-X, and set the number
 * of available MSI-X vectors.
3051 | */ |
3052 | static int |
3053 | _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) |
3054 | { |
3055 | int base; |
3056 | u16 message_control; |
3057 | |
	/* Check whether the controller is a SAS2008 B0 controller;
	 * if so, use IO-APIC instead of MSI-X.
3060 | */ |
3061 | if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && |
3062 | ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { |
3063 | return -EINVAL; |
3064 | } |
3065 | |
	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
3067 | if (!base) { |
3068 | dfailprintk(ioc, ioc_info(ioc, "msix not supported\n")); |
3069 | return -EINVAL; |
3070 | } |
3071 | |
3072 | /* get msix vector count */ |
3073 | /* NUMA_IO not supported for older controllers */ |
3074 | if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || |
3075 | ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || |
3076 | ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || |
3077 | ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || |
3078 | ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || |
3079 | ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || |
3080 | ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) |
3081 | ioc->msix_vector_count = 1; |
3082 | else { |
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
3084 | ioc->msix_vector_count = (message_control & 0x3FF) + 1; |
3085 | } |
3086 | dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n", |
3087 | ioc->msix_vector_count)); |
3088 | return 0; |
3089 | } |
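
/*
 * Worked example (illustration): the Message Control word sits 2 bytes into
 * the MSI-X capability and its table-size field holds "vector count - 1".
 * Reading message_control == 0x001f therefore yields
 * (0x001f & 0x3FF) + 1 = 32 supported vectors.
 */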
3090 | |
3091 | /** |
3092 | * mpt3sas_base_free_irq - free irq |
3093 | * @ioc: per adapter object |
3094 | * |
3095 | * Freeing respective reply_queue from the list. |
3096 | */ |
3097 | void |
3098 | mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) |
3099 | { |
3100 | unsigned int irq; |
3101 | struct adapter_reply_queue *reply_q, *next; |
3102 | |
	if (list_empty(&ioc->reply_queue_list))
3104 | return; |
3105 | |
3106 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { |
		list_del(&reply_q->list);
		if (reply_q->is_iouring_poll_q) {
			kfree(reply_q);
			continue;
		}

		if (ioc->smp_affinity_enable) {
			irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
			irq_update_affinity_hint(irq, NULL);
		}
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
		    reply_q);
		kfree(reply_q);
3120 | } |
3121 | } |

/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserts the respective reply_queue into the list.
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	struct pci_dev *pdev = ioc->pdev;
	struct adapter_reply_queue *reply_q;
	int r, qid;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		ioc_err(ioc, "unable to allocate memory %zu!\n",
			sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;

	atomic_set(&reply_q->busy, 0);

	if (index >= ioc->iopoll_q_start_index) {
		qid = index - ioc->iopoll_q_start_index;
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
		    ioc->driver_name, ioc->id, qid);
		reply_q->is_iouring_poll_q = 1;
		ioc->io_uring_poll_queues[qid].reply_q = reply_q;
		goto out;
	}

	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err("%s: unable to allocate interrupt %d!\n",
		       reply_q->name, pci_irq_vector(pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}
out:
	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}

/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
	struct adapter_reply_queue *reply_q;
	int iopoll_q_count = ioc->reply_queue_count -
	    ioc->iopoll_q_start_index;
	const struct cpumask *mask;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	if (ioc->msix_load_balance)
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
	    ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (ioc->smp_affinity_enable) {

		/*
		 * set irq affinity to local numa node for those irqs
		 * corresponding to high iops queues.
		 */
		if (ioc->high_iops_queues) {
			mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
			for (index = 0; index < ioc->high_iops_queues;
			    index++) {
				irq = pci_irq_vector(ioc->pdev, index);
				irq_set_affinity_and_hint(irq, mask);
			}
		}

		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask;

			if (reply_q->msix_index < ioc->high_iops_queues ||
			    reply_q->msix_index >= ioc->iopoll_q_start_index)
				continue;

			mask = pci_irq_get_affinity(ioc->pdev,
			    reply_q->msix_index);
			if (!mask) {
				ioc_warn(ioc, "no affinity for msi %x\n",
					 reply_q->msix_index);
				goto fall_back;
			}

			for_each_cpu_and(cpu, mask, cpu_online_mask) {
				if (cpu >= ioc->cpu_msix_table_sz)
					break;
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			}
		}
		return;
	}

fall_back:
	cpu = cpumask_first(cpu_online_mask);
	nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
	index = 0;
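
	/* Distribute the online CPUs evenly across the remaining reply
	 * queues; each queue serves a contiguous group of CPUs.
	 */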
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (reply_q->msix_index < ioc->high_iops_queues ||
		    reply_q->msix_index >= ioc->iopoll_q_start_index)
			continue;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}

/**
 * _base_check_and_enable_high_iops_queues - enable high iops mode
 * @ioc: per adapter object
 * @hba_msix_vector_count: msix vectors supported by HBA
 *
 * Enable high iops queues only if
 *  - HBA is a SEA/AERO controller and
 *  - MSI-X vectors supported by the HBA is 128 and
 *  - total CPU count in the system >= 16 and
 *  - driver loaded with default max_msix_vectors module parameter and
 *  - system booted in non kdump mode
 *
 * Return: nothing.
 */
static void
_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
		int hba_msix_vector_count)
{
	u16 lnksta, speed;

	/*
	 * Disable high iops queues if io uring poll queues are enabled.
	 */
	if (perf_mode == MPT_PERF_MODE_IOPS ||
	    perf_mode == MPT_PERF_MODE_LATENCY ||
	    ioc->io_uring_poll_queues) {
		ioc->high_iops_queues = 0;
		return;
	}

	if (perf_mode == MPT_PERF_MODE_DEFAULT) {

		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
		speed = lnksta & PCI_EXP_LNKSTA_CLS;

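		/* Link speed codes below 0x4 mean slower than PCIe Gen4
		 * (16 GT/s); high iops queues are not enabled there.
		 */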
		if (speed < 0x4) {
			ioc->high_iops_queues = 0;
			return;
		}
	}

	if (!reset_devices && ioc->is_aero_ioc &&
	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
	    max_msix_vectors == -1)
		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
	else
		ioc->high_iops_queues = 0;
}

/**
 * mpt3sas_base_disable_msix - disables msix
 * @ioc: per adapter object
 */
void
mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_free_irq_vectors(ioc->pdev);
	ioc->msix_enable = 0;
	kfree(ioc->io_uring_poll_queues);
}

/**
 * _base_alloc_irq_vectors - allocate msix vectors
 * @ioc: per adapter object
 */
static int
_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
{
	int i, irq_flags = PCI_IRQ_MSIX;
	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
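	/* .pre_vectors excludes the high iops queues from the automatic
	 * affinity spreading; their affinity is set by hand in
	 * _base_assign_reply_queues().
	 */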
	struct irq_affinity *descp = &desc;
	/*
	 * Don't allocate msix vectors for poll_queues.
	 * msix_vectors is always within the range of FW supported reply
	 * queues.
	 */
	int nr_msix_vectors = ioc->iopoll_q_start_index;

	if (ioc->smp_affinity_enable)
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
	else
		descp = NULL;

	ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d, nr_msix_vectors: %d\n",
	    ioc->high_iops_queues, ioc->reply_queue_count, nr_msix_vectors);

	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
	    ioc->high_iops_queues,
	    nr_msix_vectors, irq_flags, descp);

	return i;
}

/**
 * _base_enable_msix - enables msix, falls back to io_apic
 * @ioc: per adapter object
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;
	int iopoll_q_count = 0;

	ioc->msix_load_balance = false;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
	    ioc->cpu_count, max_msix_vectors);

	ioc->reply_queue_count =
	    min_t(int, ioc->cpu_count, ioc->msix_vector_count);

	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		local_max_msix_vectors = (reset_devices) ? 1 : 8;
	else
		local_max_msix_vectors = max_msix_vectors;

	if (local_max_msix_vectors == 0)
		goto try_ioapic;

	/*
	 * Enable msix_load_balance only if combined reply queue mode is
	 * disabled on SAS3 & above generation HBA devices.
	 */
	if (!ioc->combined_reply_queue &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		ioc_info(ioc,
		    "combined ReplyQueue is off, Enabling msix load balance\n");
		ioc->msix_load_balance = true;
	}

	/*
	 * The smp affinity setting is not needed when msix load balance
	 * is enabled.
	 */
	if (ioc->msix_load_balance)
		ioc->smp_affinity_enable = 0;

	if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
		ioc->shost->host_tagset = 0;

	/*
	 * Enable io uring poll queues only if host_tagset is enabled.
	 */
	if (ioc->shost->host_tagset)
		iopoll_q_count = poll_queues;

	if (iopoll_q_count) {
		ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
		    sizeof(struct io_uring_poll_queue), GFP_KERNEL);
		if (!ioc->io_uring_poll_queues)
			iopoll_q_count = 0;
	}

	if (ioc->is_aero_ioc)
		_base_check_and_enable_high_iops_queues(ioc,
		    ioc->msix_vector_count);

	/*
	 * Add high iops queues count to reply queue count if high iops queues
	 * are enabled.
	 */
	ioc->reply_queue_count = min_t(int,
	    ioc->reply_queue_count + ioc->high_iops_queues,
	    ioc->msix_vector_count);

	/*
	 * Adjust the reply queue count in case the reply queue count
	 * exceeds the user provided MSIx vectors count.
	 */
	if (local_max_msix_vectors > 0)
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
		    ioc->reply_queue_count);
	/*
	 * Add io uring poll queues count to reply queues count
	 * if io uring is enabled in driver.
	 */
	if (iopoll_q_count) {
		if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
			iopoll_q_count = 0;
		ioc->reply_queue_count = min_t(int,
		    ioc->reply_queue_count + iopoll_q_count,
		    ioc->msix_vector_count);
	}

	/*
	 * Starting index of io uring poll queues in reply queue list.
	 */
	ioc->iopoll_q_start_index =
	    ioc->reply_queue_count - iopoll_q_count;

	r = _base_alloc_irq_vectors(ioc);
	if (r < 0) {
		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
		goto try_ioapic;
	}

	/*
	 * Adjust the reply queue count if the allocated
	 * MSIx vectors count is less than the requested number
	 * of MSIx vectors.
	 */
	if (r < ioc->iopoll_q_start_index) {
		ioc->reply_queue_count = r + iopoll_q_count;
		ioc->iopoll_q_start_index =
		    ioc->reply_queue_count - iopoll_q_count;
	}

	ioc->msix_enable = 1;
	for (i = 0; i < ioc->reply_queue_count; i++) {
		r = _base_request_irq(ioc, i);
		if (r) {
			mpt3sas_base_free_irq(ioc);
			mpt3sas_base_disable_msix(ioc);
			goto try_ioapic;
		}
	}

	ioc_info(ioc, "High IOPs queues : %s\n",
	    ioc->high_iops_queues ? "enabled" : "disabled");

	return 0;

/* fall back to io_apic interrupt routing */
try_ioapic:
	ioc->high_iops_queues = 0;
	ioc_info(ioc, "High IOPs queues : disabled\n");
	ioc->reply_queue_count = 1;
	ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
	if (r < 0) {
		dfailprintk(ioc,
		    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
			     r));
	} else
		r = _base_request_irq(ioc, 0);

	return r;
}

/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 */
static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);

	kfree(ioc->replyPostRegisterIndex);
	ioc->replyPostRegisterIndex = NULL;

	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		ioc->chip_phys = 0;
	}

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_device(pdev);
	}
}

static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
 *     and if it is in fault state then issue diag reset.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;
	int rc = -EFAULT;

	dinitprintk(ioc, pr_info("%s\n", __func__));
	if (ioc->pci_error_recovery)
		return 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		mpt3sas_base_mask_interrupts(ioc);
		rc = _base_diag_reset(ioc);
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
		mpt3sas_base_mask_interrupts(ioc);
		rc = _base_diag_reset(ioc);
	}

	return rc;
}

/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0, rc;
	u64 pio_chip = 0;
	phys_addr_t chip_phys = 0;
	struct adapter_reply_queue *reply_q;
	int iopoll_q_count = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
		ioc->bars = 0;
		return -ENODEV;
	}

	if (pci_request_selected_regions(pdev, ioc->bars,
	    ioc->driver_name)) {
		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

	pci_set_master(pdev);

	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

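	/* Scan the BARs: record the first I/O port region and map the
	 * first memory region (the controller register space).
	 */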
	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
	    (!memap_sz || !pio_sz); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
		}
	}

	if (ioc->chip == NULL) {
		ioc_err(ioc,
		    "unable to map adapter memory! or resource not found\n");
		r = -EINVAL;
		goto out_fail;
	}

	mpt3sas_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_fail;
	}

	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
	for (i = 0; i < iopoll_q_count; i++) {
		atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
		atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
	}

	if (!ioc->is_driver_loading)
		_base_init_irqpolls(ioc);
	/* Use the Combined reply queue feature only for SAS3 C0 & higher
	 * revision HBAs and also only when reply queue count is greater than 8
	 */
	if (ioc->combined_reply_queue) {
		/* Determine the Supplemental Reply Post Host Index Register
		 * addresses. These registers start at offset
		 * MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and each subsequent
		 * register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes
		 * after the previous one.
		 */
		ioc->replyPostRegisterIndex = kcalloc(
		    ioc->combined_reply_index_count,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->replyPostRegisterIndex) {
			ioc_err(ioc,
			    "allocation for replyPostRegisterIndex failed!\n");
			r = -ENOMEM;
			goto out_fail;
		}

		for (i = 0; i < ioc->combined_reply_index_count; i++) {
			ioc->replyPostRegisterIndex[i] =
			    (resource_size_t __iomem *)
			    ((u8 __force *)&ioc->chip->Doorbell +
			    MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
			    (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
		}
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
		    &ioc->chip->ReplyPostHostIndex;

		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
			ioc->reply_post_host_index[i] =
			    (resource_size_t __iomem *)
			    ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
			    * 4)));
	}

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
			pr_info("%s: enabled: index: %d\n",
			    reply_q->name, reply_q->msix_index);
			continue;
		}

		pr_info("%s: %s enabled: IRQ %d\n",
		    reply_q->name,
		    ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
		    pci_irq_vector(ioc->pdev, reply_q->msix_index));
	}

	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
		 &chip_phys, ioc->chip, memap_sz);
	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
		 (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);
	return 0;

out_fail:
	mpt3sas_base_unmap_resources(ioc);
	return r;
}

/**
 * mpt3sas_base_get_msg_frame - obtain request mf pointer
 * @ioc: per adapter object
 * @smid: system request message index (smid zero is invalid)
 *
 * Return: virt pointer to message frame.
 */
void *
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->request + (smid * ioc->request_sz));
}

/**
 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virt pointer to sense buffer.
 */
void *
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
}

/**
 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: phys pointer to the low 32bit address of the sense buffer.
 */
__le32
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
	    SCSI_SENSE_BUFFERSIZE));
}

/**
 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virt pointer to a PCIe SGL.
 */
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}

/**
 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: phys pointer to the address of the PCIe buffer.
 */
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}

/**
 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
 * @ioc: per adapter object
 * @phys_addr: lower 32 physical addr of the reply
 *
 * Converts a 32bit lower physical addr into a virt address.
 */
void *
mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
{
	if (!phys_addr)
		return NULL;
	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}

/**
 * _base_get_msix_index - get the msix index
 * @ioc: per adapter object
 * @scmd: scsi_cmnd object
 *
 * Return: msix index of general reply queues,
 * i.e. reply queue on which IO request's reply
 * should be posted by the HBA firmware.
 */
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	/* Enables reply_queue load balancing */
	if (ioc->msix_load_balance)
		return ioc->reply_queue_count ?
		    base_mod64(atomic64_add_return(1,
		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;

	if (scmd && ioc->shost->nr_hw_queues > 1) {
		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

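		/* The block layer hw queues map onto the reply queues that
		 * follow the high iops queues, hence the offset.
		 */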
		return blk_mq_unique_tag_to_hwq(tag) +
		    ioc->high_iops_queues;
	}

	return ioc->cpu_msix_table[raw_smp_processor_id()];
}

/**
 * _base_get_high_iops_msix_index - get the msix index of
 *     high iops queues
 * @ioc: per adapter object
 * @scmd: scsi_cmnd object
 *
 * Return: msix index of high iops reply queues,
 * i.e. high iops reply queue on which IO request's
 * reply should be posted by the HBA firmware.
 */
static inline u8
_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	/*
	 * Round-robin the IO interrupts among the high iops
	 * reply queues in batches of 16 when the number of
	 * outstanding IOs on the target device is >= 8.
	 */

	if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
		return base_mod64((
		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);

	return _base_get_msix_index(ioc, scmd);
}

/**
 * mpt3sas_base_get_smid - obtain a free smid from internal queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 */
u16
mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
{
	unsigned long flags;
	struct request_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->internal_free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		ioc_err(ioc, "%s: smid not available\n", __func__);
		return 0;
	}

	request = list_entry(ioc->internal_free_list.next,
	    struct request_tracker, tracker_list);
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/**
 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 * @scmd: pointer to scsi command object
 *
 * Return: smid (zero is invalid)
 */
u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
	struct scsi_cmnd *scmd)
{
	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
	u16 smid;
	u32 tag, unique_tag;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
	tag = blk_mq_unique_tag_to_tag(unique_tag);

	/*
	 * Store the hw queue number corresponding to the tag.
	 * This hw queue number is used later to determine
	 * the unique_tag using the logic below. This unique_tag
	 * is used to retrieve the scmd pointer corresponding
	 * to the tag using the scsi_host_find_tag() API.
	 *
	 * tag = smid - 1;
	 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
	 */
	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);

	smid = tag + 1;
	request->cb_idx = cb_idx;
	request->smid = smid;
	request->scmd = scmd;
	INIT_LIST_HEAD(&request->chain_list);
	return smid;
}

/**
 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 */
u16
mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
{
	unsigned long flags;
	struct request_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->hpr_free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		return 0;
	}

	request = list_entry(ioc->hpr_free_list.next,
	    struct request_tracker, tracker_list);
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

static void
_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
{
	/*
	 * See _wait_for_commands_to_complete() call with regards to this code.
	 */
	if (ioc->shost_recovery && ioc->pending_io_count) {
		ioc->pending_io_count = scsi_host_busy(ioc->shost);
		if (ioc->pending_io_count == 0)
			wake_up(&ioc->reset_wq);
	}
}

void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
	struct scsiio_tracker *st)
{
	if (WARN_ON(st->smid == 0))
		return;
	st->cb_idx = 0xFF;
	st->direct_io = 0;
	st->scmd = NULL;
	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
	st->smid = 0;
}

/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;

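	/* SCSI IO smids map 1:1 onto block layer tags, so their trackers
	 * can be cleared without taking the scsi_lookup_lock.
	 */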
	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;
		void *request;

		st = _get_st_from_smid(ioc, smid);
		if (!st) {
			_base_recovery_check(ioc);
			return;
		}

		/* Clear MPI request frame */
		request = mpt3sas_base_get_msg_frame(ioc, smid);
		memset(request, 0, ioc->request_sz);

		mpt3sas_base_clear_st(ioc, st);
		_base_recovery_check(ioc);
		ioc->io_queue_num[smid - 1] = 0;
		return;
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}

/**
 * _base_mpi_ep_writeq - 32 bit write to MMIO
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * This is special handling for MPI EP to take care of 32-bit
 * environments where it's not guaranteed that the entire word
 * is sent in one transfer.
 */
static inline void
_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
	spinlock_t *writeq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(writeq_lock, flags);
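	/* Post the low dword first, then the high dword; the lock keeps
	 * the two halves atomic with respect to other posters.
	 */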
	__raw_writel((u32)(b), addr);
	__raw_writel((u32)(b >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}

/**
 * _base_writeq - 64 bit write to MMIO
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64-bit word to MMIO. This special handling takes
 * care of 32-bit environments where it's not guaranteed that the entire word
 * is sent in one transfer.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
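	/* Make sure prior writes to the request frame are visible before
	 * the descriptor write that hands it to the firmware.
	 */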
	wmb();
	__raw_writeq(b, addr);
	barrier();
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	_base_mpi_ep_writeq(b, addr, writeq_lock);
}
#endif

/**
 * _base_set_and_get_msix_index - get the msix index and assign it to the
 *     msix_io variable of the scsi tracker
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: msix index.
 */
static u8
_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsiio_tracker *st = NULL;

	if (smid < ioc->hi_priority_smid)
		st = _get_st_from_smid(ioc, smid);

	if (st == NULL)
		return _base_get_msix_index(ioc, NULL);

	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
	return st->msix_io;
}

/**
 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;
	void *mpi_req_iomem;
	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

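	/* For the MPI endpoint the request frame must be cloned into the
	 * host-visible system interface region before the descriptor is
	 * posted.
	 */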
	_clone_sg_entries(ioc, (void *)mfp, smid);
	mpi_req_iomem = (void __force *)ioc->chip +
	    MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
	    ioc->request_sz);
	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * _base_put_smid_fast_path - send fast path request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.SCSIIO.RequestFlags =
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * _base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
 */
static void
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		/* TBD 256 is offset within sys register. */
		mpi_req_iomem = (void __force *)ioc->chip
		    + MPI_FRAME_START_OFFSET
		    + (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
		    ioc->request_sz);
	}

	request = (u64 *)&descriptor;

	descriptor.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.HighPriority.MSIxIndex = msix_task;
	descriptor.HighPriority.SMID = cpu_to_le16(smid);
	descriptor.HighPriority.LMID = 0;
	descriptor.HighPriority.Reserved1 = 0;
	if (ioc->is_mcpu_endpoint)
		_base_mpi_ep_writeq(*request,
		    &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
}

/**
 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
 *     firmware
 * @ioc: per adapter object
 * @smid: system request message index
 */
void
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.Default.RequestFlags =
	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * _base_put_smid_default - Default, primarily used for config pages
 * @ioc: per adapter object
 * @smid: system request message index
 */
static void
_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		_clone_sg_entries(ioc, (void *)mfp, smid);
		/* TBD 256 is offset within sys register */
		mpi_req_iomem = (void __force *)ioc->chip +
		    MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
		    ioc->request_sz);
	}
	request = (u64 *)&descriptor;
	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	if (ioc->is_mcpu_endpoint)
		_base_mpi_ep_writeq(*request,
		    &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
}

/**
 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
 *     Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 *
 * Return: nothing.
 */
static void
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

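	/* An atomic descriptor is posted with a single 32-bit write, so no
	 * writeq lock is needed.
	 */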
	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_put_smid_fast_path_atomic - send fast path request to firmware
 *     using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 *
 * Return: nothing
 */
static void
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_put_smid_hi_priority_atomic - send Task Management request to
 *     firmware using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
 *
 * Return: nothing.
 */
static void
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.MSIxIndex = msix_task;
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_put_smid_default_atomic - Default, primarily used for config pages,
 *     using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: nothing.
 */
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_display_OEMs_branding - Display branding string
 * @ioc: per adapter object
 */
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
{
	switch (ioc->pdev->subsystem_vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RMS2LL080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS2LL040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
				break;
			case MPT2SAS_INTEL_SSD910_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_SSD910_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RS25GB008_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RS25GB008_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_INTEL_RMS3JC080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3GC008_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3GC008_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3FC044_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3FC044_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3UC080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3UC080_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case PCI_VENDOR_ID_DELL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_BRANDING);
				break;
			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
				break;
			default:
				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_DELL_12G_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_DELL_12G_HBA_BRANDING);
				break;
			default:
				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case PCI_VENDOR_ID_CISCO:
		switch (ioc->pdev->device) {
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
				break;
			default:
				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3108_1:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
				break;
			default:
				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case MPT2SAS_HP_3PAR_SSVID:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2004:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
				break;
			default:
				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
				break;
			default:
				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
 *     version from FW Image Header.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2FWImageHeader_t *fw_img_hdr;
	Mpi26ComponentImageHeader_t *cmp_img_hdr;
	Mpi25FWUploadRequest_t *mpi_request;
	Mpi2FWUploadReply_t mpi_reply;
	int r = 0, issue_diag_reset = 0;
	u32 package_version = 0;
	void *fwpkg_data = NULL;
	dma_addr_t fwpkg_data_dma;
	u16 smid, ioc_status;
	size_t data_length;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	data_length = sizeof(Mpi2FWImageHeader_t);
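	/* Uploading just the image header is enough to read the package
	 * version out of the flash image.
	 */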
	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
	    &fwpkg_data_dma, GFP_KERNEL);
	if (!fwpkg_data) {
		ioc_err(ioc,
		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		r = -EAGAIN;
		goto out;
	}

	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
	mpi_request->ImageSize = cpu_to_le32(data_length);
	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
	    data_length);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for 15 seconds */
	wait_for_completion_timeout(&ioc->base_cmds.done,
	    FW_IMG_HDR_READ_TIMEOUT*HZ);
	ioc_info(ioc, "%s: complete\n", __func__);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi25FWUploadRequest_t)/4);
		issue_diag_reset = 1;
	} else {
		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
			memcpy(&mpi_reply, ioc->base_cmds.reply,
			    sizeof(Mpi2FWUploadReply_t));
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
				fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
				if (le32_to_cpu(fw_img_hdr->Signature) ==
				    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
					cmp_img_hdr =
					    (Mpi26ComponentImageHeader_t *)
					    (fwpkg_data);
					package_version =
					    le32_to_cpu(
					    cmp_img_hdr->ApplicationSpecific);
				} else
					package_version =
					    le32_to_cpu(
					    fw_img_hdr->PackageVersion.Word);
				if (package_version)
					ioc_info(ioc,
					    "FW Package Ver(%02d.%02d.%02d.%02d)\n",
					    ((package_version) & 0xFF000000) >> 24,
					    ((package_version) & 0x00FF0000) >> 16,
					    ((package_version) & 0x0000FF00) >> 8,
					    (package_version) & 0x000000FF);
			} else {
				_debug_dump_mf(&mpi_reply,
				    sizeof(Mpi2FWUploadReply_t)/4);
			}
		}
	}
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
	if (fwpkg_data)
		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
		    fwpkg_data_dma);
	if (issue_diag_reset) {
		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}

/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
 * @ioc: per adapter object
 */
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	char desc[17] = {0};
	u32 iounit_pg1_flags;

	strncpy(desc, ioc->manu_pg0.ChipName, 16);
	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
	    desc,
	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
	    ioc->facts.FWVersion.Word & 0x000000FF,
	    ioc->pdev->revision);

	_base_display_OEMs_branding(ioc);

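	/*
	 * 'i' counts the labels printed so far so that a comma is
	 * emitted before every label except the first in each list.
	 */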
	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		pr_info("%sNVMe", i ? "," : "");
		i++;
	}

	ioc_info(ioc, "Protocol=(");

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_cont("Initiator");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
		pr_cont("%sTarget", i ? "," : "");
		i++;
	}

	i = 0;
	pr_cont("), Capabilities=(");

	if (!ioc->hide_ir_msg) {
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
			pr_cont("Raid");
			i++;
		}
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
		pr_cont("%sTLR", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
		pr_cont("%sMulticast", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
		pr_cont("%sBIDI Target", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
		pr_cont("%sEEDP", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
		pr_cont("%sSnapshot Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
		pr_cont("%sDiag Trace Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
		pr_cont("%sDiag Extended Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
		pr_cont("%sTask Set Full", i ? "," : "");
		i++;
	}

	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
		pr_cont("%sNCQ", i ? "," : "");
		i++;
	}

	pr_cont(")\n");
}

/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: amount of time till device is reported missing
 * @io_missing_delay: interval IO is returned when there is a missing device
 *
 * Both delays are passed in on the command line; this function modifies
 * the device missing delay as well as the io missing delay, and should
 * be called at driver load time.
 */
void
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
	u16 device_missing_delay, u8 io_missing_delay)
{
	u16 dmd, dmd_new, dmd_original;
	u8 io_missing_delay_original;
	u16 sz;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	u8 num_phys = 0;
	u16 ioc_status;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys)
		return;

	sz = struct_size(sas_iounit_pg1, PhyData, num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

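	/*
	 * ReportDeviceMissingDelay holds values up to 0x7F in seconds;
	 * larger values set the UNIT_16 flag and are stored in units of
	 * 16 seconds. E.g. a requested delay of 300 seconds is stored
	 * as 300/16 = 18 units, an effective 288 seconds.
	 */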
	/* device missing delay */
	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_original = dmd;
	if (device_missing_delay > 0x7F) {
		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
		    device_missing_delay;
		dmd = dmd / 16;
		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
	} else
		dmd = device_missing_delay;
	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;

	/* io missing delay */
	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
	    sz)) {
		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
			dmd_new = (dmd &
			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
		else
			dmd_new =
			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
		    dmd_original, dmd_new);
		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
		    io_missing_delay_original,
		    io_missing_delay);
		ioc->device_missing_delay = dmd_new;
		ioc->io_missing_delay = io_missing_delay;
	}

out:
	kfree(sas_iounit_pg1);
}

/**
 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
 *	according to performance mode.
 * @ioc: per adapter object
 *
 * Return: zero on success; otherwise return EAGAIN error code asking the
 * caller to retry.
 */
static int
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCPage1_t ioc_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int rc;

	rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
	if (rc)
		return rc;
	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));

	switch (perf_mode) {
	case MPT_PERF_MODE_DEFAULT:
	case MPT_PERF_MODE_BALANCED:
		if (ioc->high_iops_queues) {
			ioc_info(ioc,
			    "Enable interrupt coalescing only for first %d reply queues\n",
			    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
			/*
			 * If bit 31 is zero then interrupt coalescing is
			 * enabled for all reply descriptor post queues.
			 * If bit 31 is set then interrupt coalescing can be
			 * enabled/disabled per group of eight reply
			 * descriptor post queues. So to enable coalescing
			 * only on the first group, set bit 31 and bit 0.
			 */
			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
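			/*
			 * Assuming MPT3SAS_HIGH_IOPS_REPLY_QUEUES is 8, the
			 * above evaluates to 0x80000000 | 0x1: bit 31 selects
			 * per-group control and bit 0 enables coalescing for
			 * group 0 only, i.e. reply queues 0-7.
			 */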
			rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
			if (rc)
				return rc;
			ioc_info(ioc, "performance mode: balanced\n");
			return 0;
		}
		fallthrough;
	case MPT_PERF_MODE_LATENCY:
		/*
		 * Enable interrupt coalescing on all reply queues
		 * with timeout value 0xA
		 */
		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		ioc_info(ioc, "performance mode: latency\n");
		break;
	case MPT_PERF_MODE_IOPS:
		/*
		 * Enable interrupt coalescing on all reply queues.
		 */
		ioc_info(ioc,
		    "performance mode: iops with coalescing timeout: 0x%x\n",
		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		break;
	}
	return 0;
}

/**
 * _base_get_event_diag_triggers - get event diag trigger values from
 *				persistent pages
 * @ioc: per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage2_t trigger_pg2;
	struct SL_WH_EVENT_TRIGGER_T *event_tg;
	MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY *mpi_event_tg;
	Mpi2ConfigReply_t mpi_reply;
	int r = 0, i = 0;
	u16 count = 0;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
	    &trigger_pg2);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
		count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
		count = min_t(u16, NUM_VALID_ENTRIES, count);
		ioc->diag_trigger_event.ValidEntries = count;

		event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
		mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
		for (i = 0; i < count; i++) {
			event_tg->EventValue = le16_to_cpu(
			    mpi_event_tg->MPIEventCode);
			event_tg->LogEntryQualifier = le16_to_cpu(
			    mpi_event_tg->MPIEventCodeSpecific);
			event_tg++;
			mpi_event_tg++;
		}
	}
	return 0;
}

/**
 * _base_get_scsi_diag_triggers - get scsi diag trigger values from
 *				persistent pages
 * @ioc: per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage3_t trigger_pg3;
	struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
	MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY *mpi_scsi_tg;
	Mpi2ConfigReply_t mpi_reply;
	int r = 0, i = 0;
	u16 count = 0;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
	    &trigger_pg3);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
		count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
		count = min_t(u16, NUM_VALID_ENTRIES, count);
		ioc->diag_trigger_scsi.ValidEntries = count;

		scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
		mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
		for (i = 0; i < count; i++) {
			scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
			scsi_tg->ASC = mpi_scsi_tg->ASC;
			scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;

			scsi_tg++;
			mpi_scsi_tg++;
		}
	}
	return 0;
}

/**
 * _base_get_mpi_diag_triggers - get mpi diag trigger values from
 *				persistent pages
 * @ioc: per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage4_t trigger_pg4;
	struct SL_WH_MPI_TRIGGER_T *status_tg;
	MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY *mpi_status_tg;
	Mpi2ConfigReply_t mpi_reply;
	int r = 0, i = 0;
	u16 count = 0;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
	    &trigger_pg4);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
		count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
		count = min_t(u16, NUM_VALID_ENTRIES, count);
		ioc->diag_trigger_mpi.ValidEntries = count;

		status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
		mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];

		for (i = 0; i < count; i++) {
			status_tg->IOCStatus = le16_to_cpu(
			    mpi_status_tg->IOCStatus);
			status_tg->IocLogInfo = le32_to_cpu(
			    mpi_status_tg->LogInfo);

			status_tg++;
			mpi_status_tg++;
		}
	}
	return 0;
}

/**
 * _base_get_master_diag_triggers - get master diag trigger values from
 *				persistent pages
 * @ioc: per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage1_t trigger_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int r;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
	    &trigger_pg1);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
		ioc->diag_trigger_master.MasterData |=
		    le32_to_cpu(
		    trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
	return 0;
}

/**
 * _base_check_for_trigger_pages_support - checks whether HBA FW supports
 *					driver trigger pages or not
 * @ioc: per adapter object
 * @trigger_flags: address where trigger page0's TriggerFlags value is copied
 *
 * Return: 0 if HBA FW supports driver trigger pages, with the TriggerFlags
 * mask copied to @trigger_flags; -EFAULT if driver trigger pages are not
 * supported by the FW; or -EAGAIN if a diag reset occurred due to an FW
 * fault, asking the caller to retry the command.
 */
static int
_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
{
	Mpi26DriverTriggerPage0_t trigger_pg0;
	int r = 0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
	    &trigger_pg0);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return -EFAULT;

	*trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
	return 0;
}

/**
 * _base_get_diag_triggers - Retrieve diag trigger values from
 *				persistent pages.
 * @ioc: per adapter object
 *
 * Return: zero on success; otherwise return -EAGAIN error code
 * asking the caller to retry.
 */
static int
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	u32 trigger_flags;
	int r;

	/*
	 * Default setting of master trigger.
	 */
	ioc->diag_trigger_master.MasterData =
	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);

	r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
	if (r) {
		if (r == -EAGAIN)
			return r;
		/*
		 * Don't go for error handling when FW doesn't support
		 * driver trigger pages.
		 */
		return 0;
	}

	ioc->supports_trigger_pages = 1;

	/*
	 * Retrieve master diag trigger values from driver trigger pg1
	 * if master trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
		r = _base_get_master_diag_triggers(ioc);
		if (r)
			return r;
	}

	/*
	 * Retrieve event diag trigger values from driver trigger pg2
	 * if event trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
		r = _base_get_event_diag_triggers(ioc);
		if (r)
			return r;
	}

	/*
	 * Retrieve scsi diag trigger values from driver trigger pg3
	 * if scsi trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
		r = _base_get_scsi_diag_triggers(ioc);
		if (r)
			return r;
	}
	/*
	 * Retrieve mpi error diag trigger values from driver trigger pg4
	 * if loginfo trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
		r = _base_get_mpi_diag_triggers(ioc);
		if (r)
			return r;
	}
	return 0;
}

/**
 * _base_update_diag_trigger_pages - Update the driver trigger pages after
 *			online FW update, in case updated FW supports driver
 *			trigger pages.
 * @ioc: per adapter object
 *
 * Return: nothing.
 */
static void
_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
{
	if (ioc->diag_trigger_master.MasterData)
		mpt3sas_config_update_driver_trigger_pg1(ioc,
		    &ioc->diag_trigger_master, 1);

	if (ioc->diag_trigger_event.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg2(ioc,
		    &ioc->diag_trigger_event, 1);

	if (ioc->diag_trigger_scsi.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg3(ioc,
		    &ioc->diag_trigger_scsi, 1);

	if (ioc->diag_trigger_mpi.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg4(ioc,
		    &ioc->diag_trigger_mpi, 1);
}

/**
 * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices.
 *	On failure set default QD values.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage1_t sas_iounit_pg1;
	Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
	u16 depth;
	int rc = 0;

	ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
	ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
	ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
	ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
	if (!ioc->is_gen35_ioc)
		goto out;
	/* sas iounit page 1 */
	rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    &sas_iounit_pg1, sizeof(Mpi2SasIOUnitPage1_t));
	if (rc) {
		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	depth = le16_to_cpu(sas_iounit_pg1.SASWideMaxQueueDepth);
	ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);

	depth = le16_to_cpu(sas_iounit_pg1.SASNarrowMaxQueueDepth);
	ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);

	depth = sas_iounit_pg1.SATAMaxQDepth;
	ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);

	/* pcie iounit page 1 */
	rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
	    &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
	if (rc) {
		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
	    (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
	    MPT3SAS_NVME_QUEUE_DEPTH;
out:
	dinitprintk(ioc, pr_err(
	    "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
	    ioc->max_wideport_qd, ioc->max_narrowport_qd,
	    ioc->max_sata_qd, ioc->max_nvme_qd));
	return rc;
}

/**
 * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
 *
 * @ioc: per adapter object
 * @n: ptr to the ATTO nvram structure
 * Return: 0 for success, non-zero for failure.
 */
static int
mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
	struct ATTO_SAS_NVRAM *n)
{
	int r = -EINVAL;
	union ATTO_SAS_ADDRESS *s1;
	u32 len;
	u8 *pb;
	u8 ckSum;

	/* validate nvram checksum */
	pb = (u8 *) n;
	ckSum = ATTO_SASNVR_CKSUM_SEED;
	len = sizeof(struct ATTO_SAS_NVRAM);

	while (len--)
		ckSum = ckSum + pb[len];

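	/*
	 * ATTO_SASNVR_CKSUM_SEED is chosen so that the seed plus all
	 * NVRAM bytes sums to zero (mod 256) for a valid image; any
	 * non-zero result means the NVRAM is corrupt.
	 */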
	if (ckSum) {
		ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
		return r;
	}

	s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;

	if (n->Signature[0] != 'E'
	    || n->Signature[1] != 'S'
	    || n->Signature[2] != 'A'
	    || n->Signature[3] != 'S')
		ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
	else if (n->Version > ATTO_SASNVR_VERSION)
		ioc_info(ioc, "Invalid ATTO NVRAM version");
	else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
	    || s1->b[0] != 0x50
	    || s1->b[1] != 0x01
	    || s1->b[2] != 0x08
	    || (s1->b[3] & 0xF0) != 0x60
	    || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
		ioc_err(ioc, "Invalid ATTO SAS address\n");
	} else
		r = 0;
	return r;
}

/**
 * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
 *
 * @ioc: per adapter object
 * @sas_addr: return sas address
 * Return: 0 for success, non-zero for failure.
 */
static int
mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
{
	Mpi2ManufacturingPage1_t mfg_pg1;
	Mpi2ConfigReply_t mpi_reply;
	struct ATTO_SAS_NVRAM *nvram;
	int r;
	__be64 addr;

	r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
	if (r) {
		ioc_err(ioc, "Failed to read manufacturing page 1\n");
		return r;
	}

	/* validate nvram */
	nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
	r = mpt3sas_atto_validate_nvram(ioc, nvram);
	if (r)
		return r;

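	/*
	 * The NVRAM stores the SAS address big-endian; byte swap it into
	 * the little-endian form used by the rest of the driver.
	 */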
	addr = *((__be64 *) nvram->SasAddr);
	sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
	return r;
}

/**
 * mpt3sas_atto_init - perform initialization for ATTO branded
 *	adapter.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
{
	int sz = 0;
	Mpi2BiosPage4_t *bios_pg4 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	int r;
	int ix;
	union ATTO_SAS_ADDRESS sas_addr;
	union ATTO_SAS_ADDRESS temp;
	union ATTO_SAS_ADDRESS bias;

	r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
	if (r)
		return r;

	/* get header first to get size */
	r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
	if (r) {
		ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
		return r;
	}

	sz = mpi_reply.Header.PageLength * sizeof(u32);
	bios_pg4 = kzalloc(sz, GFP_KERNEL);
	if (!bios_pg4) {
		ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
		return -ENOMEM;
	}

	/* read bios page 4 */
	r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
	if (r) {
		ioc_err(ioc, "Failed to read ATTO bios page 4\n");
		goto out;
	}

	/* Update bios page 4 with the ATTO WWID */
	bias.q = sas_addr.q;
	bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;

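	/*
	 * Each phy gets a unique WWID derived from the base SAS address
	 * plus its index; the device name uses the same base offset by
	 * ATTO_SAS_ADDR_DEVNAME_BIAS.
	 */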
	for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
		temp.q = sas_addr.q;
		temp.b[7] += ix;
		bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
		bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
	}
	r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);

out:
	kfree(bios_pg4);
	return r;
}

/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Return: 0 on success, anything else is an error.
 */
static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOUnitPage8_t iounit_pg8;
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;
	int tg_flags = 0;
	int rc;

	ioc->nvme_abort_timeout = 30;

	rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
	    &ioc->manu_pg0);
	if (rc)
		return rc;
	if (ioc->ir_firmware) {
		rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);
		if (rc)
			return rc;
	}

	if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
		rc = mpt3sas_atto_init(ioc);
		if (rc)
			return rc;
	}

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
	    &ioc->manu_pg11);
	if (rc)
		return rc;
	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}
	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
		ioc->tm_custom_handling = 1;
	else {
		ioc->tm_custom_handling = 0;
		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
		else if (ioc->manu_pg11.NVMeAbortTO >
		    NVME_TASK_ABORT_MAX_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
		else
			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
	}
	ioc->time_sync_interval =
	    ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
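	/*
	 * The low bits of TimeSyncInterval carry the count and the unit
	 * bit selects hours vs. minutes; normalize the value to seconds
	 * for the periodic driver-FW time sync.
	 */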
	if (ioc->time_sync_interval) {
		if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_HOUR;
		else
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_MIN;
		dinitprintk(ioc, ioc_info(ioc,
		    "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
		    ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
		    MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
	} else {
		if (ioc->is_gen35_ioc)
			ioc_warn(ioc,
			    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
	}
	rc = _base_assign_fw_reported_qd(ioc);
	if (rc)
		return rc;

	/*
	 * ATTO doesn't use bios page 2 and 3 for bios settings.
	 */
	if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
		ioc->bios_pg3.BiosVersion = 0;
	else {
		rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
		if (rc)
			return rc;
		rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
		if (rc)
			return rc;
	}

	rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8);
	if (rc)
		return rc;
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that it's supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	if (rc)
		return rc;

	if (iounit_pg8.NumSensors)
		ioc->temp_sensors_count = iounit_pg8.NumSensors;
	if (ioc->is_aero_ioc) {
		rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
		if (rc)
			return rc;
	}
	if (ioc->is_gen35_ioc) {
		if (ioc->is_driver_loading) {
			rc = _base_get_diag_triggers(ioc);
			if (rc)
				return rc;
		} else {
			/*
			 * In case of an online HBA FW update operation,
			 * check whether the updated FW supports the driver
			 * trigger pages or not.
			 * - If the previous FW did not support driver trigger
			 *   pages and the newer FW does, update these pages
			 *   with the current diag trigger values.
			 * - If the previous FW supported driver trigger pages
			 *   and the new FW does not, disable the
			 *   supports_trigger_pages flag.
			 */
			_base_check_for_trigger_pages_support(ioc, &tg_flags);
			if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
				_base_update_diag_trigger_pages(ioc);
			else if (ioc->supports_trigger_pages &&
			    tg_flags == -EFAULT)
				ioc->supports_trigger_pages = 0;
		}
	}
	return 0;
}

/**
 * mpt3sas_free_enclosure_list - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated during enclosure add.
 */
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;

	/* Free enclosure list */
	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &ioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	int j = 0;
	int dma_alloc_count = 0;
	struct chain_tracker *ct;
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->request) {
		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc,
		    ioc_info(ioc, "request_pool(0x%p): free\n",
		    ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc,
		    ioc_info(ioc, "sense_pool(0x%p): free\n",
		    ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		dma_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc,
		    ioc_info(ioc, "reply_pool(0x%p): free\n",
		    ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		dma_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc,
		    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
		    ioc->reply_free));
		ioc->reply_free = NULL;
	}

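	/*
	 * reply_post entries were allocated in chunks of
	 * RDPQ_MAX_INDEX_IN_ONE_CHUNK queues; only the first entry of
	 * each chunk owns a dma_pool allocation, so only those are freed.
	 */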
	if (ioc->reply_post) {
		dma_alloc_count = DIV_ROUND_UP(count,
		    RDPQ_MAX_INDEX_IN_ONE_CHUNK);
		for (i = 0; i < count; i++) {
			if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
			    && dma_alloc_count) {
				if (ioc->reply_post[i].reply_post_free) {
					dma_pool_free(
					    ioc->reply_post_free_dma_pool,
					    ioc->reply_post[i].reply_post_free,
					    ioc->reply_post[i].reply_post_free_dma);
					dexitprintk(ioc, ioc_info(ioc,
					    "reply_post_free_pool(0x%p): free\n",
					    ioc->reply_post[i].reply_post_free));
					ioc->reply_post[i].reply_post_free =
					    NULL;
				}
				--dma_alloc_count;
			}
		}
		dma_pool_destroy(ioc->reply_post_free_dma_pool);
		if (ioc->reply_post_free_array &&
		    ioc->rdpq_array_enable) {
			dma_pool_free(ioc->reply_post_free_array_dma_pool,
			    ioc->reply_post_free_array,
			    ioc->reply_post_free_array_dma);
			ioc->reply_post_free_array = NULL;
		}
		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
		kfree(ioc->reply_post);
	}

	if (ioc->pcie_sgl_dma_pool) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			dma_pool_free(ioc->pcie_sgl_dma_pool,
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
		}
		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
	}
	kfree(ioc->pcie_sg_lookup);
	ioc->pcie_sg_lookup = NULL;

	if (ioc->config_page) {
		dexitprintk(ioc,
		    ioc_info(ioc, "config_page(0x%p): free\n",
		    ioc->config_page));
		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	kfree(ioc->hpr_lookup);
	ioc->hpr_lookup = NULL;
	kfree(ioc->internal_lookup);
	ioc->internal_lookup = NULL;
	if (ioc->chain_lookup) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			for (j = ioc->chains_per_prp_buffer;
			    j < ioc->chains_needed_per_io; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				if (ct && ct->chain_buffer)
					dma_pool_free(ioc->chain_dma_pool,
					    ct->chain_buffer,
					    ct->chain_buffer_dma);
			}
			kfree(ioc->chain_lookup[i].chains_per_smid);
		}
		dma_pool_destroy(ioc->chain_dma_pool);
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}

	kfree(ioc->io_queue_num);
	ioc->io_queue_num = NULL;
}

/**
 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set
 *	have the same upper 32 bits in their base memory address.
 * @start_address: Base address of a reply queue set
 * @pool_sz: Size of a single Reply Descriptor Post Queues pool
 *
 * Return: 1 if the reply queues in a set have the same upper 32 bits in
 * their base memory address, else 0.
 */
static int
mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
	dma_addr_t end_address;

	end_address = start_address + pool_sz - 1;

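	/*
	 * Example: an 8 KB pool starting at 0x1_FFFF_F000 ends at
	 * 0x2_0000_0FFF; the upper 32 bits differ (1 vs 2), so the pool
	 * crosses a 4GB boundary and 0 is returned.
	 */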
	if (upper_32_bits(start_address) == upper_32_bits(end_address))
		return 1;
	else
		return 0;
}

/**
 * _base_reduce_hba_queue_depth - Retry with reduced queue depth
 * @ioc: Adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static inline int
_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
{
	int reduce_sz = 64;

	if ((ioc->hba_queue_depth - reduce_sz) >
	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
		ioc->hba_queue_depth -= reduce_sz;
		return 0;
	} else
		return -ENOMEM;
}

/**
 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
 *	for pcie sgl pools.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ct;

	ioc->pcie_sgl_dma_pool =
	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
	    ioc->page_size, 0);
	if (!ioc->pcie_sgl_dma_pool) {
		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
		return -ENOMEM;
	}

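	/*
	 * A PCIe SGL buffer can double as chain-segment storage: carve
	 * as many chain segments out of each buffer as fit, capped by
	 * the number of chains a single I/O can ever need.
	 */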
	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
	ioc->chains_per_prp_buffer =
	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->pcie_sg_lookup[i].pcie_sgl =
		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
			return -EAGAIN;
		}

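		/*
		 * On a 4GB-boundary violation the caller is expected to
		 * free everything and retry the allocations with a
		 * 32-bit DMA mask, which forces all buffers below 4GB;
		 * -EAGAIN with use_32bit_dma set signals that retry.
		 */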
		if (!mpt3sas_check_same_4gb_region(
		    ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    (unsigned long long)
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->use_32bit_dma = true;
			return -EAGAIN;
		}

		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
			ct = &ioc->chain_lookup[i].chains_per_smid[j];
			ct->chain_buffer =
			    ioc->pcie_sg_lookup[i].pcie_sgl +
			    (j * ioc->chain_segment_sz);
			ct->chain_buffer_dma =
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
			    (j * ioc->chain_segment_sz);
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "Number of chains can fit in a PRP page(%d)\n",
	    ioc->chains_per_prp_buffer));
	return 0;
}

/**
 * _base_allocate_chain_dma_pool - Allocating DMA'able memory
 *	for chain dma pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ctr;

	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
	    ioc->chain_segment_sz, 16, 0);
	if (!ioc->chain_dma_pool)
		return -ENOMEM;

	for (i = 0; i < ioc->scsiio_depth; i++) {
		for (j = ioc->chains_per_prp_buffer;
		    j < ioc->chains_needed_per_io; j++) {
			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
			    GFP_KERNEL, &ctr->chain_buffer_dma);
			if (!ctr->chain_buffer)
				return -EAGAIN;
			if (!mpt3sas_check_same_4gb_region(
			    ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
				ioc_err(ioc,
				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
				    ctr->chain_buffer,
				    (unsigned long long)ctr->chain_buffer_dma);
				ioc->use_32bit_dma = true;
				return -EAGAIN;
			}
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
	    ioc->chain_segment_sz))/1024));
	return 0;
}

/**
 * _base_allocate_sense_dma_pool - Allocating DMA'able memory
 *	for sense dma pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	ioc->sense_dma_pool =
	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
	if (!ioc->sense_dma_pool)
		return -ENOMEM;
	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
	    GFP_KERNEL, &ioc->sense_dma);
	if (!ioc->sense)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
		    ioc->sense, (unsigned long long) ioc->sense_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	ioc_info(ioc,
	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
	    ioc->sense, (unsigned long long)ioc->sense_dma,
	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
	return 0;
}

/**
 * _base_allocate_reply_pool - Allocating DMA'able memory
 *	for reply pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* reply pool, 4 byte align */
	ioc->reply_dma_pool = dma_pool_create("reply pool",
	    &ioc->pdev->dev, sz, 4, 0);
	if (!ioc->reply_dma_pool)
		return -ENOMEM;
	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
		    ioc->reply, (unsigned long long) ioc->reply_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	ioc_info(ioc,
	    "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->reply, (unsigned long long)ioc->reply_dma,
	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
	return 0;
}

/**
 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
 *	for reply free dma pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* reply free queue, 16 byte align */
	ioc->reply_free_dma_pool = dma_pool_create(
	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool)
		return -ENOMEM;
	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
	    GFP_KERNEL, &ioc->reply_free_dma);
	if (!ioc->reply_free)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
		dinitprintk(ioc,
		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free_dma (0x%llx)\n",
	    (unsigned long long)ioc->reply_free_dma));
	return 0;
}

/**
 * _base_allocate_reply_post_free_array - Allocating DMA'able memory
 *	for reply post free array.
 * @ioc: Adapter object
 * @reply_post_free_array_sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
	u32 reply_post_free_array_sz)
{
	ioc->reply_post_free_array_dma_pool =
	    dma_pool_create("reply_post_free_array pool",
	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
	if (!ioc->reply_post_free_array_dma_pool)
		return -ENOMEM;
	ioc->reply_post_free_array =
	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
	if (!ioc->reply_post_free_array)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
	    reply_post_free_array_sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Reply Post Free Array! Array (0x%p) dma = (0x%llx)\n",
		    ioc->reply_post_free_array,
		    (unsigned long long)ioc->reply_post_free_array_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	return 0;
}

/**
 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
 *	for reply queues.
 * @ioc: per adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
{
	int i = 0;
	u32 dma_alloc_count = 0;
	int reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
	    GFP_KERNEL);
	if (!ioc->reply_post)
		return -ENOMEM;
	/*
	 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ...) and
	 * for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ...)
	 * should be within a 4GB boundary, i.e. reply queues in a set must
	 * have the same upper 32 bits in their memory address. So here the
	 * driver allocates the DMA'able memory for reply queues accordingly.
	 * The driver uses the limitation of VENTURA_SERIES to manage
	 * INVADER_SERIES as well.
	 */
	dma_alloc_count = DIV_ROUND_UP(count,
	    RDPQ_MAX_INDEX_IN_ONE_CHUNK);
	ioc->reply_post_free_dma_pool =
	    dma_pool_create("reply_post_free pool",
	    &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
			ioc->reply_post[i].reply_post_free =
			    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
				GFP_KERNEL,
				&ioc->reply_post[i].reply_post_free_dma);
			if (!ioc->reply_post[i].reply_post_free)
				return -ENOMEM;
			/*
			 * Each set of RDPQ pool must satisfy 4gb boundary
			 * restriction.
			 * 1) Check if allocated resources for RDPQ pool are in
			 *	the same 4GB range.
			 * 2) If #1 is true, continue with 64 bit DMA.
			 * 3) If #1 is false, return -EAGAIN, which means free
			 *	all the resources, set the DMA mask to 32 bit
			 *	and allocate again.
			 */
			if (!mpt3sas_check_same_4gb_region(
			    ioc->reply_post[i].reply_post_free_dma, sz)) {
				dinitprintk(ioc,
				    ioc_err(ioc, "bad Replypost free pool(0x%p) "
				    "reply_post_free_dma = (0x%llx)\n",
				    ioc->reply_post[i].reply_post_free,
				    (unsigned long long)
				    ioc->reply_post[i].reply_post_free_dma));
				return -EAGAIN;
			}
			dma_alloc_count--;

		} else {
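			/*
			 * Queues sharing a chunk reuse the mapping of the
			 * first queue in the chunk, offset by one queue's
			 * worth of reply descriptors.
			 */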
			ioc->reply_post[i].reply_post_free =
			    (Mpi2ReplyDescriptorsUnion_t *)
			    ((long)ioc->reply_post[i-1].reply_post_free
			    + reply_post_free_sz);
			ioc->reply_post[i].reply_post_free_dma =
			    (dma_addr_t)
			    (ioc->reply_post[i-1].reply_post_free_dma +
			    reply_post_free_sz);
		}
	}
	return 0;
}
6250 | |
6251 | /** |
6252 | * _base_allocate_memory_pools - allocate start of day memory pools |
6253 | * @ioc: per adapter object |
6254 | * |
6255 | * Return: 0 success, anything else error. |
6256 | */ |
6257 | static int |
6258 | _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) |
6259 | { |
6260 | struct mpt3sas_facts *facts; |
6261 | u16 max_sge_elements; |
6262 | u16 chains_needed_per_io; |
6263 | u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz; |
6264 | u32 retry_sz; |
6265 | u32 rdpq_sz = 0, sense_sz = 0; |
6266 | u16 max_request_credit, nvme_blocks_needed; |
6267 | unsigned short sg_tablesize; |
6268 | u16 sge_size; |
6269 | int i; |
6270 | int ret = 0, rc = 0; |
6271 | |
6272 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
6273 | |
6274 | |
6275 | retry_sz = 0; |
6276 | facts = &ioc->facts; |
6277 | |
6278 | /* command line tunables for max sgl entries */ |
6279 | if (max_sgl_entries != -1) |
6280 | sg_tablesize = max_sgl_entries; |
6281 | else { |
6282 | if (ioc->hba_mpi_version_belonged == MPI2_VERSION) |
6283 | sg_tablesize = MPT2SAS_SG_DEPTH; |
6284 | else |
6285 | sg_tablesize = MPT3SAS_SG_DEPTH; |
6286 | } |
6287 | |
6288 | /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */ |
6289 | if (reset_devices) |
6290 | sg_tablesize = min_t(unsigned short, sg_tablesize, |
6291 | MPT_KDUMP_MIN_PHYS_SEGMENTS); |
6292 | |
6293 | if (ioc->is_mcpu_endpoint) |
6294 | ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS; |
6295 | else { |
6296 | if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) |
6297 | sg_tablesize = MPT_MIN_PHYS_SEGMENTS; |
6298 | else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { |
6299 | sg_tablesize = min_t(unsigned short, sg_tablesize, |
6300 | SG_MAX_SEGMENTS); |
6301 | ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n", |
6302 | sg_tablesize, MPT_MAX_PHYS_SEGMENTS); |
6303 | } |
6304 | ioc->shost->sg_tablesize = sg_tablesize; |
6305 | } |
6306 | |
6307 | ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), |
6308 | (facts->RequestCredit / 4)); |
6309 | if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { |
6310 | if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + |
6311 | INTERNAL_SCSIIO_CMDS_COUNT)) { |
6312 | ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n", |
6313 | facts->RequestCredit); |
6314 | return -ENOMEM; |
6315 | } |
6316 | ioc->internal_depth = 10; |
6317 | } |
6318 | |
6319 | ioc->hi_priority_depth = ioc->internal_depth - (5); |
6320 | /* command line tunables for max controller queue depth */ |
6321 | if (max_queue_depth != -1 && max_queue_depth != 0) { |
6322 | max_request_credit = min_t(u16, max_queue_depth + |
6323 | ioc->internal_depth, facts->RequestCredit); |
6324 | if (max_request_credit > MAX_HBA_QUEUE_DEPTH) |
6325 | max_request_credit = MAX_HBA_QUEUE_DEPTH; |
6326 | } else if (reset_devices) |
6327 | max_request_credit = min_t(u16, facts->RequestCredit, |
6328 | (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth)); |
6329 | else |
6330 | max_request_credit = min_t(u16, facts->RequestCredit, |
6331 | MAX_HBA_QUEUE_DEPTH); |
6332 | |
6333 | /* Firmware maintains additional facts->HighPriorityCredit number of |
* credits for HiPriority Request messages, so hba queue depth will be
6335 | * sum of max_request_credit and high priority queue depth. |
6336 | */ |
6337 | ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; |
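/*
 * Worked example with hypothetical facts: RequestCredit = 10240,
 * max_queue_depth unset and hi_priority_depth = 104 give
 * max_request_credit = min(10240, MAX_HBA_QUEUE_DEPTH) = 10240 and
 * hba_queue_depth = 10240 + 104 = 10344.
 */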
6338 | |
6339 | /* request frame size */ |
6340 | ioc->request_sz = facts->IOCRequestFrameSize * 4; |
6341 | |
6342 | /* reply frame size */ |
6343 | ioc->reply_sz = facts->ReplyFrameSize * 4; |
6344 | |
6345 | /* chain segment size */ |
6346 | if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { |
6347 | if (facts->IOCMaxChainSegmentSize) |
6348 | ioc->chain_segment_sz = |
6349 | facts->IOCMaxChainSegmentSize * |
6350 | MAX_CHAIN_ELEMT_SZ; |
6351 | else |
/* fall back to a 128 byte chain segment if IOCMaxChainSegmentSize is zero */
6353 | ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * |
6354 | MAX_CHAIN_ELEMT_SZ; |
6355 | } else |
6356 | ioc->chain_segment_sz = ioc->request_sz; |
6357 | |
6358 | /* calculate the max scatter element size */ |
6359 | sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); |
6360 | |
6361 | retry_allocation: |
6362 | total_sz = 0; |
6363 | /* calculate number of sg elements left over in the 1st frame */ |
6364 | max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - |
6365 | sizeof(Mpi2SGEIOUnion_t)) + sge_size); |
6366 | ioc->max_sges_in_main_message = max_sge_elements/sge_size; |
6367 | |
6368 | /* now do the same for a chain buffer */ |
6369 | max_sge_elements = ioc->chain_segment_sz - sge_size; |
6370 | ioc->max_sges_in_chain_message = max_sge_elements/sge_size; |
6371 | |
6372 | /* |
6373 | * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE |
6374 | */ |
6375 | chains_needed_per_io = ((ioc->shost->sg_tablesize - |
6376 | ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) |
6377 | + 1; |
6378 | if (chains_needed_per_io > facts->MaxChainDepth) { |
6379 | chains_needed_per_io = facts->MaxChainDepth; |
6380 | ioc->shost->sg_tablesize = min_t(u16, |
6381 | ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message |
6382 | * chains_needed_per_io), ioc->shost->sg_tablesize); |
6383 | } |
6384 | ioc->chains_needed_per_io = chains_needed_per_io; |
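/*
 * Worked example (hypothetical sizes): with sg_tablesize = 128,
 * max_sges_in_main_message = 19 and max_sges_in_chain_message = 63,
 * chains_needed_per_io = ((128 - 19) / 63) + 1 = 2, i.e. one main
 * frame plus up to two chain segments per I/O.
 */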
6385 | |
/* reply free queue sizing - taking into account 64 FW events */
6387 | ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; |
6388 | |
/* the mCPU endpoint manages a single counter for simplicity */
6390 | if (ioc->is_mcpu_endpoint) |
6391 | ioc->reply_post_queue_depth = ioc->reply_free_queue_depth; |
6392 | else { |
6393 | /* calculate reply descriptor post queue depth */ |
6394 | ioc->reply_post_queue_depth = ioc->hba_queue_depth + |
6395 | ioc->reply_free_queue_depth + 1; |
6396 | /* align the reply post queue on the next 16 count boundary */ |
6397 | if (ioc->reply_post_queue_depth % 16) |
6398 | ioc->reply_post_queue_depth += 16 - |
6399 | (ioc->reply_post_queue_depth % 16); |
6400 | } |
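/*
 * Alignment example: a computed reply_post_queue_depth of 1050 has
 * 1050 % 16 = 10, so 16 - 10 = 6 is added, rounding the depth up to
 * 1056, the next multiple of 16.
 */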
6401 | |
6402 | if (ioc->reply_post_queue_depth > |
6403 | facts->MaxReplyDescriptorPostQueueDepth) { |
6404 | ioc->reply_post_queue_depth = |
6405 | facts->MaxReplyDescriptorPostQueueDepth - |
6406 | (facts->MaxReplyDescriptorPostQueueDepth % 16); |
6407 | ioc->hba_queue_depth = |
6408 | ((ioc->reply_post_queue_depth - 64) / 2) - 1; |
6409 | ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; |
6410 | } |
6411 | |
6412 | ioc_info(ioc, |
6413 | "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), " |
6414 | "sge_per_io(%d), chains_per_io(%d)\n", |
6415 | ioc->max_sges_in_main_message, |
6416 | ioc->max_sges_in_chain_message, |
6417 | ioc->shost->sg_tablesize, |
6418 | ioc->chains_needed_per_io); |
6419 | |
6420 | /* reply post queue, 16 byte align */ |
6421 | reply_post_free_sz = ioc->reply_post_queue_depth * |
6422 | sizeof(Mpi2DefaultReplyDescriptor_t); |
6423 | rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; |
6424 | if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) |
6425 | || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK)) |
6426 | rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; |
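/*
 * Sizing sketch (hypothetical depth, assuming the 8 byte MPI2 default
 * reply descriptor): a reply_post_queue_depth of 1056 makes
 * reply_post_free_sz = 1056 * 8 = 8448 bytes, and one RDPQ chunk
 * covers RDPQ_MAX_INDEX_IN_ONE_CHUNK queues of that size.
 */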
ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
6428 | if (ret == -EAGAIN) { |
6429 | /* |
6430 | * Free allocated bad RDPQ memory pools. |
6431 | * Change dma coherent mask to 32 bit and reallocate RDPQ |
6432 | */ |
6433 | _base_release_memory_pools(ioc); |
6434 | ioc->use_32bit_dma = true; |
if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
ioc_err(ioc,
"32-bit DMA mask failed %s\n", pci_name(ioc->pdev));
6438 | return -ENODEV; |
6439 | } |
if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
6441 | return -ENOMEM; |
6442 | } else if (ret == -ENOMEM) |
6443 | return -ENOMEM; |
6444 | total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 : |
6445 | DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK)); |
6446 | ioc->scsiio_depth = ioc->hba_queue_depth - |
6447 | ioc->hi_priority_depth - ioc->internal_depth; |
6448 | |
/* set the scsi host can_queue depth, leaving room for
* internal commands that could be outstanding
6451 | */ |
6452 | ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; |
6453 | dinitprintk(ioc, |
6454 | ioc_info(ioc, "scsi host: can_queue depth (%d)\n", |
6455 | ioc->shost->can_queue)); |
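/*
 * Example (hypothetical depths): hba_queue_depth = 10344,
 * hi_priority_depth = 104 and internal_depth = 109 give
 * scsiio_depth = 10344 - 104 - 109 = 10131; can_queue is that value
 * minus the INTERNAL_SCSIIO_CMDS_COUNT reserved internal SCSI IOs.
 */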
6456 | |
/* contiguous pool for request and chains, 16 byte align, one extra
* frame for smid=0
6459 | */ |
6460 | ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; |
6461 | sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); |
6462 | |
6463 | /* hi-priority queue */ |
6464 | sz += (ioc->hi_priority_depth * ioc->request_sz); |
6465 | |
6466 | /* internal queue */ |
6467 | sz += (ioc->internal_depth * ioc->request_sz); |
6468 | |
6469 | ioc->request_dma_sz = sz; |
ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
&ioc->request_dma, GFP_KERNEL);
6472 | if (!ioc->request) { |
6473 | ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n", |
6474 | ioc->hba_queue_depth, ioc->chains_needed_per_io, |
6475 | ioc->request_sz, sz / 1024); |
6476 | if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) |
6477 | goto out; |
6478 | retry_sz = 64; |
6479 | ioc->hba_queue_depth -= retry_sz; |
6480 | _base_release_memory_pools(ioc); |
6481 | goto retry_allocation; |
6482 | } |
6483 | |
6484 | if (retry_sz) |
ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6486 | ioc->hba_queue_depth, ioc->chains_needed_per_io, |
6487 | ioc->request_sz, sz / 1024); |
6488 | |
6489 | /* hi-priority queue */ |
6490 | ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * |
6491 | ioc->request_sz); |
6492 | ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * |
6493 | ioc->request_sz); |
6494 | |
6495 | /* internal queue */ |
6496 | ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * |
6497 | ioc->request_sz); |
6498 | ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * |
6499 | ioc->request_sz); |
6500 | |
6501 | ioc_info(ioc, |
6502 | "request pool(0x%p) - dma(0x%llx): " |
6503 | "depth(%d), frame_size(%d), pool_size(%d kB)\n", |
6504 | ioc->request, (unsigned long long) ioc->request_dma, |
6505 | ioc->hba_queue_depth, ioc->request_sz, |
6506 | (ioc->hba_queue_depth * ioc->request_sz) / 1024); |
6507 | |
6508 | total_sz += sz; |
6509 | |
6510 | dinitprintk(ioc, |
6511 | ioc_info(ioc, "scsiio(0x%p): depth(%d)\n", |
6512 | ioc->request, ioc->scsiio_depth)); |
6513 | |
6514 | ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); |
6515 | sz = ioc->scsiio_depth * sizeof(struct chain_lookup); |
ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup) {
ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6519 | goto out; |
6520 | } |
6521 | |
6522 | sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker); |
6523 | for (i = 0; i < ioc->scsiio_depth; i++) { |
ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6525 | if (!ioc->chain_lookup[i].chains_per_smid) { |
6526 | ioc_err(ioc, "chain_lookup: kzalloc failed\n"); |
6527 | goto out; |
6528 | } |
6529 | } |
6530 | |
6531 | /* initialize hi-priority queue smid's */ |
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
sizeof(struct request_tracker), GFP_KERNEL);
6534 | if (!ioc->hpr_lookup) { |
6535 | ioc_err(ioc, "hpr_lookup: kcalloc failed\n"); |
6536 | goto out; |
6537 | } |
6538 | ioc->hi_priority_smid = ioc->scsiio_depth + 1; |
6539 | dinitprintk(ioc, |
6540 | ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n", |
6541 | ioc->hi_priority, |
6542 | ioc->hi_priority_depth, ioc->hi_priority_smid)); |
6543 | |
6544 | /* initialize internal queue smid's */ |
ioc->internal_lookup = kcalloc(ioc->internal_depth,
sizeof(struct request_tracker), GFP_KERNEL);
6547 | if (!ioc->internal_lookup) { |
6548 | ioc_err(ioc, "internal_lookup: kcalloc failed\n"); |
6549 | goto out; |
6550 | } |
6551 | ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; |
6552 | dinitprintk(ioc, |
6553 | ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n", |
6554 | ioc->internal, |
6555 | ioc->internal_depth, ioc->internal_smid)); |
6556 | |
ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
sizeof(u16), GFP_KERNEL);
6559 | if (!ioc->io_queue_num) |
6560 | goto out; |
6561 | /* |
6562 | * The number of NVMe page sized blocks needed is: |
6563 | * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 |
6564 | * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry |
6565 | * that is placed in the main message frame. 8 is the size of each PRP |
6566 | * entry or PRP list pointer entry. 8 is subtracted from page_size |
6567 | * because of the PRP list pointer entry at the end of a page, so this |
* is not counted as a PRP entry. The added 1 rounds the page count up.
6569 | * |
6570 | * To avoid allocation failures due to the amount of memory that could |
6571 | * be required for NVMe PRP's, only each set of NVMe blocks will be |
6572 | * contiguous, so a new set is allocated for each possible I/O. |
6573 | */ |
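/*
 * Worked example (assuming a 4k page and sg_tablesize = 128):
 * (128 * 8) - 1 = 1023 bytes of PRP entries are needed beyond the
 * main frame, each page holds 4096 - 8 = 4088 usable bytes, so
 * nvme_blocks_needed = (1023 / 4088) + 1 = 1 page per I/O.
 */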
6574 | |
6575 | ioc->chains_per_prp_buffer = 0; |
6576 | if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { |
6577 | nvme_blocks_needed = |
6578 | (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; |
6579 | nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); |
6580 | nvme_blocks_needed++; |
6581 | |
6582 | sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; |
ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6584 | if (!ioc->pcie_sg_lookup) { |
6585 | ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n"); |
6586 | goto out; |
6587 | } |
6588 | sz = nvme_blocks_needed * ioc->page_size; |
6589 | rc = _base_allocate_pcie_sgl_pool(ioc, sz); |
6590 | if (rc == -ENOMEM) |
6591 | return -ENOMEM; |
6592 | else if (rc == -EAGAIN) |
6593 | goto try_32bit_dma; |
6594 | total_sz += sz * ioc->scsiio_depth; |
6595 | } |
6596 | |
rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6598 | if (rc == -ENOMEM) |
6599 | return -ENOMEM; |
6600 | else if (rc == -EAGAIN) |
6601 | goto try_32bit_dma; |
6602 | total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io - |
6603 | ioc->chains_per_prp_buffer) * ioc->scsiio_depth); |
6604 | dinitprintk(ioc, |
6605 | ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", |
6606 | ioc->chain_depth, ioc->chain_segment_sz, |
6607 | (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); |
6608 | /* sense buffers, 4 byte align */ |
6609 | sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; |
rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6611 | if (rc == -ENOMEM) |
6612 | return -ENOMEM; |
6613 | else if (rc == -EAGAIN) |
6614 | goto try_32bit_dma; |
6615 | total_sz += sense_sz; |
6616 | /* reply pool, 4 byte align */ |
6617 | sz = ioc->reply_free_queue_depth * ioc->reply_sz; |
6618 | rc = _base_allocate_reply_pool(ioc, sz); |
6619 | if (rc == -ENOMEM) |
6620 | return -ENOMEM; |
6621 | else if (rc == -EAGAIN) |
6622 | goto try_32bit_dma; |
6623 | total_sz += sz; |
6624 | |
6625 | /* reply free queue, 16 byte align */ |
6626 | sz = ioc->reply_free_queue_depth * 4; |
6627 | rc = _base_allocate_reply_free_dma_pool(ioc, sz); |
6628 | if (rc == -ENOMEM) |
6629 | return -ENOMEM; |
6630 | else if (rc == -EAGAIN) |
6631 | goto try_32bit_dma; |
6632 | dinitprintk(ioc, |
6633 | ioc_info(ioc, "reply_free_dma (0x%llx)\n", |
6634 | (unsigned long long)ioc->reply_free_dma)); |
6635 | total_sz += sz; |
6636 | if (ioc->rdpq_array_enable) { |
6637 | reply_post_free_array_sz = ioc->reply_queue_count * |
6638 | sizeof(Mpi2IOCInitRDPQArrayEntry); |
6639 | rc = _base_allocate_reply_post_free_array(ioc, |
6640 | reply_post_free_array_sz); |
6641 | if (rc == -ENOMEM) |
6642 | return -ENOMEM; |
6643 | else if (rc == -EAGAIN) |
6644 | goto try_32bit_dma; |
6645 | } |
6646 | ioc->config_page_sz = 512; |
ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6649 | if (!ioc->config_page) { |
ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
6651 | goto out; |
6652 | } |
6653 | |
6654 | ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", |
6655 | ioc->config_page, (unsigned long long)ioc->config_page_dma, |
6656 | ioc->config_page_sz); |
6657 | total_sz += ioc->config_page_sz; |
6658 | |
6659 | ioc_info(ioc, "Allocated physical memory: size(%d kB)\n", |
6660 | total_sz / 1024); |
6661 | ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", |
6662 | ioc->shost->can_queue, facts->RequestCredit); |
6663 | ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n", |
6664 | ioc->shost->sg_tablesize); |
6665 | return 0; |
6666 | |
6667 | try_32bit_dma: |
6668 | _base_release_memory_pools(ioc); |
6669 | if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { |
6670 | /* Change dma coherent mask to 32 bit and reallocate */ |
if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6672 | pr_err("Setting 32 bit coherent DMA mask Failed %s\n", |
6673 | pci_name(ioc->pdev)); |
6674 | return -ENODEV; |
6675 | } |
6676 | } else if (_base_reduce_hba_queue_depth(ioc) != 0) |
6677 | return -ENOMEM; |
6678 | goto retry_allocation; |
6679 | |
6680 | out: |
6681 | return -ENOMEM; |
6682 | } |
6683 | |
6684 | /** |
6685 | * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. |
* @ioc: Pointer to MPT3SAS_ADAPTER structure
6687 | * @cooked: Request raw or cooked IOC state |
6688 | * |
6689 | * Return: all IOC Doorbell register bits if cooked==0, else just the |
* Doorbell bits in MPI2_IOC_STATE_MASK.
6691 | */ |
6692 | u32 |
6693 | mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) |
6694 | { |
6695 | u32 s, sc; |
6696 | |
6697 | s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); |
6698 | sc = s & MPI2_IOC_STATE_MASK; |
6699 | return cooked ? sc : s; |
6700 | } |
6701 | |
6702 | /** |
6703 | * _base_wait_on_iocstate - waiting on a particular ioc state |
* @ioc: per adapter object
* @ioc_state: controller state { READY, OPERATIONAL, or RESET }
* @timeout: timeout in seconds
6707 | * |
6708 | * Return: 0 for success, non-zero for failure. |
6709 | */ |
6710 | static int |
6711 | _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) |
6712 | { |
6713 | u32 count, cntdn; |
6714 | u32 current_state; |
6715 | |
6716 | count = 0; |
6717 | cntdn = 1000 * timeout; |
6718 | do { |
current_state = mpt3sas_base_get_iocstate(ioc, 1);
6720 | if (current_state == ioc_state) |
6721 | return 0; |
6722 | if (count && current_state == MPI2_IOC_STATE_FAULT) |
6723 | break; |
6724 | if (count && current_state == MPI2_IOC_STATE_COREDUMP) |
6725 | break; |
6726 | |
usleep_range(1000, 1500);
6728 | count++; |
6729 | } while (--cntdn); |
6730 | |
6731 | return current_state; |
6732 | } |
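/*
 * Usage sketch (illustrative): callers poll for a target state with a
 * timeout in seconds, e.g.
 *
 *	if (_base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 10))
 *		return -EFAULT;
 *
 * where a non-zero return is the last observed (cooked) IOC state.
 */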
6733 | |
6734 | /** |
6735 | * _base_dump_reg_set - This function will print hexdump of register set. |
6736 | * @ioc: per adapter object |
6737 | * |
6738 | * Return: nothing. |
6739 | */ |
6740 | static inline void |
6741 | _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc) |
6742 | { |
6743 | unsigned int i, sz = 256; |
6744 | u32 __iomem *reg = (u32 __iomem *)ioc->chip; |
6745 | |
6746 | ioc_info(ioc, "System Register set:\n"); |
6747 | for (i = 0; i < (sz / sizeof(u32)); i++) |
6748 | pr_info("%08x: %08x\n", (i * 4), readl(®[i])); |
6749 | } |
6750 | |
6751 | /** |
* _base_wait_for_doorbell_int - waiting for controller interrupt (generated
* by a write to the doorbell)
6754 | * @ioc: per adapter object |
6755 | * @timeout: timeout in seconds |
6756 | * |
6757 | * Return: 0 for success, non-zero for failure. |
6758 | * |
6759 | * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. |
6760 | */ |
6761 | |
6762 | static int |
6763 | _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) |
6764 | { |
6765 | u32 cntdn, count; |
6766 | u32 int_status; |
6767 | |
6768 | count = 0; |
6769 | cntdn = 1000 * timeout; |
6770 | do { |
6771 | int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); |
6772 | if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { |
6773 | dhsprintk(ioc, |
6774 | ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", |
6775 | __func__, count, timeout)); |
6776 | return 0; |
6777 | } |
6778 | |
usleep_range(1000, 1500);
6780 | count++; |
6781 | } while (--cntdn); |
6782 | |
6783 | ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", |
6784 | __func__, count, int_status); |
6785 | return -EFAULT; |
6786 | } |
6787 | |
6788 | static int |
6789 | _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) |
6790 | { |
6791 | u32 cntdn, count; |
6792 | u32 int_status; |
6793 | |
6794 | count = 0; |
6795 | cntdn = 2000 * timeout; |
6796 | do { |
6797 | int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); |
6798 | if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { |
6799 | dhsprintk(ioc, |
6800 | ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", |
6801 | __func__, count, timeout)); |
6802 | return 0; |
6803 | } |
6804 | |
6805 | udelay(500); |
6806 | count++; |
6807 | } while (--cntdn); |
6808 | |
6809 | ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", |
6810 | __func__, count, int_status); |
6811 | return -EFAULT; |
6812 | |
6813 | } |
6814 | |
6815 | /** |
6816 | * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. |
6817 | * @ioc: per adapter object |
* @timeout: timeout in seconds
6819 | * |
6820 | * Return: 0 for success, non-zero for failure. |
6821 | * |
6822 | * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to |
6823 | * doorbell. |
6824 | */ |
6825 | static int |
6826 | _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) |
6827 | { |
6828 | u32 cntdn, count; |
6829 | u32 int_status; |
6830 | u32 doorbell; |
6831 | |
6832 | count = 0; |
6833 | cntdn = 1000 * timeout; |
6834 | do { |
6835 | int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); |
6836 | if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { |
6837 | dhsprintk(ioc, |
6838 | ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", |
6839 | __func__, count, timeout)); |
6840 | return 0; |
6841 | } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { |
6842 | doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); |
6843 | if ((doorbell & MPI2_IOC_STATE_MASK) == |
6844 | MPI2_IOC_STATE_FAULT) { |
6845 | mpt3sas_print_fault_code(ioc, doorbell); |
6846 | return -EFAULT; |
6847 | } |
6848 | if ((doorbell & MPI2_IOC_STATE_MASK) == |
6849 | MPI2_IOC_STATE_COREDUMP) { |
6850 | mpt3sas_print_coredump_info(ioc, doorbell); |
6851 | return -EFAULT; |
6852 | } |
6853 | } else if (int_status == 0xFFFFFFFF) |
6854 | goto out; |
6855 | |
usleep_range(1000, 1500);
6857 | count++; |
6858 | } while (--cntdn); |
6859 | |
6860 | out: |
6861 | ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", |
6862 | __func__, count, int_status); |
6863 | return -EFAULT; |
6864 | } |
6865 | |
6866 | /** |
6867 | * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use |
6868 | * @ioc: per adapter object |
* @timeout: timeout in seconds
6870 | * |
6871 | * Return: 0 for success, non-zero for failure. |
6872 | */ |
6873 | static int |
6874 | _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) |
6875 | { |
6876 | u32 cntdn, count; |
6877 | u32 doorbell_reg; |
6878 | |
6879 | count = 0; |
6880 | cntdn = 1000 * timeout; |
6881 | do { |
6882 | doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); |
6883 | if (!(doorbell_reg & MPI2_DOORBELL_USED)) { |
6884 | dhsprintk(ioc, |
6885 | ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", |
6886 | __func__, count, timeout)); |
6887 | return 0; |
6888 | } |
6889 | |
usleep_range(1000, 1500);
6891 | count++; |
6892 | } while (--cntdn); |
6893 | |
6894 | ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", |
6895 | __func__, count, doorbell_reg); |
6896 | return -EFAULT; |
6897 | } |
6898 | |
6899 | /** |
6900 | * _base_send_ioc_reset - send doorbell reset |
6901 | * @ioc: per adapter object |
* @reset_type: currently only supports MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
* @timeout: timeout in seconds
6904 | * |
6905 | * Return: 0 for success, non-zero for failure. |
6906 | */ |
6907 | static int |
6908 | _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) |
6909 | { |
6910 | u32 ioc_state; |
6911 | int r = 0; |
6912 | unsigned long flags; |
6913 | |
6914 | if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { |
6915 | ioc_err(ioc, "%s: unknown reset_type\n", __func__); |
6916 | return -EFAULT; |
6917 | } |
6918 | |
6919 | if (!(ioc->facts.IOCCapabilities & |
6920 | MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) |
6921 | return -EFAULT; |
6922 | |
6923 | ioc_info(ioc, "sending message unit reset !!\n"); |
6924 | |
writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
&ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6928 | r = -EFAULT; |
6929 | goto out; |
6930 | } |
6931 | |
6932 | ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); |
6933 | if (ioc_state) { |
6934 | ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", |
6935 | __func__, ioc_state); |
6936 | r = -EFAULT; |
6937 | goto out; |
6938 | } |
6939 | out: |
6940 | if (r != 0) { |
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6942 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
6943 | /* |
6944 | * Wait for IOC state CoreDump to clear only during |
6945 | * HBA initialization & release time. |
6946 | */ |
6947 | if ((ioc_state & MPI2_IOC_STATE_MASK) == |
6948 | MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || |
6949 | ioc->fault_reset_work_q == NULL)) { |
spin_unlock_irqrestore(
&ioc->ioc_reset_in_progress_lock, flags);
6952 | mpt3sas_print_coredump_info(ioc, ioc_state); |
mpt3sas_base_wait_for_coredump_completion(ioc,
__func__);
6955 | spin_lock_irqsave( |
6956 | &ioc->ioc_reset_in_progress_lock, flags); |
6957 | } |
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6959 | } |
6960 | ioc_info(ioc, "message unit reset: %s\n", |
6961 | r == 0 ? "SUCCESS": "FAILED"); |
6962 | return r; |
6963 | } |
6964 | |
6965 | /** |
* mpt3sas_wait_for_ioc - wait for the IOC to become operational.
6967 | * @ioc: per adapter object |
6968 | * @timeout: timeout in seconds |
6969 | * |
6970 | * Return: Waits up to timeout seconds for the IOC to |
6971 | * become operational. Returns 0 if IOC is present |
6972 | * and operational; otherwise returns %-EFAULT. |
6973 | */ |
6974 | |
6975 | int |
6976 | mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout) |
6977 | { |
6978 | int wait_state_count = 0; |
6979 | u32 ioc_state; |
6980 | |
6981 | do { |
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6983 | if (ioc_state == MPI2_IOC_STATE_OPERATIONAL) |
6984 | break; |
6985 | |
6986 | /* |
6987 | * Watchdog thread will be started after IOC Initialization, so |
6988 | * no need to wait here for IOC state to become operational |
6989 | * when IOC Initialization is on. Instead the driver will |
* return ETIME status, so that the calling function can issue
* a diag reset operation and retry the command.
6992 | */ |
6993 | if (ioc->is_driver_loading) |
6994 | return -ETIME; |
6995 | |
ssleep(1);
6997 | ioc_info(ioc, "%s: waiting for operational state(count=%d)\n", |
6998 | __func__, ++wait_state_count); |
6999 | } while (--timeout); |
7000 | if (!timeout) { |
7001 | ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__); |
7002 | return -EFAULT; |
7003 | } |
7004 | if (wait_state_count) |
7005 | ioc_info(ioc, "ioc is operational\n"); |
7006 | return 0; |
7007 | } |
7008 | |
7009 | /** |
* _base_handshake_req_reply_wait - send request through doorbell interface
7011 | * @ioc: per adapter object |
7012 | * @request_bytes: request length |
7013 | * @request: pointer having request payload |
7014 | * @reply_bytes: reply length |
7015 | * @reply: pointer to reply payload |
* @timeout: timeout in seconds
7017 | * |
7018 | * Return: 0 for success, non-zero for failure. |
7019 | */ |
7020 | static int |
7021 | _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, |
7022 | u32 *request, int reply_bytes, u16 *reply, int timeout) |
7023 | { |
7024 | MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; |
7025 | int i; |
7026 | u8 failed; |
7027 | __le32 *mfp; |
7028 | |
7029 | /* make sure doorbell is not in use */ |
7030 | if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { |
7031 | ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); |
7032 | return -EFAULT; |
7033 | } |
7034 | |
7035 | /* clear pending doorbell interrupts from previous state changes */ |
7036 | if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & |
7037 | MPI2_HIS_IOC2SYS_DB_STATUS) |
writel(0, &ioc->chip->HostInterruptStatus);
7039 | |
7040 | /* send message to ioc */ |
writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
&ioc->chip->Doorbell);
7044 | |
if ((_base_spin_on_doorbell_int(ioc, 5))) {
7046 | ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", |
7047 | __LINE__); |
7048 | return -EFAULT; |
7049 | } |
writel(0, &ioc->chip->HostInterruptStatus);
7051 | |
if ((_base_wait_for_doorbell_ack(ioc, 5))) {
7053 | ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n", |
7054 | __LINE__); |
7055 | return -EFAULT; |
7056 | } |
7057 | |
7058 | /* send message 32-bits at a time */ |
7059 | for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { |
writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
7062 | failed = 1; |
7063 | } |
7064 | |
7065 | if (failed) { |
7066 | ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n", |
7067 | __LINE__); |
7068 | return -EFAULT; |
7069 | } |
7070 | |
7071 | /* now wait for the reply */ |
7072 | if ((_base_wait_for_doorbell_int(ioc, timeout))) { |
7073 | ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", |
7074 | __LINE__); |
7075 | return -EFAULT; |
7076 | } |
7077 | |
/* read the first two 16-bits, which give the total length of the reply */
7079 | reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) |
7080 | & MPI2_DOORBELL_DATA_MASK); |
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
7083 | ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", |
7084 | __LINE__); |
7085 | return -EFAULT; |
7086 | } |
7087 | reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) |
7088 | & MPI2_DOORBELL_DATA_MASK); |
writel(0, &ioc->chip->HostInterruptStatus);
7090 | |
7091 | for (i = 2; i < default_reply->MsgLength * 2; i++) { |
if ((_base_wait_for_doorbell_int(ioc, 5))) {
7093 | ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", |
7094 | __LINE__); |
7095 | return -EFAULT; |
7096 | } |
7097 | if (i >= reply_bytes/2) /* overflow case */ |
7098 | ioc->base_readl_ext_retry(&ioc->chip->Doorbell); |
7099 | else |
7100 | reply[i] = le16_to_cpu( |
7101 | ioc->base_readl_ext_retry(&ioc->chip->Doorbell) |
7102 | & MPI2_DOORBELL_DATA_MASK); |
writel(0, &ioc->chip->HostInterruptStatus);
7104 | } |
7105 | |
_base_wait_for_doorbell_int(ioc, 5);
if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
7108 | dhsprintk(ioc, |
7109 | ioc_info(ioc, "doorbell is in use (line=%d)\n", |
7110 | __LINE__)); |
7111 | } |
writel(0, &ioc->chip->HostInterruptStatus);
7113 | |
7114 | if (ioc->logging_level & MPT_DEBUG_INIT) { |
7115 | mfp = (__le32 *)reply; |
7116 | pr_info("\toffset:data\n"); |
7117 | for (i = 0; i < reply_bytes/4; i++) |
7118 | ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, |
7119 | le32_to_cpu(mfp[i])); |
7120 | } |
7121 | return 0; |
7122 | } |
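/*
 * Usage sketch (illustrative): requests and replies are exchanged as
 * raw dwords/words over the doorbell, e.g. for IOC facts:
 *
 *	Mpi2IOCFactsRequest_t req = { .Function = MPI2_FUNCTION_IOC_FACTS };
 *	Mpi2IOCFactsReply_t rep;
 *	int r = _base_handshake_req_reply_wait(ioc, sizeof(req),
 *	    (u32 *)&req, sizeof(rep), (u16 *)&rep, 5);
 *
 * See _base_get_port_facts() and _base_get_ioc_facts() below for the
 * real callers.
 */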
7123 | |
7124 | /** |
7125 | * mpt3sas_base_sas_iounit_control - send sas iounit control to FW |
7126 | * @ioc: per adapter object |
7127 | * @mpi_reply: the reply payload from FW |
7128 | * @mpi_request: the request payload sent to FW |
7129 | * |
* The SAS IO Unit Control Request message allows the host to perform
* low-level operations, such as resets on the PHYs of the IO Unit. It
* also allows the host to obtain the IOC-assigned device handle for a
* device, given other identifying information about it, and to remove
* IOC resources associated with the device.
7135 | * |
7136 | * Return: 0 for success, non-zero for failure. |
7137 | */ |
7138 | int |
7139 | mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, |
7140 | Mpi2SasIoUnitControlReply_t *mpi_reply, |
7141 | Mpi2SasIoUnitControlRequest_t *mpi_request) |
7142 | { |
7143 | u16 smid; |
7144 | u8 issue_reset = 0; |
7145 | int rc; |
7146 | void *request; |
7147 | |
7148 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
7149 | |
7150 | mutex_lock(&ioc->base_cmds.mutex); |
7151 | |
7152 | if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { |
7153 | ioc_err(ioc, "%s: base_cmd in use\n", __func__); |
7154 | rc = -EAGAIN; |
7155 | goto out; |
7156 | } |
7157 | |
7158 | rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); |
7159 | if (rc) |
7160 | goto out; |
7161 | |
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7163 | if (!smid) { |
7164 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
7165 | rc = -EAGAIN; |
7166 | goto out; |
7167 | } |
7168 | |
7169 | rc = 0; |
7170 | ioc->base_cmds.status = MPT3_CMD_PENDING; |
7171 | request = mpt3sas_base_get_msg_frame(ioc, smid); |
7172 | ioc->base_cmds.smid = smid; |
7173 | memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); |
7174 | if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || |
7175 | mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) |
7176 | ioc->ioc_link_reset_in_progress = 1; |
init_completion(&ioc->base_cmds.done);
7178 | ioc->put_smid_default(ioc, smid); |
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
7181 | if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || |
7182 | mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && |
7183 | ioc->ioc_link_reset_in_progress) |
7184 | ioc->ioc_link_reset_in_progress = 0; |
7185 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { |
7186 | mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, |
7187 | mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4, |
7188 | issue_reset); |
7189 | goto issue_host_reset; |
7190 | } |
7191 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
7192 | memcpy(mpi_reply, ioc->base_cmds.reply, |
7193 | sizeof(Mpi2SasIoUnitControlReply_t)); |
7194 | else |
7195 | memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); |
7196 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; |
7197 | goto out; |
7198 | |
7199 | issue_host_reset: |
7200 | if (issue_reset) |
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7202 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; |
7203 | rc = -EFAULT; |
7204 | out: |
mutex_unlock(&ioc->base_cmds.mutex);
7206 | return rc; |
7207 | } |
7208 | |
7209 | /** |
7210 | * mpt3sas_base_scsi_enclosure_processor - sending request to sep device |
7211 | * @ioc: per adapter object |
7212 | * @mpi_reply: the reply payload from FW |
7213 | * @mpi_request: the request payload sent to FW |
7214 | * |
7215 | * The SCSI Enclosure Processor request message causes the IOC to |
7216 | * communicate with SES devices to control LED status signals. |
7217 | * |
7218 | * Return: 0 for success, non-zero for failure. |
7219 | */ |
7220 | int |
7221 | mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, |
7222 | Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) |
7223 | { |
7224 | u16 smid; |
7225 | u8 issue_reset = 0; |
7226 | int rc; |
7227 | void *request; |
7228 | |
7229 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
7230 | |
7231 | mutex_lock(&ioc->base_cmds.mutex); |
7232 | |
7233 | if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { |
7234 | ioc_err(ioc, "%s: base_cmd in use\n", __func__); |
7235 | rc = -EAGAIN; |
7236 | goto out; |
7237 | } |
7238 | |
7239 | rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); |
7240 | if (rc) |
7241 | goto out; |
7242 | |
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7244 | if (!smid) { |
7245 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
7246 | rc = -EAGAIN; |
7247 | goto out; |
7248 | } |
7249 | |
7250 | rc = 0; |
7251 | ioc->base_cmds.status = MPT3_CMD_PENDING; |
7252 | request = mpt3sas_base_get_msg_frame(ioc, smid); |
7253 | ioc->base_cmds.smid = smid; |
7254 | memset(request, 0, ioc->request_sz); |
memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
init_completion(&ioc->base_cmds.done);
7257 | ioc->put_smid_default(ioc, smid); |
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
7260 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { |
7261 | mpt3sas_check_cmd_timeout(ioc, |
7262 | ioc->base_cmds.status, mpi_request, |
7263 | sizeof(Mpi2SepRequest_t)/4, issue_reset); |
7264 | goto issue_host_reset; |
7265 | } |
7266 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
7267 | memcpy(mpi_reply, ioc->base_cmds.reply, |
7268 | sizeof(Mpi2SepReply_t)); |
7269 | else |
7270 | memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); |
7271 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; |
7272 | goto out; |
7273 | |
7274 | issue_host_reset: |
7275 | if (issue_reset) |
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7277 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; |
7278 | rc = -EFAULT; |
7279 | out: |
mutex_unlock(&ioc->base_cmds.mutex);
7281 | return rc; |
7282 | } |
7283 | |
7284 | /** |
7285 | * _base_get_port_facts - obtain port facts reply and save in ioc |
7286 | * @ioc: per adapter object |
* @port: port number
7288 | * |
7289 | * Return: 0 for success, non-zero for failure. |
7290 | */ |
7291 | static int |
7292 | _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) |
7293 | { |
7294 | Mpi2PortFactsRequest_t mpi_request; |
7295 | Mpi2PortFactsReply_t mpi_reply; |
7296 | struct mpt3sas_port_facts *pfacts; |
7297 | int mpi_reply_sz, mpi_request_sz, r; |
7298 | |
7299 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
7300 | |
7301 | mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); |
7302 | mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); |
7303 | memset(&mpi_request, 0, mpi_request_sz); |
7304 | mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; |
7305 | mpi_request.PortNumber = port; |
r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7308 | |
7309 | if (r != 0) { |
7310 | ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); |
7311 | return r; |
7312 | } |
7313 | |
7314 | pfacts = &ioc->pfacts[port]; |
7315 | memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); |
7316 | pfacts->PortNumber = mpi_reply.PortNumber; |
7317 | pfacts->VP_ID = mpi_reply.VP_ID; |
7318 | pfacts->VF_ID = mpi_reply.VF_ID; |
7319 | pfacts->MaxPostedCmdBuffers = |
7320 | le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); |
7321 | |
7322 | return 0; |
7323 | } |
7324 | |
7325 | /** |
7326 | * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL |
7327 | * @ioc: per adapter object |
* @timeout: timeout in seconds
7329 | * |
7330 | * Return: 0 for success, non-zero for failure. |
7331 | */ |
7332 | static int |
7333 | _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) |
7334 | { |
7335 | u32 ioc_state; |
7336 | int rc; |
7337 | |
7338 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
7339 | |
7340 | if (ioc->pci_error_recovery) { |
7341 | dfailprintk(ioc, |
7342 | ioc_info(ioc, "%s: host in pci error recovery\n", |
7343 | __func__)); |
7344 | return -EFAULT; |
7345 | } |
7346 | |
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7348 | dhsprintk(ioc, |
7349 | ioc_info(ioc, "%s: ioc_state(0x%08x)\n", |
7350 | __func__, ioc_state)); |
7351 | |
7352 | if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || |
7353 | (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) |
7354 | return 0; |
7355 | |
7356 | if (ioc_state & MPI2_DOORBELL_USED) { |
7357 | dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); |
7358 | goto issue_diag_reset; |
7359 | } |
7360 | |
7361 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { |
7362 | mpt3sas_print_fault_code(ioc, ioc_state & |
7363 | MPI2_DOORBELL_DATA_MASK); |
7364 | goto issue_diag_reset; |
7365 | } else if ((ioc_state & MPI2_IOC_STATE_MASK) == |
7366 | MPI2_IOC_STATE_COREDUMP) { |
7367 | ioc_info(ioc, |
7368 | "%s: Skipping the diag reset here. (ioc_state=0x%x)\n", |
7369 | __func__, ioc_state); |
7370 | return -EFAULT; |
7371 | } |
7372 | |
7373 | ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); |
7374 | if (ioc_state) { |
7375 | dfailprintk(ioc, |
7376 | ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", |
7377 | __func__, ioc_state)); |
7378 | return -EFAULT; |
7379 | } |
7380 | |
7381 | return 0; |
7382 | |
7383 | issue_diag_reset: |
7384 | rc = _base_diag_reset(ioc); |
7385 | return rc; |
7386 | } |
7387 | |
7388 | /** |
7389 | * _base_get_ioc_facts - obtain ioc facts reply and save in ioc |
7390 | * @ioc: per adapter object |
7391 | * |
7392 | * Return: 0 for success, non-zero for failure. |
7393 | */ |
7394 | static int |
7395 | _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) |
7396 | { |
7397 | Mpi2IOCFactsRequest_t mpi_request; |
7398 | Mpi2IOCFactsReply_t mpi_reply; |
7399 | struct mpt3sas_facts *facts; |
7400 | int mpi_reply_sz, mpi_request_sz, r; |
7401 | |
7402 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
7403 | |
r = _base_wait_for_iocstate(ioc, 10);
7405 | if (r) { |
7406 | dfailprintk(ioc, |
7407 | ioc_info(ioc, "%s: failed getting to correct state\n", |
7408 | __func__)); |
7409 | return r; |
7410 | } |
7411 | mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); |
7412 | mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); |
7413 | memset(&mpi_request, 0, mpi_request_sz); |
7414 | mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; |
r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7417 | |
7418 | if (r != 0) { |
7419 | ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); |
7420 | return r; |
7421 | } |
7422 | |
7423 | facts = &ioc->facts; |
7424 | memset(facts, 0, sizeof(struct mpt3sas_facts)); |
7425 | facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); |
7426 | facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); |
7427 | facts->VP_ID = mpi_reply.VP_ID; |
7428 | facts->VF_ID = mpi_reply.VF_ID; |
7429 | facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); |
7430 | facts->MaxChainDepth = mpi_reply.MaxChainDepth; |
7431 | facts->WhoInit = mpi_reply.WhoInit; |
7432 | facts->NumberOfPorts = mpi_reply.NumberOfPorts; |
7433 | facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; |
7434 | if (ioc->msix_enable && (facts->MaxMSIxVectors <= |
7435 | MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) |
7436 | ioc->combined_reply_queue = 0; |
7437 | facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); |
7438 | facts->MaxReplyDescriptorPostQueueDepth = |
7439 | le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); |
7440 | facts->ProductID = le16_to_cpu(mpi_reply.ProductID); |
7441 | facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); |
7442 | if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) |
7443 | ioc->ir_firmware = 1; |
7444 | if ((facts->IOCCapabilities & |
7445 | MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) |
7446 | ioc->rdpq_array_capable = 1; |
7447 | if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) |
7448 | && ioc->is_aero_ioc) |
7449 | ioc->atomic_desc_capable = 1; |
7450 | facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); |
7451 | facts->IOCRequestFrameSize = |
7452 | le16_to_cpu(mpi_reply.IOCRequestFrameSize); |
7453 | if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { |
7454 | facts->IOCMaxChainSegmentSize = |
7455 | le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); |
7456 | } |
7457 | facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); |
7458 | facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); |
7459 | ioc->shost->max_id = -1; |
7460 | facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); |
7461 | facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); |
7462 | facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); |
7463 | facts->HighPriorityCredit = |
7464 | le16_to_cpu(mpi_reply.HighPriorityCredit); |
7465 | facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; |
7466 | facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); |
7467 | facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; |
7468 | |
7469 | /* |
7470 | * Get the Page Size from IOC Facts. If it's 0, default to 4k. |
7471 | */ |
7472 | ioc->page_size = 1 << facts->CurrentHostPageSize; |
7473 | if (ioc->page_size == 1) { |
7474 | ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n"); |
7475 | ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; |
7476 | } |
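/*
 * Example: CurrentHostPageSize = 12 yields a 1 << 12 = 4096 byte host
 * page; a reported value of 0 would yield 1 << 0 = 1, which triggers
 * the 4k fallback above.
 */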
7477 | dinitprintk(ioc, |
7478 | ioc_info(ioc, "CurrentHostPageSize(%d)\n", |
7479 | facts->CurrentHostPageSize)); |
7480 | |
7481 | dinitprintk(ioc, |
7482 | ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n", |
7483 | facts->RequestCredit, facts->MaxChainDepth)); |
7484 | dinitprintk(ioc, |
7485 | ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n", |
7486 | facts->IOCRequestFrameSize * 4, |
7487 | facts->ReplyFrameSize * 4)); |
7488 | return 0; |
7489 | } |
7490 | |
7491 | /** |
7492 | * _base_send_ioc_init - send ioc_init to firmware |
7493 | * @ioc: per adapter object |
7494 | * |
7495 | * Return: 0 for success, non-zero for failure. |
7496 | */ |
7497 | static int |
7498 | _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) |
7499 | { |
7500 | Mpi2IOCInitRequest_t mpi_request; |
7501 | Mpi2IOCInitReply_t mpi_reply; |
7502 | int i, r = 0; |
7503 | ktime_t current_time; |
7504 | u16 ioc_status; |
7505 | u32 reply_post_free_array_sz = 0; |
7506 | |
7507 | dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); |
7508 | |
7509 | memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); |
7510 | mpi_request.Function = MPI2_FUNCTION_IOC_INIT; |
7511 | mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; |
7512 | mpi_request.VF_ID = 0; /* TODO */ |
7513 | mpi_request.VP_ID = 0; |
7514 | mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); |
7515 | mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); |
7516 | mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; |
7517 | |
7518 | if (_base_is_controller_msix_enabled(ioc)) |
7519 | mpi_request.HostMSIxVectors = ioc->reply_queue_count; |
7520 | mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); |
7521 | mpi_request.ReplyDescriptorPostQueueDepth = |
7522 | cpu_to_le16(ioc->reply_post_queue_depth); |
7523 | mpi_request.ReplyFreeQueueDepth = |
7524 | cpu_to_le16(ioc->reply_free_queue_depth); |
7525 | |
7526 | mpi_request.SenseBufferAddressHigh = |
7527 | cpu_to_le32((u64)ioc->sense_dma >> 32); |
7528 | mpi_request.SystemReplyAddressHigh = |
7529 | cpu_to_le32((u64)ioc->reply_dma >> 32); |
7530 | mpi_request.SystemRequestFrameBaseAddress = |
7531 | cpu_to_le64((u64)ioc->request_dma); |
7532 | mpi_request.ReplyFreeQueueAddress = |
7533 | cpu_to_le64((u64)ioc->reply_free_dma); |
7534 | |
7535 | if (ioc->rdpq_array_enable) { |
7536 | reply_post_free_array_sz = ioc->reply_queue_count * |
7537 | sizeof(Mpi2IOCInitRDPQArrayEntry); |
7538 | memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz); |
7539 | for (i = 0; i < ioc->reply_queue_count; i++) |
7540 | ioc->reply_post_free_array[i].RDPQBaseAddress = |
7541 | cpu_to_le64( |
7542 | (u64)ioc->reply_post[i].reply_post_free_dma); |
7543 | mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; |
7544 | mpi_request.ReplyDescriptorPostQueueAddress = |
7545 | cpu_to_le64((u64)ioc->reply_post_free_array_dma); |
7546 | } else { |
7547 | mpi_request.ReplyDescriptorPostQueueAddress = |
7548 | cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); |
7549 | } |
7550 | |
7551 | /* |
7552 | * Set the flag to enable CoreDump state feature in IOC firmware. |
7553 | */ |
7554 | mpi_request.ConfigurationFlags |= |
7555 | cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE); |
7556 | |
/* This time stamp specifies the number of milliseconds
7558 | * since epoch ~ midnight January 1, 1970. |
7559 | */ |
7560 | current_time = ktime_get_real(); |
7561 | mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); |
7562 | |
7563 | if (ioc->logging_level & MPT_DEBUG_INIT) { |
7564 | __le32 *mfp; |
7565 | int i; |
7566 | |
7567 | mfp = (__le32 *)&mpi_request; |
7568 | ioc_info(ioc, "\toffset:data\n"); |
7569 | for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) |
7570 | ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, |
7571 | le32_to_cpu(mfp[i])); |
7572 | } |
7573 | |
r = _base_handshake_req_reply_wait(ioc,
sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7577 | |
7578 | if (r != 0) { |
7579 | ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); |
7580 | return r; |
7581 | } |
7582 | |
7583 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
7584 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS || |
7585 | mpi_reply.IOCLogInfo) { |
7586 | ioc_err(ioc, "%s: failed\n", __func__); |
7587 | r = -EIO; |
7588 | } |
7589 | |
7590 | /* Reset TimeSync Counter*/ |
7591 | ioc->timestamp_update_count = 0; |
7592 | return r; |
7593 | } |
7594 | |
7595 | /** |
7596 | * mpt3sas_port_enable_done - command completion routine for port enable |
7597 | * @ioc: per adapter object |
7598 | * @smid: system request message index |
7599 | * @msix_index: MSIX table index supplied by the OS |
7600 | * @reply: reply message frame(lower 32bit addr) |
7601 | * |
7602 | * Return: 1 meaning mf should be freed from _base_interrupt |
7603 | * 0 means the mf is freed from this function. |
7604 | */ |
7605 | u8 |
7606 | mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
7607 | u32 reply) |
7608 | { |
7609 | MPI2DefaultReply_t *mpi_reply; |
7610 | u16 ioc_status; |
7611 | |
7612 | if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) |
7613 | return 1; |
7614 | |
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7616 | if (!mpi_reply) |
7617 | return 1; |
7618 | |
7619 | if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) |
7620 | return 1; |
7621 | |
7622 | ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; |
7623 | ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; |
7624 | ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; |
7625 | memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); |
7626 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; |
7627 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
7628 | ioc->port_enable_failed = 1; |
7629 | |
7630 | if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) { |
7631 | ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC; |
7632 | if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { |
7633 | mpt3sas_port_enable_complete(ioc); |
7634 | return 1; |
7635 | } else { |
7636 | ioc->start_scan_failed = ioc_status; |
7637 | ioc->start_scan = 0; |
7638 | return 1; |
7639 | } |
7640 | } |
7641 | complete(&ioc->port_enable_cmds.done); |
7642 | return 1; |
7643 | } |
7644 | |
7645 | /** |
7646 | * _base_send_port_enable - send port_enable(discovery stuff) to firmware |
7647 | * @ioc: per adapter object |
7648 | * |
7649 | * Return: 0 for success, non-zero for failure. |
7650 | */ |
7651 | static int |
7652 | _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) |
7653 | { |
7654 | Mpi2PortEnableRequest_t *mpi_request; |
7655 | Mpi2PortEnableReply_t *mpi_reply; |
7656 | int r = 0; |
7657 | u16 smid; |
7658 | u16 ioc_status; |
7659 | |
7660 | ioc_info(ioc, "sending port enable !!\n"); |
7661 | |
7662 | if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { |
7663 | ioc_err(ioc, "%s: internal command already in use\n", __func__); |
7664 | return -EAGAIN; |
7665 | } |
7666 | |
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7668 | if (!smid) { |
7669 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
7670 | return -EAGAIN; |
7671 | } |
7672 | |
7673 | ioc->port_enable_cmds.status = MPT3_CMD_PENDING; |
7674 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
7675 | ioc->port_enable_cmds.smid = smid; |
7676 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); |
7677 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; |
7678 | |
init_completion(&ioc->port_enable_cmds.done);
ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7682 | if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { |
7683 | ioc_err(ioc, "%s: timeout\n", __func__); |
7684 | _debug_dump_mf(mpi_request, |
sizeof(Mpi2PortEnableRequest_t)/4);
7686 | if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) |
7687 | r = -EFAULT; |
7688 | else |
7689 | r = -ETIME; |
7690 | goto out; |
7691 | } |
7692 | |
7693 | mpi_reply = ioc->port_enable_cmds.reply; |
7694 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; |
7695 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
7696 | ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n", |
7697 | __func__, ioc_status); |
7698 | r = -EFAULT; |
7699 | goto out; |
7700 | } |
7701 | |
7702 | out: |
7703 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
7704 | ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS": "FAILED"); |
7705 | return r; |
7706 | } |
7707 | |
7708 | /** |
7709 | * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) |
7710 | * @ioc: per adapter object |
7711 | * |
7712 | * Return: 0 for success, non-zero for failure. |
7713 | */ |
7714 | int |
7715 | mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) |
7716 | { |
7717 | Mpi2PortEnableRequest_t *mpi_request; |
7718 | u16 smid; |
7719 | |
7720 | ioc_info(ioc, "sending port enable !!\n"); |
7721 | |
7722 | if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { |
7723 | ioc_err(ioc, "%s: internal command already in use\n", __func__); |
7724 | return -EAGAIN; |
7725 | } |
7726 | |
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7728 | if (!smid) { |
7729 | ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); |
7730 | return -EAGAIN; |
7731 | } |
7732 | ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED; |
7733 | ioc->port_enable_cmds.status = MPT3_CMD_PENDING; |
7734 | ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC; |
7735 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
7736 | ioc->port_enable_cmds.smid = smid; |
7737 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); |
7738 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; |
7739 | |
7740 | ioc->put_smid_default(ioc, smid); |
7741 | return 0; |
7742 | } |
7743 | |
7744 | /** |
* _base_determine_wait_on_discovery - discovery wait disposition
7746 | * @ioc: per adapter object |
7747 | * |
7748 | * Decide whether to wait on discovery to complete. Used to either |
7749 | * locate boot device, or report volumes ahead of physical devices. |
7750 | * |
7751 | * Return: 1 for wait, 0 for don't wait. |
7752 | */ |
7753 | static int |
7754 | _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) |
7755 | { |
7756 | /* We wait for discovery to complete if IR firmware is loaded. |
7757 | * The sas topology events arrive before PD events, so we need time to |
* turn on the bit in ioc->pd_handles to indicate a PD.
* Also, it may be required to report Volumes ahead of physical
7760 | * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. |
7761 | */ |
7762 | if (ioc->ir_firmware) |
7763 | return 1; |
7764 | |
7765 | /* if no Bios, then we don't need to wait */ |
7766 | if (!ioc->bios_pg3.BiosVersion) |
7767 | return 0; |
7768 | |
/* Bios is present, so we drop down here.
*
* If there are any entries in the Bios Page 2, then we wait
7772 | * for discovery to complete. |
7773 | */ |
7774 | |
7775 | /* Current Boot Device */ |
7776 | if ((ioc->bios_pg2.CurrentBootDeviceForm & |
7777 | MPI2_BIOSPAGE2_FORM_MASK) == |
7778 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && |
7779 | /* Request Boot Device */ |
7780 | (ioc->bios_pg2.ReqBootDeviceForm & |
7781 | MPI2_BIOSPAGE2_FORM_MASK) == |
7782 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && |
7783 | /* Alternate Request Boot Device */ |
7784 | (ioc->bios_pg2.ReqAltBootDeviceForm & |
7785 | MPI2_BIOSPAGE2_FORM_MASK) == |
7786 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) |
7787 | return 0; |
7788 | |
7789 | return 1; |
7790 | } |

/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}
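
/*
 * Worked example (editorial): the 128 supported events are spread across
 * four 32-bit mask words, so event E lands in word E / 32 at bit E % 32.
 * For a hypothetical event code 70:
 *
 *	70 / 32 == 2 and 70 % 32 == 6
 *
 * so _base_unmask_events(ioc, 70) clears bit 6 of ioc->event_masks[2].
 * A cleared bit means "notify"; set bits keep the event masked.
 */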

/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i, issue_diag_reset = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			issue_diag_reset = 1;

	} else
		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	if (issue_diag_reset) {
		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}
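
/*
 * Editorial caller-side sketch: mpt3sas_base_attach() below first masks
 * everything, then unmasks the events it cares about, and the masks are
 * pushed to firmware through the request above (invoked from
 * _base_make_ioc_operational()):
 *
 *	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 *		ioc->event_masks[i] = -1;
 *	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
 *	...
 *	r = _base_event_notification(ioc);
 */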

/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event
 *
 * This will turn on firmware event notification when an application
 * asks for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	if (!send_update_to_fw)
		return;

	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}
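
/*
 * Worked example (editorial): @event_type uses "bit set = event wanted",
 * while ioc->event_masks[] uses "bit set = event masked". For a single
 * requested event bit B in word W the loop above effectively does:
 *
 *	event_mask = ~event_type[W];		bit B is now 0
 *	if (ioc->event_masks[W] & (1 << B))	still masked?
 *		unmask it and push the update to firmware
 *
 * Events that are already enabled are left untouched, so no redundant
 * EventNotification request is sent.
 */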

/**
 * mpt3sas_base_unlock_and_get_host_diagnostic - enable Host Diagnostic Register writes
 * @ioc: per adapter object
 * @host_diagnostic: host diagnostic register content
 *
 * Return: 0 for success, non-zero for failure.
 */

int
mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
	u32 *host_diagnostic)
{

	u32 count;
	*host_diagnostic = 0;
	count = 0;

	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20) {
			ioc_info(ioc,
			    "Stop writing magic sequence after 20 retries\n");
			_base_dump_reg_set(ioc);
			return -EFAULT;
		}

		*host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
		drsprintk(ioc,
		    ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
			     count, *host_diagnostic));

	} while ((*host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
	return 0;
}

/**
 * mpt3sas_base_lock_host_diagnostic - disable Host Diagnostic Register writes
 * @ioc: per adapter object
 */

void
mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc)
{
	drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n"));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
}
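
/*
 * Editorial pairing sketch (mirrors _base_diag_reset() below): the two
 * helpers above bracket any diagnostic register writes, under
 * ioc->hostdiag_unlock_mutex:
 *
 *	mutex_lock(&ioc->hostdiag_unlock_mutex);
 *	if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diag))
 *		goto out;
 *	... write to &ioc->chip->HostDiagnostic ...
 *	mpt3sas_base_lock_host_diagnostic(ioc);
 *	mutex_unlock(&ioc->hostdiag_unlock_mutex);
 */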

/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	ioc_info(ioc, "sending diag reset !!\n");

	pci_cfg_access_lock(ioc->pdev);

	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));

	mutex_lock(&ioc->hostdiag_unlock_mutex);
	if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
		goto out;

	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	    &ioc->chip->HostDiagnostic);

	/* This delay allows the chip PCIe hardware time to finish reset tasks */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF) {
			ioc_info(ioc,
			    "Invalid host diagnostic register value\n");
			_base_dump_reg_set(ioc);
			goto out;
		}
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		/* Wait to pass the second read delay window */
		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC/1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc,
		    ioc_info(ioc,
			"restart the adapter assuming the HCB Address points to good F/W\n"));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	mpt3sas_base_lock_host_diagnostic(ioc);
	mutex_unlock(&ioc->hostdiag_unlock_mutex);

	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
			__func__, ioc_state);
		_base_dump_reg_set(ioc);
		goto out_unlocked;
	}

	pci_cfg_access_unlock(ioc->pdev);
	ioc_info(ioc, "diag reset: SUCCESS\n");
	return 0;

 out:
	/* these failure paths still hold hostdiag_unlock_mutex */
	mutex_unlock(&ioc->hostdiag_unlock_mutex);
 out_unlocked:
	pci_cfg_access_unlock(ioc->pdev);
	ioc_err(ioc, "diag reset: FAILED\n");
	return -EFAULT;
}
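
/*
 * Editorial arithmetic note on the poll bound above: the loop runs at most
 * 300000000 / MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC iterations
 * and each iteration sleeps that same delay, so the worst case is
 *
 *	(300000000 / D) iterations x D microseconds == 300 seconds
 *
 * independent of the actual delay value D, matching the "approximately
 * 300 second max wait" comment.
 */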

/**
 * mpt3sas_base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc,
		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
			   __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
					__func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		ioc_info(ioc, "unexpected doorbell active!\n");
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		/*
		 * If a host reset is invoked while the watchdog thread is
		 * waiting for the IOC state to change to Fault, the driver
		 * has to wait here for the CoreDump state to clear;
		 * otherwise a reset would be issued and the FW would move
		 * the IOC to the Reset state without copying the FW logs to
		 * the coredump region.
		 */
		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
			mpt3sas_print_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc,
			    __func__);
		}
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
		}

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
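
/*
 * Editorial summary of the dispositions above:
 *
 *	RESET       -> poll up to ~10 s for READY
 *	READY       -> done
 *	FAULT       -> log the fault code, diag reset
 *	COREDUMP    -> let the coredump finish, then diag reset
 *	OPERATIONAL -> message unit reset (unless FORCE_BIG_HAMMER),
 *	               falling through to diag reset on failure
 */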

/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check the IOC state for fault and issue a
		 * diag reset during host reset. This check is needed
		 * only during driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7) <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	mpt3sas_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	r = _base_static_config_pages(ioc);
	if (r)
		return r;

	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (!ioc->shost_recovery) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
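
/*
 * Editorial layout sketch for the Reply Post Free Queue setup above,
 * assuming two reply queues of depth N:
 *
 *	rdpq_array_enable == 0 (single contiguous DMA region):
 *		queue 0 -> reply_post[0].reply_post_free[0 .. N-1]
 *		queue 1 -> reply_post[0].reply_post_free[N .. 2N-1]
 *
 *	rdpq_array_enable == 1 (one DMA allocation per queue):
 *		queue 0 -> reply_post[0].reply_post_free
 *		queue 1 -> reply_post[1].reply_post_free
 *
 * Each descriptor is pre-set to ULLONG_MAX, which reads back as the
 * "unused" descriptor type, so the reply-processing loop can tell fresh
 * slots from posted replies.
 */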

/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		mpt3sas_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
	return;
}

/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = false;
	ioc->dma_mask = 64;
	if (ioc->is_aero_ioc) {
		ioc->base_readl = &_base_readl_aero;
		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
	} else {
		ioc->base_readl = &_base_readl;
		ioc->base_readl_ext_retry = &_base_readl;
	}
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter gather
		 * elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
					&_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
				&_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
				&_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
				&_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}
	/*
	 * These function pointers are for other requests that don't
	 * require IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r == -EAGAIN) {
		r = _base_make_ioc_operational(ioc);
		if (r)
			goto out_free_resources;
	}

	/*
	 * Save a copy of the current IOCFacts in prev_fw_facts; it is
	 * used to detect capability changes during an online firmware
	 * upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->ioc_coredump_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
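
/*
 * Editorial arithmetic note for the bitmask sizing above: pd_handles_sz is
 * the byte-rounded device-handle count, i.e. the equivalent of
 * DIV_ROUND_UP(ioc->facts.MaxDevHandle, 8). For example, a hypothetical
 * MaxDevHandle of 1000 needs exactly 125 bytes, while 1001 rounds up
 * to 126.
 */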


/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}

/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}

/**
 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
 * @ioc: per adapter object
 */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
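
/*
 * Editorial note on the wake-up protocol above: each waiter that blocks on
 * an internal command checks MPT3_CMD_RESET after being completed, so it
 * can tell a real reply from a reset-time abort. _base_event_notification()
 * above is one such waiter:
 *
 *	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 *		if (ioc->base_cmds.status & MPT3_CMD_RESET)
 *			r = -EFAULT;	(aborted by reset, no diag reset)
 *		...
 *	}
 */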

/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}

/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}

/**
 * mpt3sas_wait_for_commands_to_complete - wait for outstanding commands
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits up to 10 seconds for all pending commands to
 * complete prior to putting the controller into reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}

/**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 * attributes during online firmware upgrade and update the corresponding
 * IOC variables accordingly.
 *
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * Return: 0 on success; -ENOMEM if a (re)allocation fails.
 */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
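
/*
 * Worked example (editorial) of the grow-and-zero pattern above: suppose a
 * firmware upgrade raises MaxDevHandle so that pd_handles_sz grows from
 * 125 to 128 bytes. krealloc() preserves the first 125 bytes but leaves
 * the new tail undefined, hence the explicit
 *
 *	memset(pd_handles + 125, 0, 128 - 125);
 *
 * so that handles in the newly covered range start out "not a PD".
 */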

/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP) {
			is_fault = 1;
			ioc->htb_rel.trigger_info_dwords[1] =
			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
		}
	}
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_pause_mq_polling(ioc);
	r = mpt3sas_base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware image and it requires a system reboot\n");
		goto out;
	}
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
		      ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);
	mpt3sas_base_resume_mq_polling(ioc);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}