1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6#include "qla_def.h"
7#include "qla_target.h"
8
9#include <linux/delay.h>
10#include <linux/gfp.h>
11
12#ifdef CONFIG_PPC
13#define IS_PPCARCH true
14#else
15#define IS_PPCARCH false
16#endif
17
18static struct mb_cmd_name {
19 uint16_t cmd;
20 const char *str;
21} mb_str[] = {
22 {MBC_GET_PORT_DATABASE, "GPDB"},
23 {MBC_GET_ID_LIST, "GIDList"},
24 {MBC_GET_LINK_PRIV_STATS, "Stats"},
25 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
26};
27
28static const char *mb_to_str(uint16_t cmd)
29{
30 int i;
31 struct mb_cmd_name *e;
32
33 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
34 e = mb_str + i;
35 if (cmd == e->cmd)
36 return e->str;
37 }
38 return "unknown";
39}
40
41static struct rom_cmd {
42 uint16_t cmd;
43} rom_cmds[] = {
44 { MBC_LOAD_RAM },
45 { MBC_EXECUTE_FIRMWARE },
46 { MBC_READ_RAM_WORD },
47 { MBC_MAILBOX_REGISTER_TEST },
48 { MBC_VERIFY_CHECKSUM },
49 { MBC_GET_FIRMWARE_VERSION },
50 { MBC_LOAD_RISC_RAM },
51 { MBC_DUMP_RISC_RAM },
52 { MBC_LOAD_RISC_RAM_EXTENDED },
53 { MBC_DUMP_RISC_RAM_EXTENDED },
54 { MBC_WRITE_RAM_WORD_EXTENDED },
55 { MBC_READ_RAM_EXTENDED },
56 { MBC_GET_RESOURCE_COUNTS },
57 { MBC_SET_FIRMWARE_OPTION },
58 { MBC_MID_INITIALIZE_FIRMWARE },
59 { MBC_GET_FIRMWARE_STATE },
60 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
61 { MBC_GET_RETRY_COUNT },
62 { MBC_TRACE_CONTROL },
63 { MBC_INITIALIZE_MULTIQ },
64 { MBC_IOCB_COMMAND_A64 },
65 { MBC_GET_ADAPTER_LOOP_ID },
66 { MBC_READ_SFP },
67 { MBC_SET_RNID_PARAMS },
68 { MBC_GET_RNID_PARAMS },
69 { MBC_GET_SET_ZIO_THRESHOLD },
70};
71
72static int is_rom_cmd(uint16_t cmd)
73{
74 int i;
75 struct rom_cmd *wc;
76
77 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
78 wc = rom_cmds + i;
79 if (wc->cmd == cmd)
80 return 1;
81 }
82
83 return 0;
84}
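/*
 * Note: rom_cmds[] lists the mailbox opcodes that may still be issued while
 * an ISP abort is pending; qla2x00_mailbox_command() uses is_rom_cmd() as
 * the gate and returns QLA_FUNCTION_TIMEOUT for any other opcode until the
 * abort completes.
 */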
85
/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for its completion.
 *
 * Input:
 *	vha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mcp->mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command completed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
105static int
106qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
107{
108 int rval, i;
109 unsigned long flags = 0;
110 device_reg_t *reg;
111 uint8_t abort_active, eeh_delay;
112 uint8_t io_lock_on;
113 uint16_t command = 0;
114 uint16_t *iptr;
115 __le16 __iomem *optr;
116 uint32_t cnt;
117 uint32_t mboxes;
118 unsigned long wait_time;
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev);
121 u32 chip_reset;
122
123
124 ql_dbg(ql_dbg_mbx, vha, 0x1000, fmt: "Entered %s.\n", __func__);
125
126 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
127 ql_log(ql_log_warn, vha, 0x1001,
128 fmt: "PCI channel failed permanently, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
130 }
131
132 if (vha->device_flags & DFLG_DEV_FAILED) {
133 ql_log(ql_log_warn, vha, 0x1002,
134 fmt: "Device in failed state, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
136 }
137
	/* If a PCI error occurred, avoid mailbox processing. */
139 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
140 test_bit(UNLOADING, &base_vha->dpc_flags)) {
141 ql_log(ql_log_warn, vha, 0xd04e,
142 fmt: "PCI error, exiting.\n");
143 return QLA_FUNCTION_TIMEOUT;
144 }
145 eeh_delay = 0;
146 reg = ha->iobase;
147 io_lock_on = base_vha->flags.init_done;
148
149 rval = QLA_SUCCESS;
150 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
151 chip_reset = ha->chip_reset;
152
153 if (ha->flags.pci_channel_io_perm_failure) {
154 ql_log(ql_log_warn, vha, 0x1003,
155 fmt: "Perm failure on EEH timeout MBX, exiting.\n");
156 return QLA_FUNCTION_TIMEOUT;
157 }
158
159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
160 /* Setting Link-Down error */
161 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
162 ql_log(ql_log_warn, vha, 0x1004,
163 fmt: "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
164 return QLA_FUNCTION_TIMEOUT;
165 }
166
167 /* check if ISP abort is active and return cmd with timeout */
168 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
169 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
171 !is_rom_cmd(cmd: mcp->mb[0])) || ha->flags.eeh_busy) {
172 ql_log(ql_log_info, vha, 0x1005,
173 fmt: "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
174 mcp->mb[0]);
175 return QLA_FUNCTION_TIMEOUT;
176 }
177
178 atomic_inc(v: &ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This serializes the actual issuing of mailbox commands
	 * outside of ISP abort handling.
	 */
184 if (!wait_for_completion_timeout(x: &ha->mbx_cmd_comp, timeout: mcp->tov * HZ)) {
185 /* Timeout occurred. Return error. */
186 ql_log(ql_log_warn, vha, 0xd035,
187 fmt: "Cmd access timeout, cmd=0x%x, Exiting.\n",
188 mcp->mb[0]);
189 vha->hw_err_cnt++;
190 atomic_dec(v: &ha->num_pend_mbx_stage1);
191 return QLA_FUNCTION_TIMEOUT;
192 }
193 atomic_dec(v: &ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 ha->flags.eeh_busy) {
196 ql_log(ql_log_warn, vha, 0xd035,
197 fmt: "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
198 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
199 rval = QLA_ABORTED;
200 goto premature_exit;
201 }
202
203
204 /* Save mailbox command for debug */
205 ha->mcp = mcp;
206
207 ql_dbg(ql_dbg_mbx, vha, 0x1006,
208 fmt: "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
209
210 spin_lock_irqsave(&ha->hardware_lock, flags);
211
212 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 ha->flags.mbox_busy) {
214 rval = QLA_ABORTED;
215 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
216 goto premature_exit;
217 }
218 ha->flags.mbox_busy = 1;
219
220 /* Load mailbox registers. */
221 if (IS_P3P_TYPE(ha))
222 optr = &reg->isp82.mailbox_in[0];
223 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
224 optr = &reg->isp24.mailbox0;
225 else
226 optr = MAILBOX_REG(ha, &reg->isp, 0);
227
228 iptr = mcp->mb;
229 command = mcp->mb[0];
230 mboxes = mcp->out_mb;
231
232 ql_dbg(ql_dbg_mbx, vha, 0x1111,
233 fmt: "Mailbox registers (OUT):\n");
234 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 if (IS_QLA2200(ha) && cnt == 8)
236 optr = MAILBOX_REG(ha, &reg->isp, 8);
237 if (mboxes & BIT_0) {
238 ql_dbg(ql_dbg_mbx, vha, 0x1112,
239 fmt: "mbox[%d]<-0x%04x\n", cnt, *iptr);
240 wrt_reg_word(addr: optr, data: *iptr);
241 } else {
242 wrt_reg_word(addr: optr, data: 0);
243 }
244
245 mboxes >>= 1;
246 optr++;
247 iptr++;
248 }
249
250 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
251 fmt: "I/O Address = %p.\n", optr);
252
253 /* Issue set host interrupt command to send cmd out. */
254 ha->flags.mbox_int = 0;
255 clear_bit(MBX_INTERRUPT, addr: &ha->mbx_cmd_flags);
256
257 /* Unlock mbx registers and wait for interrupt */
258 ql_dbg(ql_dbg_mbx, vha, 0x100f,
259 fmt: "Going to unlock irq & waiting for interrupts. "
260 "jiffies=%lx.\n", jiffies);
261
262 /* Wait for mbx cmd completion until timeout */
263 atomic_inc(v: &ha->num_pend_mbx_stage2);
264 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
265 set_bit(MBX_INTR_WAIT, addr: &ha->mbx_cmd_flags);
266
267 if (IS_P3P_TYPE(ha))
268 wrt_reg_dword(addr: &reg->isp82.hint, HINT_MBX_INT_PENDING);
269 else if (IS_FWI2_CAPABLE(ha))
270 wrt_reg_dword(addr: &reg->isp24.hccr, HCCRX_SET_HOST_INT);
271 else
272 wrt_reg_word(addr: &reg->isp.hccr, HCCR_SET_HOST_INT);
273 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
274
275 wait_time = jiffies;
276 if (!wait_for_completion_timeout(x: &ha->mbx_intr_comp,
277 timeout: mcp->tov * HZ)) {
278 ql_dbg(ql_dbg_mbx, vha, 0x117a,
279 fmt: "cmd=%x Timeout.\n", command);
280 spin_lock_irqsave(&ha->hardware_lock, flags);
281 clear_bit(MBX_INTR_WAIT, addr: &ha->mbx_cmd_flags);
282 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
283
284 if (chip_reset != ha->chip_reset) {
285 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
286
287 spin_lock_irqsave(&ha->hardware_lock, flags);
288 ha->flags.mbox_busy = 0;
289 spin_unlock_irqrestore(lock: &ha->hardware_lock,
290 flags);
291 atomic_dec(v: &ha->num_pend_mbx_stage2);
292 rval = QLA_ABORTED;
293 goto premature_exit;
294 }
295 } else if (ha->flags.purge_mbox ||
296 chip_reset != ha->chip_reset) {
297 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
298
299 spin_lock_irqsave(&ha->hardware_lock, flags);
300 ha->flags.mbox_busy = 0;
301 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
302 atomic_dec(v: &ha->num_pend_mbx_stage2);
303 rval = QLA_ABORTED;
304 goto premature_exit;
305 }
306
307 if (time_after(jiffies, wait_time + 5 * HZ))
308 ql_log(ql_log_warn, vha, 0x1015, fmt: "cmd=0x%x, waited %d msecs\n",
309 command, jiffies_to_msecs(j: jiffies - wait_time));
310 } else {
311 ql_dbg(ql_dbg_mbx, vha, 0x1011,
312 fmt: "Cmd=%x Polling Mode.\n", command);
313
314 if (IS_P3P_TYPE(ha)) {
315 if (rd_reg_dword(addr: &reg->isp82.hint) &
316 HINT_MBX_INT_PENDING) {
317 ha->flags.mbox_busy = 0;
318 spin_unlock_irqrestore(lock: &ha->hardware_lock,
319 flags);
320 atomic_dec(v: &ha->num_pend_mbx_stage2);
321 ql_dbg(ql_dbg_mbx, vha, 0x1012,
322 fmt: "Pending mailbox timeout, exiting.\n");
323 vha->hw_err_cnt++;
324 rval = QLA_FUNCTION_TIMEOUT;
325 goto premature_exit;
326 }
327 wrt_reg_dword(addr: &reg->isp82.hint, HINT_MBX_INT_PENDING);
328 } else if (IS_FWI2_CAPABLE(ha))
329 wrt_reg_dword(addr: &reg->isp24.hccr, HCCRX_SET_HOST_INT);
330 else
331 wrt_reg_word(addr: &reg->isp.hccr, HCCR_SET_HOST_INT);
332 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
333
334 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
335 while (!ha->flags.mbox_int) {
336 if (ha->flags.purge_mbox ||
337 chip_reset != ha->chip_reset) {
338 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
339
340 spin_lock_irqsave(&ha->hardware_lock, flags);
341 ha->flags.mbox_busy = 0;
342 spin_unlock_irqrestore(lock: &ha->hardware_lock,
343 flags);
344 atomic_dec(v: &ha->num_pend_mbx_stage2);
345 rval = QLA_ABORTED;
346 goto premature_exit;
347 }
348
349 if (time_after(jiffies, wait_time))
350 break;
351
352 /* Check for pending interrupts. */
353 qla2x00_poll(rsp: ha->rsp_q_map[0]);
354
355 if (!ha->flags.mbox_int &&
356 !(IS_QLA2200(ha) &&
357 command == MBC_LOAD_RISC_RAM_EXTENDED))
358 msleep(msecs: 10);
359 } /* while */
360 ql_dbg(ql_dbg_mbx, vha, 0x1013,
361 fmt: "Waited %d sec.\n",
362 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
363 }
364 atomic_dec(v: &ha->num_pend_mbx_stage2);
365
366 /* Check whether we timed out */
367 if (ha->flags.mbox_int) {
368 uint16_t *iptr2;
369
370 ql_dbg(ql_dbg_mbx, vha, 0x1014,
371 fmt: "Cmd=%x completed.\n", command);
372
373 /* Got interrupt. Clear the flag. */
374 ha->flags.mbox_int = 0;
375 clear_bit(MBX_INTERRUPT, addr: &ha->mbx_cmd_flags);
376
377 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
378 spin_lock_irqsave(&ha->hardware_lock, flags);
379 ha->flags.mbox_busy = 0;
380 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
381
382 /* Setting Link-Down error */
383 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
384 ha->mcp = NULL;
385 rval = QLA_FUNCTION_FAILED;
386 ql_log(ql_log_warn, vha, 0xd048,
387 fmt: "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
388 goto premature_exit;
389 }
390
391 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
392 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
393 fmt: "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
394 MBS_COMMAND_COMPLETE);
395 rval = QLA_FUNCTION_FAILED;
396 }
397
398 /* Load return mailbox registers. */
399 iptr2 = mcp->mb;
400 iptr = (uint16_t *)&ha->mailbox_out[0];
401 mboxes = mcp->in_mb;
402
403 ql_dbg(ql_dbg_mbx, vha, 0x1113,
404 fmt: "Mailbox registers (IN):\n");
405 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
406 if (mboxes & BIT_0) {
407 *iptr2 = *iptr;
408 ql_dbg(ql_dbg_mbx, vha, 0x1114,
409 fmt: "mbox[%d]->0x%04x\n", cnt, *iptr2);
410 }
411
412 mboxes >>= 1;
413 iptr2++;
414 iptr++;
415 }
416 } else {
417
418 uint16_t mb[8];
419 uint32_t ictrl, host_status, hccr;
420 uint16_t w;
421
422 if (IS_FWI2_CAPABLE(ha)) {
423 mb[0] = rd_reg_word(addr: &reg->isp24.mailbox0);
424 mb[1] = rd_reg_word(addr: &reg->isp24.mailbox1);
425 mb[2] = rd_reg_word(addr: &reg->isp24.mailbox2);
426 mb[3] = rd_reg_word(addr: &reg->isp24.mailbox3);
427 mb[7] = rd_reg_word(addr: &reg->isp24.mailbox7);
428 ictrl = rd_reg_dword(addr: &reg->isp24.ictrl);
429 host_status = rd_reg_dword(addr: &reg->isp24.host_status);
430 hccr = rd_reg_dword(addr: &reg->isp24.hccr);
431
432 ql_log(ql_log_warn, vha, 0xd04c,
433 fmt: "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
434 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
435 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
436 mb[7], host_status, hccr);
437 vha->hw_err_cnt++;
438
439 } else {
440 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
441 ictrl = rd_reg_word(addr: &reg->isp.ictrl);
442 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
443 fmt: "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
444 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
445 vha->hw_err_cnt++;
446 }
447 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
448
449 /* Capture FW dump only, if PCI device active */
450 if (!pci_channel_offline(pdev: vha->hw->pdev)) {
451 pci_read_config_word(dev: ha->pdev, PCI_VENDOR_ID, val: &w);
452 if (w == 0xffff || ictrl == 0xffffffff ||
453 (chip_reset != ha->chip_reset)) {
				/* This is a special case: if a driver unload
				 * is in progress and the PCI device has gone
				 * into a bad state due to a PCI error, only
				 * the PCI ERR flag will be set. Exit
				 * prematurely in that case.
				 */
460 spin_lock_irqsave(&ha->hardware_lock, flags);
461 ha->flags.mbox_busy = 0;
462 spin_unlock_irqrestore(lock: &ha->hardware_lock,
463 flags);
464 rval = QLA_FUNCTION_TIMEOUT;
465 goto premature_exit;
466 }
467
			/* Attempt to capture a firmware dump for further
			 * analysis of the current firmware state. This is
			 * not needed if we are intentionally generating
			 * a dump.
			 */
473 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
474 qla2xxx_dump_fw(vha);
475 rval = QLA_FUNCTION_TIMEOUT;
476 }
477 }
478 spin_lock_irqsave(&ha->hardware_lock, flags);
479 ha->flags.mbox_busy = 0;
480 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
481
482 /* Clean up */
483 ha->mcp = NULL;
484
485 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
486 ql_dbg(ql_dbg_mbx, vha, 0x101a,
487 fmt: "Checking for additional resp interrupt.\n");
488
489 /* polling mode for non isp_abort commands. */
490 qla2x00_poll(rsp: ha->rsp_q_map[0]);
491 }
492
493 if (rval == QLA_FUNCTION_TIMEOUT &&
494 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
495 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
496 ha->flags.eeh_busy) {
			/* Not in DPC context; schedule the DPC thread to take over. */
498 ql_dbg(ql_dbg_mbx, vha, 0x101b,
499 fmt: "Timeout, schedule isp_abort_needed.\n");
500
501 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
502 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
503 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
504 if (IS_QLA82XX(ha)) {
505 ql_dbg(ql_dbg_mbx, vha, 0x112a,
506 fmt: "disabling pause transmit on port "
507 "0 & 1.\n");
508 qla82xx_wr_32(ha,
509 QLA82XX_CRB_NIU + 0x98,
510 CRB_NIU_XG_PAUSE_CTL_P0|
511 CRB_NIU_XG_PAUSE_CTL_P1);
512 }
513 ql_log(ql_log_info, vha: base_vha, 0x101c,
514 fmt: "Mailbox cmd timeout occurred, cmd=0x%x, "
515 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
516 "abort.\n", command, mcp->mb[0],
517 ha->flags.eeh_busy);
518 vha->hw_err_cnt++;
519 set_bit(ISP_ABORT_NEEDED, addr: &vha->dpc_flags);
520 qla2xxx_wake_dpc(vha);
521 }
522 } else if (current == ha->dpc_thread) {
523 /* call abort directly since we are in the DPC thread */
524 ql_dbg(ql_dbg_mbx, vha, 0x101d,
525 fmt: "Timeout, calling abort_isp.\n");
526
527 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
528 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
529 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
530 if (IS_QLA82XX(ha)) {
531 ql_dbg(ql_dbg_mbx, vha, 0x112b,
532 fmt: "disabling pause transmit on port "
533 "0 & 1.\n");
534 qla82xx_wr_32(ha,
535 QLA82XX_CRB_NIU + 0x98,
536 CRB_NIU_XG_PAUSE_CTL_P0|
537 CRB_NIU_XG_PAUSE_CTL_P1);
538 }
539 ql_log(ql_log_info, vha: base_vha, 0x101e,
540 fmt: "Mailbox cmd timeout occurred, cmd=0x%x, "
541 "mb[0]=0x%x. Scheduling ISP abort ",
542 command, mcp->mb[0]);
543 vha->hw_err_cnt++;
544 set_bit(ABORT_ISP_ACTIVE, addr: &vha->dpc_flags);
545 clear_bit(ISP_ABORT_NEEDED, addr: &vha->dpc_flags);
546 /* Allow next mbx cmd to come in. */
547 complete(&ha->mbx_cmd_comp);
548 if (ha->isp_ops->abort_isp(vha) &&
549 !ha->flags.eeh_busy) {
550 /* Failed. retry later. */
551 set_bit(ISP_ABORT_NEEDED,
552 addr: &vha->dpc_flags);
553 }
554 clear_bit(ABORT_ISP_ACTIVE, addr: &vha->dpc_flags);
555 ql_dbg(ql_dbg_mbx, vha, 0x101f,
556 fmt: "Finished abort_isp.\n");
557 goto mbx_done;
558 }
559 }
560 }
561
562premature_exit:
563 /* Allow next mbx cmd to come in. */
564 complete(&ha->mbx_cmd_comp);
565
566mbx_done:
567 if (rval == QLA_ABORTED) {
568 ql_log(ql_log_info, vha, 0xd035,
569 fmt: "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
570 mcp->mb[0]);
571 } else if (rval) {
572 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
573 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
574 dev_name(&ha->pdev->dev), 0x1020+0x800,
575 vha->host_no, rval);
576 mboxes = mcp->in_mb;
577 cnt = 4;
578 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
579 if (mboxes & BIT_0) {
580 printk(" mb[%u]=%x", i, mcp->mb[i]);
581 cnt--;
582 }
583 pr_warn(" cmd=%x ****\n", command);
584 }
585 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
586 ql_dbg(ql_dbg_mbx, vha, 0x1198,
587 fmt: "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
588 rd_reg_dword(addr: &reg->isp24.host_status),
589 rd_reg_dword(addr: &reg->isp24.ictrl),
590 rd_reg_dword(addr: &reg->isp24.istatus));
591 } else {
592 ql_dbg(ql_dbg_mbx, vha, 0x1206,
593 fmt: "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
594 rd_reg_word(addr: &reg->isp.ctrl_status),
595 rd_reg_word(addr: &reg->isp.ictrl),
596 rd_reg_word(addr: &reg->isp.istatus));
597 }
598 } else {
599 ql_dbg(ql_dbg_mbx, vha: base_vha, 0x1021, fmt: "Done %s.\n", __func__);
600 }
601
602 i = 500;
603 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
		/*
		 * The caller of this mailbox encountered a PCI error.
		 * Hold the thread until the PCIe link reset completes to
		 * make sure the caller does not unmap DMA memory while
		 * recovery is in progress.
		 */
610 msleep(msecs: 1);
611 i--;
612 }
613 return rval;
614}
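/*
 * Illustrative caller pattern (a minimal sketch, not a complete command):
 * every wrapper below fills an on-stack mbx_cmd_t, sets the out_mb/in_mb
 * bitmaps for the registers it writes/reads, then calls
 * qla2x00_mailbox_command():
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	// opcode always in mb[0]
 *	mcp->out_mb = MBX_0;			// registers written to firmware
 *	mcp->in_mb = MBX_1|MBX_0;		// registers read back on completion
 *	mcp->tov = MBX_TOV_SECONDS;		// completion timeout
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *	if (rval == QLA_SUCCESS)
 *		state = mcp->mb[1];		// returned data in mcp->mb[]
 */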
615
616int
617qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
618 uint32_t risc_code_size)
619{
620 int rval;
621 struct qla_hw_data *ha = vha->hw;
622 mbx_cmd_t mc;
623 mbx_cmd_t *mcp = &mc;
624
625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
626 fmt: "Entered %s.\n", __func__);
627
628 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
629 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
630 mcp->mb[8] = MSW(risc_addr);
631 mcp->out_mb = MBX_8|MBX_0;
632 } else {
633 mcp->mb[0] = MBC_LOAD_RISC_RAM;
634 mcp->out_mb = MBX_0;
635 }
636 mcp->mb[1] = LSW(risc_addr);
637 mcp->mb[2] = MSW(req_dma);
638 mcp->mb[3] = LSW(req_dma);
639 mcp->mb[6] = MSW(MSD(req_dma));
640 mcp->mb[7] = LSW(MSD(req_dma));
641 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
642 if (IS_FWI2_CAPABLE(ha)) {
643 mcp->mb[4] = MSW(risc_code_size);
644 mcp->mb[5] = LSW(risc_code_size);
645 mcp->out_mb |= MBX_5|MBX_4;
646 } else {
647 mcp->mb[4] = LSW(risc_code_size);
648 mcp->out_mb |= MBX_4;
649 }
650
651 mcp->in_mb = MBX_1|MBX_0;
652 mcp->tov = MBX_TOV_SECONDS;
653 mcp->flags = 0;
654 rval = qla2x00_mailbox_command(vha, mcp);
655
656 if (rval != QLA_SUCCESS) {
657 ql_dbg(ql_dbg_mbx, vha, 0x1023,
658 fmt: "Failed=%x mb[0]=%x mb[1]=%x.\n",
659 rval, mcp->mb[0], mcp->mb[1]);
660 vha->hw_err_cnt++;
661 } else {
662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
663 fmt: "Done %s.\n", __func__);
664 }
665
666 return rval;
667}
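/*
 * Typical use (sketch): firmware images larger than one transfer are loaded
 * in chunks, with req_dma pointing at a DMA-coherent staging buffer and
 * risc_addr advancing by the amount written on each call, e.g.
 *
 *	rval = qla2x00_load_ram(vha, staging_dma, risc_addr, dwords);
 *
 * where staging_dma and dwords are whatever the firmware-load path prepared.
 */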
668
669#define NVME_ENABLE_FLAG BIT_3
670#define EDIF_HW_SUPPORT BIT_10
671
672/*
673 * qla2x00_execute_fw
674 * Start adapter firmware.
675 *
676 * Input:
677 * ha = adapter block pointer.
678 * TARGET_QUEUE_LOCK must be released.
679 * ADAPTER_STATE_LOCK must be released.
680 *
681 * Returns:
682 * qla2x00 local function return status code.
683 *
684 * Context:
685 * Kernel context.
686 */
687int
688qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
689{
690 int rval;
691 struct qla_hw_data *ha = vha->hw;
692 mbx_cmd_t mc;
693 mbx_cmd_t *mcp = &mc;
694 u8 semaphore = 0;
695#define EXE_FW_FORCE_SEMAPHORE BIT_7
696 u8 retry = 5;
697
698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
699 fmt: "Entered %s.\n", __func__);
700
701again:
702 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
703 mcp->out_mb = MBX_0;
704 mcp->in_mb = MBX_0;
705 if (IS_FWI2_CAPABLE(ha)) {
706 mcp->mb[1] = MSW(risc_addr);
707 mcp->mb[2] = LSW(risc_addr);
708 mcp->mb[3] = 0;
709 mcp->mb[4] = 0;
710 mcp->mb[11] = 0;
711
712 /* Enable BPM? */
713 if (ha->flags.lr_detected) {
714 mcp->mb[4] = BIT_0;
715 if (IS_BPM_RANGE_CAPABLE(ha))
716 mcp->mb[4] |=
717 ha->lr_distance << LR_DIST_FW_POS;
718 }
719
720 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
721 mcp->mb[4] |= NVME_ENABLE_FLAG;
722
723 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
724 struct nvram_81xx *nv = ha->nvram;
725 /* set minimum speed if specified in nvram */
726 if (nv->min_supported_speed >= 2 &&
727 nv->min_supported_speed <= 5) {
728 mcp->mb[4] |= BIT_4;
729 mcp->mb[11] |= nv->min_supported_speed & 0xF;
730 mcp->out_mb |= MBX_11;
731 mcp->in_mb |= BIT_5;
732 vha->min_supported_speed =
733 nv->min_supported_speed;
734 }
735
736 if (IS_PPCARCH)
737 mcp->mb[11] |= BIT_4;
738 }
739
740 if (ha->flags.exlogins_enabled)
741 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
742
743 if (ha->flags.exchoffld_enabled)
744 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
745
746 if (semaphore)
747 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
748
749 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
750 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
751 } else {
752 mcp->mb[1] = LSW(risc_addr);
753 mcp->out_mb |= MBX_1;
754 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
755 mcp->mb[2] = 0;
756 mcp->out_mb |= MBX_2;
757 }
758 }
759
760 mcp->tov = MBX_TOV_SECONDS;
761 mcp->flags = 0;
762 rval = qla2x00_mailbox_command(vha, mcp);
763
764 if (rval != QLA_SUCCESS) {
765 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
766 mcp->mb[1] == 0x27 && retry) {
767 semaphore = 1;
768 retry--;
769 ql_dbg(ql_dbg_async, vha, 0x1026,
770 fmt: "Exe FW: force semaphore.\n");
771 goto again;
772 }
773
774 if (retry) {
775 retry--;
776 ql_dbg(ql_dbg_async, vha, 0x509d,
777 fmt: "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
778 goto again;
779 }
780 ql_dbg(ql_dbg_mbx, vha, 0x1026,
781 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
782 vha->hw_err_cnt++;
783 return rval;
784 }
785
786 if (!IS_FWI2_CAPABLE(ha))
787 goto done;
788
789 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
790 ql_dbg(ql_dbg_mbx, vha, 0x119a,
791 fmt: "fw_ability_mask=%x.\n", ha->fw_ability_mask);
792 ql_dbg(ql_dbg_mbx, vha, 0x1027, fmt: "exchanges=%x.\n", mcp->mb[1]);
793 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
794 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
795 ql_dbg(ql_dbg_mbx, vha, 0x119b, fmt: "max_supported_speed=%s.\n",
796 ha->max_supported_speed == 0 ? "16Gps" :
797 ha->max_supported_speed == 1 ? "32Gps" :
798 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
799 if (vha->min_supported_speed) {
800 ha->min_supported_speed = mcp->mb[5] &
801 (BIT_0 | BIT_1 | BIT_2);
802 ql_dbg(ql_dbg_mbx, vha, 0x119c,
803 fmt: "min_supported_speed=%s.\n",
804 ha->min_supported_speed == 6 ? "64Gps" :
805 ha->min_supported_speed == 5 ? "32Gps" :
806 ha->min_supported_speed == 4 ? "16Gps" :
807 ha->min_supported_speed == 3 ? "8Gps" :
808 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
809 }
810 }
811
812 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
813 ha->flags.edif_hw = 1;
814 ql_log(ql_log_info, vha, 0xffff,
815 fmt: "%s: edif HW\n", __func__);
816 }
817
818done:
819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
820 fmt: "Done %s.\n", __func__);
821
822 return rval;
823}
824
/*
 * qla_get_exlogin_status
 *	Get the extended login status
 *	using the memory offload control/status mailbox command.
 *
 * Input:
 *	vha:		adapter state pointer.
 *	buf_sz:		pointer for returned buffer size.
 *	ex_logins_cnt:	pointer for returned extended login count.
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
840#define FETCH_XLOGINS_STAT 0x8
841int
842qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
843 uint16_t *ex_logins_cnt)
844{
845 int rval;
846 mbx_cmd_t mc;
847 mbx_cmd_t *mcp = &mc;
848
849 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
850 fmt: "Entered %s\n", __func__);
851
852 memset(mcp->mb, 0 , sizeof(mcp->mb));
853 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
854 mcp->mb[1] = FETCH_XLOGINS_STAT;
855 mcp->out_mb = MBX_1|MBX_0;
856 mcp->in_mb = MBX_10|MBX_4|MBX_0;
857 mcp->tov = MBX_TOV_SECONDS;
858 mcp->flags = 0;
859
860 rval = qla2x00_mailbox_command(vha, mcp);
861 if (rval != QLA_SUCCESS) {
862 ql_dbg(ql_dbg_mbx, vha, 0x1115, fmt: "Failed=%x.\n", rval);
863 } else {
864 *buf_sz = mcp->mb[4];
865 *ex_logins_cnt = mcp->mb[10];
866
867 ql_log(ql_log_info, vha, 0x1190,
868 fmt: "buffer size 0x%x, exchange login count=%d\n",
869 mcp->mb[4], mcp->mb[10]);
870
871 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
872 fmt: "Done %s.\n", __func__);
873 }
874
875 return rval;
876}
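/*
 * Illustrative pairing (sketch, inferred from the fields above): the
 * extended-login setup path first queries the per-login buffer size and
 * login count here, sizes ha->exlogin_buf accordingly, and then hands the
 * buffer to the firmware with qla_set_exlogin_mem_cfg() before the init
 * control block is issued.
 */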
877
/*
 * qla_set_exlogin_mem_cfg
 *	Set the extended login memory configuration.
 *	This mailbox command must be issued before init_cb is set.
 *
 * Input:
 *	vha:		adapter state pointer.
 *	phys_addr:	physical address of the extended login buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
897#define CONFIG_XLOGINS_MEM 0x9
898int
899qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
900{
901 int rval;
902 mbx_cmd_t mc;
903 mbx_cmd_t *mcp = &mc;
904 struct qla_hw_data *ha = vha->hw;
905
906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
907 fmt: "Entered %s.\n", __func__);
908
909 memset(mcp->mb, 0 , sizeof(mcp->mb));
910 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
911 mcp->mb[1] = CONFIG_XLOGINS_MEM;
912 mcp->mb[2] = MSW(phys_addr);
913 mcp->mb[3] = LSW(phys_addr);
914 mcp->mb[6] = MSW(MSD(phys_addr));
915 mcp->mb[7] = LSW(MSD(phys_addr));
916 mcp->mb[8] = MSW(ha->exlogin_size);
917 mcp->mb[9] = LSW(ha->exlogin_size);
918 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
919 mcp->in_mb = MBX_11|MBX_0;
920 mcp->tov = MBX_TOV_SECONDS;
921 mcp->flags = 0;
922 rval = qla2x00_mailbox_command(vha, mcp);
923 if (rval != QLA_SUCCESS) {
924 ql_dbg(ql_dbg_mbx, vha, 0x111b,
925 fmt: "EXlogin Failed=%x. MB0=%x MB11=%x\n",
926 rval, mcp->mb[0], mcp->mb[11]);
927 } else {
928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
929 fmt: "Done %s.\n", __func__);
930 }
931
932 return rval;
933}
934
/*
 * qla_get_exchoffld_status
 *	Get the exchange offload status
 *	using the memory offload control/status mailbox command.
 *
 * Input:
 *	vha:		adapter state pointer.
 *	buf_sz:		pointer for returned buffer size.
 *	ex_logins_cnt:	pointer for returned exchange offload count.
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
950#define FETCH_XCHOFFLD_STAT 0x2
951int
952qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
953 uint16_t *ex_logins_cnt)
954{
955 int rval;
956 mbx_cmd_t mc;
957 mbx_cmd_t *mcp = &mc;
958
959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
960 fmt: "Entered %s\n", __func__);
961
962 memset(mcp->mb, 0 , sizeof(mcp->mb));
963 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
964 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
965 mcp->out_mb = MBX_1|MBX_0;
966 mcp->in_mb = MBX_10|MBX_4|MBX_0;
967 mcp->tov = MBX_TOV_SECONDS;
968 mcp->flags = 0;
969
970 rval = qla2x00_mailbox_command(vha, mcp);
971 if (rval != QLA_SUCCESS) {
972 ql_dbg(ql_dbg_mbx, vha, 0x1155, fmt: "Failed=%x.\n", rval);
973 } else {
974 *buf_sz = mcp->mb[4];
975 *ex_logins_cnt = mcp->mb[10];
976
977 ql_log(ql_log_info, vha, 0x118e,
978 fmt: "buffer size 0x%x, exchange offload count=%d\n",
979 mcp->mb[4], mcp->mb[10]);
980
981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
982 fmt: "Done %s.\n", __func__);
983 }
984
985 return rval;
986}
987
/*
 * qla_set_exchoffld_mem_cfg
 *	Set the exchange offload memory configuration.
 *	This mailbox command must be issued before init_cb is set.
 *
 * Input:
 *	vha:		adapter state pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
1007#define CONFIG_XCHOFFLD_MEM 0x3
1008int
1009qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1010{
1011 int rval;
1012 mbx_cmd_t mc;
1013 mbx_cmd_t *mcp = &mc;
1014 struct qla_hw_data *ha = vha->hw;
1015
1016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1017 fmt: "Entered %s.\n", __func__);
1018
1019 memset(mcp->mb, 0 , sizeof(mcp->mb));
1020 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1021 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1022 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1023 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1024 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1025 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1026 mcp->mb[8] = MSW(ha->exchoffld_size);
1027 mcp->mb[9] = LSW(ha->exchoffld_size);
1028 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1029 mcp->in_mb = MBX_11|MBX_0;
1030 mcp->tov = MBX_TOV_SECONDS;
1031 mcp->flags = 0;
1032 rval = qla2x00_mailbox_command(vha, mcp);
1033 if (rval != QLA_SUCCESS) {
1034 /*EMPTY*/
1035 ql_dbg(ql_dbg_mbx, vha, 0x1158, fmt: "Failed=%x.\n", rval);
1036 } else {
1037 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1038 fmt: "Done %s.\n", __func__);
1039 }
1040
1041 return rval;
1042}
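/*
 * As with extended logins above, the exchange offload path is expected to
 * query the required size with qla_get_exchoffld_status(), allocate
 * ha->exchoffld_buf/ha->exchoffld_buf_dma accordingly, and then call
 * qla_set_exchoffld_mem_cfg() before init_cb is issued (a descriptive note,
 * inferred from the mailbox fields used above).
 */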
1043
/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	vha:		adapter state pointer.
 *
 * Output:
 *	Firmware version, attribute and capability fields are stored in
 *	vha->hw (fw_major_version, fw_minor_version, fw_subminor_version,
 *	fw_attributes, ...).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
1060int
1061qla2x00_get_fw_version(scsi_qla_host_t *vha)
1062{
1063 int rval;
1064 mbx_cmd_t mc;
1065 mbx_cmd_t *mcp = &mc;
1066 struct qla_hw_data *ha = vha->hw;
1067
1068 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1069 fmt: "Entered %s.\n", __func__);
1070
1071 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1072 mcp->out_mb = MBX_0;
1073 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1074 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1075 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1076 if (IS_FWI2_CAPABLE(ha))
1077 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1078 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1079 mcp->in_mb |=
1080 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1081 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1082
1083 mcp->flags = 0;
1084 mcp->tov = MBX_TOV_SECONDS;
1085 rval = qla2x00_mailbox_command(vha, mcp);
1086 if (rval != QLA_SUCCESS)
1087 goto failed;
1088
1089 /* Return mailbox data. */
1090 ha->fw_major_version = mcp->mb[1];
1091 ha->fw_minor_version = mcp->mb[2];
1092 ha->fw_subminor_version = mcp->mb[3];
1093 ha->fw_attributes = mcp->mb[6];
1094 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1095 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1096 else
1097 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1098
1099 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1100 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1101 ha->mpi_version[1] = mcp->mb[11] >> 8;
1102 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1103 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1104 ha->phy_version[0] = mcp->mb[8] & 0xff;
1105 ha->phy_version[1] = mcp->mb[9] >> 8;
1106 ha->phy_version[2] = mcp->mb[9] & 0xff;
1107 }
1108
1109 if (IS_FWI2_CAPABLE(ha)) {
1110 ha->fw_attributes_h = mcp->mb[15];
1111 ha->fw_attributes_ext[0] = mcp->mb[16];
1112 ha->fw_attributes_ext[1] = mcp->mb[17];
1113 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1114 fmt: "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1115 __func__, mcp->mb[15], mcp->mb[6]);
1116 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1117 fmt: "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1118 __func__, mcp->mb[17], mcp->mb[16]);
1119
1120 if (ha->fw_attributes_h & 0x4)
1121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1122 fmt: "%s: Firmware supports Extended Login 0x%x\n",
1123 __func__, ha->fw_attributes_h);
1124
1125 if (ha->fw_attributes_h & 0x8)
1126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1127 fmt: "%s: Firmware supports Exchange Offload 0x%x\n",
1128 __func__, ha->fw_attributes_h);
1129
1130 /*
1131 * FW supports nvme and driver load parameter requested nvme.
1132 * BIT 26 of fw_attributes indicates NVMe support.
1133 */
1134 if ((ha->fw_attributes_h &
1135 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1136 ql2xnvmeenable) {
1137 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1138 vha->flags.nvme_first_burst = 1;
1139
1140 vha->flags.nvme_enabled = 1;
1141 ql_log(ql_log_info, vha, 0xd302,
1142 fmt: "%s: FC-NVMe is Enabled (0x%x)\n",
1143 __func__, ha->fw_attributes_h);
1144 }
1145
1146 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1147 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1148 ql_log(ql_log_info, vha, 0xd302,
1149 fmt: "Firmware supports NVMe2 0x%x\n",
1150 ha->fw_attributes_ext[0]);
1151 vha->flags.nvme2_enabled = 1;
1152 }
1153
1154 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1155 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1156 ha->flags.edif_enabled = 1;
1157 ql_log(ql_log_info, vha, 0xffff,
1158 fmt: "%s: edif is enabled\n", __func__);
1159 }
1160 }
1161
1162 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1163 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1164 ha->serdes_version[1] = mcp->mb[8] >> 8;
1165 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1166 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1167 ha->mpi_version[1] = mcp->mb[11] >> 8;
1168 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1169 ha->pep_version[0] = mcp->mb[13] & 0xff;
1170 ha->pep_version[1] = mcp->mb[14] >> 8;
1171 ha->pep_version[2] = mcp->mb[14] & 0xff;
1172 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1173 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1174 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1175 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1176 if (IS_QLA28XX(ha)) {
1177 if (mcp->mb[16] & BIT_10)
1178 ha->flags.secure_fw = 1;
1179
1180 ql_log(ql_log_info, vha, 0xffff,
1181 fmt: "Secure Flash Update in FW: %s\n",
1182 (ha->flags.secure_fw) ? "Supported" :
1183 "Not Supported");
1184 }
1185
1186 if (ha->flags.scm_supported_a &&
1187 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1188 ha->flags.scm_supported_f = 1;
1189 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1190 }
1191 ql_log(ql_log_info, vha, 0x11a3, fmt: "SCM in FW: %s\n",
1192 (ha->flags.scm_supported_f) ? "Supported" :
1193 "Not Supported");
1194
1195 if (vha->flags.nvme2_enabled) {
1196 /* set BIT_15 of special feature control block for SLER */
1197 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1198 /* set BIT_14 of special feature control block for PI CTRL*/
1199 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1200 }
1201 }
1202
1203failed:
1204 if (rval != QLA_SUCCESS) {
1205 /*EMPTY*/
1206 ql_dbg(ql_dbg_mbx, vha, 0x102a, fmt: "Failed=%x.\n", rval);
1207 } else {
1208 /*EMPTY*/
1209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1210 fmt: "Done %s.\n", __func__);
1211 }
1212 return rval;
1213}
1214
/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fwopts = pointer for returned firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
1229int
1230qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1231{
1232 int rval;
1233 mbx_cmd_t mc;
1234 mbx_cmd_t *mcp = &mc;
1235
1236 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1237 fmt: "Entered %s.\n", __func__);
1238
1239 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1240 mcp->out_mb = MBX_0;
1241 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1242 mcp->tov = MBX_TOV_SECONDS;
1243 mcp->flags = 0;
1244 rval = qla2x00_mailbox_command(vha, mcp);
1245
1246 if (rval != QLA_SUCCESS) {
1247 /*EMPTY*/
1248 ql_dbg(ql_dbg_mbx, vha, 0x102d, fmt: "Failed=%x.\n", rval);
1249 } else {
1250 fwopts[0] = mcp->mb[0];
1251 fwopts[1] = mcp->mb[1];
1252 fwopts[2] = mcp->mb[2];
1253 fwopts[3] = mcp->mb[3];
1254
1255 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1256 fmt: "Done %s.\n", __func__);
1257 }
1258
1259 return rval;
1260}
1261
1262
1263/*
1264 * qla2x00_set_fw_options
1265 * Set firmware options.
1266 *
1267 * Input:
1268 * ha = adapter block pointer.
1269 * fwopt = pointer for firmware options.
1270 *
1271 * Returns:
1272 * qla2x00 local function return status code.
1273 *
1274 * Context:
1275 * Kernel context.
1276 */
1277int
1278qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1279{
1280 int rval;
1281 mbx_cmd_t mc;
1282 mbx_cmd_t *mcp = &mc;
1283
1284 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1285 fmt: "Entered %s.\n", __func__);
1286
1287 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1288 mcp->mb[1] = fwopts[1];
1289 mcp->mb[2] = fwopts[2];
1290 mcp->mb[3] = fwopts[3];
1291 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1292 mcp->in_mb = MBX_0;
1293 if (IS_FWI2_CAPABLE(vha->hw)) {
1294 mcp->in_mb |= MBX_1;
1295 mcp->mb[10] = fwopts[10];
1296 mcp->out_mb |= MBX_10;
1297 } else {
1298 mcp->mb[10] = fwopts[10];
1299 mcp->mb[11] = fwopts[11];
1300 mcp->mb[12] = 0; /* Undocumented, but used */
1301 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1302 }
1303 mcp->tov = MBX_TOV_SECONDS;
1304 mcp->flags = 0;
1305 rval = qla2x00_mailbox_command(vha, mcp);
1306
1307 fwopts[0] = mcp->mb[0];
1308
1309 if (rval != QLA_SUCCESS) {
1310 /*EMPTY*/
1311 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1312 fmt: "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1313 } else {
1314 /*EMPTY*/
1315 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1316 fmt: "Done %s.\n", __func__);
1317 }
1318
1319 return rval;
1320}
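/*
 * Illustrative read-modify-write use of the two helpers above (a sketch;
 * the option bit shown is hypothetical):
 *
 *	uint16_t fwopts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, fwopts) == QLA_SUCCESS) {
 *		fwopts[2] |= BIT_3;		// hypothetical option bit
 *		qla2x00_set_fw_options(vha, fwopts);
 *	}
 */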
1321
1322/*
1323 * qla2x00_mbx_reg_test
1324 * Mailbox register wrap test.
1325 *
1326 * Input:
1327 * ha = adapter block pointer.
1328 * TARGET_QUEUE_LOCK must be released.
1329 * ADAPTER_STATE_LOCK must be released.
1330 *
1331 * Returns:
1332 * qla2x00 local function return status code.
1333 *
1334 * Context:
1335 * Kernel context.
1336 */
1337int
1338qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1339{
1340 int rval;
1341 mbx_cmd_t mc;
1342 mbx_cmd_t *mcp = &mc;
1343
1344 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1345 fmt: "Entered %s.\n", __func__);
1346
1347 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1348 mcp->mb[1] = 0xAAAA;
1349 mcp->mb[2] = 0x5555;
1350 mcp->mb[3] = 0xAA55;
1351 mcp->mb[4] = 0x55AA;
1352 mcp->mb[5] = 0xA5A5;
1353 mcp->mb[6] = 0x5A5A;
1354 mcp->mb[7] = 0x2525;
1355 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1356 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1357 mcp->tov = MBX_TOV_SECONDS;
1358 mcp->flags = 0;
1359 rval = qla2x00_mailbox_command(vha, mcp);
1360
1361 if (rval == QLA_SUCCESS) {
1362 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1363 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1364 rval = QLA_FUNCTION_FAILED;
1365 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1366 mcp->mb[7] != 0x2525)
1367 rval = QLA_FUNCTION_FAILED;
1368 }
1369
1370 if (rval != QLA_SUCCESS) {
1371 /*EMPTY*/
1372 ql_dbg(ql_dbg_mbx, vha, 0x1033, fmt: "Failed=%x.\n", rval);
1373 vha->hw_err_cnt++;
1374 } else {
1375 /*EMPTY*/
1376 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1377 fmt: "Done %s.\n", __func__);
1378 }
1379
1380 return rval;
1381}
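/*
 * The register wrap test writes the fixed patterns above through mb[1]-mb[7]
 * and fails unless the firmware echoes every pattern back unchanged; it is
 * intended as a basic mailbox-path sanity check during chip diagnostics.
 */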
1382
1383/*
1384 * qla2x00_verify_checksum
1385 * Verify firmware checksum.
1386 *
1387 * Input:
1388 * ha = adapter block pointer.
1389 * TARGET_QUEUE_LOCK must be released.
1390 * ADAPTER_STATE_LOCK must be released.
1391 *
1392 * Returns:
1393 * qla2x00 local function return status code.
1394 *
1395 * Context:
1396 * Kernel context.
1397 */
1398int
1399qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1400{
1401 int rval;
1402 mbx_cmd_t mc;
1403 mbx_cmd_t *mcp = &mc;
1404
1405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1406 fmt: "Entered %s.\n", __func__);
1407
1408 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1409 mcp->out_mb = MBX_0;
1410 mcp->in_mb = MBX_0;
1411 if (IS_FWI2_CAPABLE(vha->hw)) {
1412 mcp->mb[1] = MSW(risc_addr);
1413 mcp->mb[2] = LSW(risc_addr);
1414 mcp->out_mb |= MBX_2|MBX_1;
1415 mcp->in_mb |= MBX_2|MBX_1;
1416 } else {
1417 mcp->mb[1] = LSW(risc_addr);
1418 mcp->out_mb |= MBX_1;
1419 mcp->in_mb |= MBX_1;
1420 }
1421
1422 mcp->tov = MBX_TOV_SECONDS;
1423 mcp->flags = 0;
1424 rval = qla2x00_mailbox_command(vha, mcp);
1425
1426 if (rval != QLA_SUCCESS) {
1427 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1428 fmt: "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1429 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1430 } else {
1431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1432 fmt: "Done %s.\n", __func__);
1433 }
1434
1435 return rval;
1436}
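/*
 * Note: on failure the logged value is the checksum reported by the firmware
 * (mb[2]:mb[1] on FWI2-capable ISPs, mb[1] otherwise); the command is
 * normally issued after the firmware image has been loaded and before it is
 * executed.
 */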
1437
1438/*
1439 * qla2x00_issue_iocb
1440 * Issue IOCB using mailbox command
1441 *
1442 * Input:
1443 * ha = adapter state pointer.
1444 * buffer = buffer pointer.
1445 * phys_addr = physical address of buffer.
1446 * size = size of buffer.
1447 * TARGET_QUEUE_LOCK must be released.
1448 * ADAPTER_STATE_LOCK must be released.
1449 *
1450 * Returns:
1451 * qla2x00 local function return status code.
1452 *
1453 * Context:
1454 * Kernel context.
1455 */
1456int
1457qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1458 dma_addr_t phys_addr, size_t size, uint32_t tov)
1459{
1460 int rval;
1461 mbx_cmd_t mc;
1462 mbx_cmd_t *mcp = &mc;
1463
1464 if (!vha->hw->flags.fw_started)
1465 return QLA_INVALID_COMMAND;
1466
1467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1468 fmt: "Entered %s.\n", __func__);
1469
1470 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1471 mcp->mb[1] = 0;
1472 mcp->mb[2] = MSW(LSD(phys_addr));
1473 mcp->mb[3] = LSW(LSD(phys_addr));
1474 mcp->mb[6] = MSW(MSD(phys_addr));
1475 mcp->mb[7] = LSW(MSD(phys_addr));
1476 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1477 mcp->in_mb = MBX_1|MBX_0;
1478 mcp->tov = tov;
1479 mcp->flags = 0;
1480 rval = qla2x00_mailbox_command(vha, mcp);
1481
1482 if (rval != QLA_SUCCESS) {
1483 /*EMPTY*/
1484 ql_dbg(ql_dbg_mbx, vha, 0x1039, fmt: "Failed=%x.\n", rval);
1485 } else {
1486 sts_entry_t *sts_entry = buffer;
1487
1488 /* Mask reserved bits. */
1489 sts_entry->entry_status &=
1490 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1492 fmt: "Done %s (status=%x).\n", __func__,
1493 sts_entry->entry_status);
1494 }
1495
1496 return rval;
1497}
1498
1499int
1500qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1501 size_t size)
1502{
1503 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1504 MBX_TOV_SECONDS);
1505}
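/*
 * Illustrative use (sketch): callers build a complete IOCB in a DMA-coherent
 * buffer and pass its bus address here, e.g. for a pass-through request:
 *
 *	pkt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pkt_dma);
 *	// ... fill in the IOCB fields in pkt ...
 *	rval = qla2x00_issue_iocb(vha, pkt, pkt_dma, 0);
 *	// on success the same buffer holds the returned status entry
 *	dma_pool_free(ha->s_dma_pool, pkt, pkt_dma);
 */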
1506
/*
 * qla2x00_abort_command
 *	Abort a previously issued command identified by its SRB.
 *
 * Input:
 *	sp = SRB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
1521int
1522qla2x00_abort_command(srb_t *sp)
1523{
1524 unsigned long flags = 0;
1525 int rval;
1526 uint32_t handle = 0;
1527 mbx_cmd_t mc;
1528 mbx_cmd_t *mcp = &mc;
1529 fc_port_t *fcport = sp->fcport;
1530 scsi_qla_host_t *vha = fcport->vha;
1531 struct qla_hw_data *ha = vha->hw;
1532 struct req_que *req;
1533 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1534
1535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1536 fmt: "Entered %s.\n", __func__);
1537
1538 if (sp->qpair)
1539 req = sp->qpair->req;
1540 else
1541 req = vha->req;
1542
1543 spin_lock_irqsave(&ha->hardware_lock, flags);
1544 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1545 if (req->outstanding_cmds[handle] == sp)
1546 break;
1547 }
1548 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
1549
1550 if (handle == req->num_outstanding_cmds) {
1551 /* command not found */
1552 return QLA_FUNCTION_FAILED;
1553 }
1554
1555 mcp->mb[0] = MBC_ABORT_COMMAND;
1556 if (HAS_EXTENDED_IDS(ha))
1557 mcp->mb[1] = fcport->loop_id;
1558 else
1559 mcp->mb[1] = fcport->loop_id << 8;
1560 mcp->mb[2] = (uint16_t)handle;
1561 mcp->mb[3] = (uint16_t)(handle >> 16);
1562 mcp->mb[6] = (uint16_t)cmd->device->lun;
1563 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1564 mcp->in_mb = MBX_0;
1565 mcp->tov = MBX_TOV_SECONDS;
1566 mcp->flags = 0;
1567 rval = qla2x00_mailbox_command(vha, mcp);
1568
1569 if (rval != QLA_SUCCESS) {
1570 ql_dbg(ql_dbg_mbx, vha, 0x103c, fmt: "Failed=%x.\n", rval);
1571 } else {
1572 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1573 fmt: "Done %s.\n", __func__);
1574 }
1575
1576 return rval;
1577}
1578
1579int
1580qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1581{
1582 int rval, rval2;
1583 mbx_cmd_t mc;
1584 mbx_cmd_t *mcp = &mc;
1585 scsi_qla_host_t *vha;
1586
1587 vha = fcport->vha;
1588
1589 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1590 fmt: "Entered %s.\n", __func__);
1591
1592 mcp->mb[0] = MBC_ABORT_TARGET;
1593 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1594 if (HAS_EXTENDED_IDS(vha->hw)) {
1595 mcp->mb[1] = fcport->loop_id;
1596 mcp->mb[10] = 0;
1597 mcp->out_mb |= MBX_10;
1598 } else {
1599 mcp->mb[1] = fcport->loop_id << 8;
1600 }
1601 mcp->mb[2] = vha->hw->loop_reset_delay;
1602 mcp->mb[9] = vha->vp_idx;
1603
1604 mcp->in_mb = MBX_0;
1605 mcp->tov = MBX_TOV_SECONDS;
1606 mcp->flags = 0;
1607 rval = qla2x00_mailbox_command(vha, mcp);
1608 if (rval != QLA_SUCCESS) {
1609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1610 fmt: "Failed=%x.\n", rval);
1611 }
1612
1613 /* Issue marker IOCB. */
1614 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1615 MK_SYNC_ID);
1616 if (rval2 != QLA_SUCCESS) {
1617 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1618 fmt: "Failed to issue marker IOCB (%x).\n", rval2);
1619 } else {
1620 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1621 fmt: "Done %s.\n", __func__);
1622 }
1623
1624 return rval;
1625}
1626
1627int
1628qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1629{
1630 int rval, rval2;
1631 mbx_cmd_t mc;
1632 mbx_cmd_t *mcp = &mc;
1633 scsi_qla_host_t *vha;
1634
1635 vha = fcport->vha;
1636
1637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1638 fmt: "Entered %s.\n", __func__);
1639
1640 mcp->mb[0] = MBC_LUN_RESET;
1641 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1642 if (HAS_EXTENDED_IDS(vha->hw))
1643 mcp->mb[1] = fcport->loop_id;
1644 else
1645 mcp->mb[1] = fcport->loop_id << 8;
1646 mcp->mb[2] = (u32)l;
1647 mcp->mb[3] = 0;
1648 mcp->mb[9] = vha->vp_idx;
1649
1650 mcp->in_mb = MBX_0;
1651 mcp->tov = MBX_TOV_SECONDS;
1652 mcp->flags = 0;
1653 rval = qla2x00_mailbox_command(vha, mcp);
1654 if (rval != QLA_SUCCESS) {
1655 ql_dbg(ql_dbg_mbx, vha, 0x1043, fmt: "Failed=%x.\n", rval);
1656 }
1657
1658 /* Issue marker IOCB. */
1659 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1660 MK_SYNC_ID_LUN);
1661 if (rval2 != QLA_SUCCESS) {
1662 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1663 fmt: "Failed to issue marker IOCB (%x).\n", rval2);
1664 } else {
1665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1666 fmt: "Done %s.\n", __func__);
1667 }
1668
1669 return rval;
1670}
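/*
 * Both TMF helpers above follow the same shape: issue the abort/reset
 * mailbox command for the target or LUN, then send a marker IOCB
 * (MK_SYNC_ID / MK_SYNC_ID_LUN) so command ordering is re-synchronized with
 * the firmware; a marker failure is only logged, and the mailbox status is
 * what gets returned.
 */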
1671
1672/*
1673 * qla2x00_get_adapter_id
1674 * Get adapter ID and topology.
1675 *
1676 * Input:
1677 * ha = adapter block pointer.
1678 * id = pointer for loop ID.
1679 * al_pa = pointer for AL_PA.
1680 * area = pointer for area.
1681 * domain = pointer for domain.
1682 * top = pointer for topology.
1683 * TARGET_QUEUE_LOCK must be released.
1684 * ADAPTER_STATE_LOCK must be released.
1685 *
1686 * Returns:
1687 * qla2x00 local function return status code.
1688 *
1689 * Context:
1690 * Kernel context.
1691 */
1692int
1693qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1694 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1695{
1696 int rval;
1697 mbx_cmd_t mc;
1698 mbx_cmd_t *mcp = &mc;
1699
1700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1701 fmt: "Entered %s.\n", __func__);
1702
1703 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1704 mcp->mb[9] = vha->vp_idx;
1705 mcp->out_mb = MBX_9|MBX_0;
1706 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1707 if (IS_CNA_CAPABLE(vha->hw))
1708 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1709 if (IS_FWI2_CAPABLE(vha->hw))
1710 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1711 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1712 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
1713
1714 mcp->tov = MBX_TOV_SECONDS;
1715 mcp->flags = 0;
1716 rval = qla2x00_mailbox_command(vha, mcp);
1717 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1718 rval = QLA_COMMAND_ERROR;
1719 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1720 rval = QLA_INVALID_COMMAND;
1721
1722 /* Return data. */
1723 *id = mcp->mb[1];
1724 *al_pa = LSB(mcp->mb[2]);
1725 *area = MSB(mcp->mb[2]);
1726 *domain = LSB(mcp->mb[3]);
1727 *top = mcp->mb[6];
1728 *sw_cap = mcp->mb[7];
1729
1730 if (rval != QLA_SUCCESS) {
1731 /*EMPTY*/
1732 ql_dbg(ql_dbg_mbx, vha, 0x1047, fmt: "Failed=%x.\n", rval);
1733 } else {
1734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1735 fmt: "Done %s.\n", __func__);
1736
1737 if (IS_CNA_CAPABLE(vha->hw)) {
1738 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1739 vha->fcoe_fcf_idx = mcp->mb[10];
1740 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1741 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1742 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1743 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1744 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1745 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1746 }
1747 /* If FA-WWN supported */
1748 if (IS_FAWWN_CAPABLE(vha->hw)) {
1749 if (mcp->mb[7] & BIT_14) {
1750 vha->port_name[0] = MSB(mcp->mb[16]);
1751 vha->port_name[1] = LSB(mcp->mb[16]);
1752 vha->port_name[2] = MSB(mcp->mb[17]);
1753 vha->port_name[3] = LSB(mcp->mb[17]);
1754 vha->port_name[4] = MSB(mcp->mb[18]);
1755 vha->port_name[5] = LSB(mcp->mb[18]);
1756 vha->port_name[6] = MSB(mcp->mb[19]);
1757 vha->port_name[7] = LSB(mcp->mb[19]);
1758 fc_host_port_name(vha->host) =
1759 wwn_to_u64(wwn: vha->port_name);
1760 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1761 fmt: "FA-WWN acquired %016llx\n",
1762 wwn_to_u64(wwn: vha->port_name));
1763 }
1764 }
1765
1766 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1767 vha->bbcr = mcp->mb[15];
1768 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1769 ql_log(ql_log_info, vha, 0x11a4,
1770 fmt: "SCM: EDC ELS completed, flags 0x%x\n",
1771 mcp->mb[21]);
1772 }
1773 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1774 vha->hw->flags.scm_enabled = 1;
1775 vha->scm_fabric_connection_flags |=
1776 SCM_FLAG_RDF_COMPLETED;
1777 ql_log(ql_log_info, vha, 0x11a5,
1778 fmt: "SCM: RDF ELS completed, flags 0x%x\n",
1779 mcp->mb[23]);
1780 }
1781 }
1782 }
1783
1784 return rval;
1785}
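/*
 * Illustrative use of the returned values (sketch): the caller typically
 * assembles the 24-bit port ID from the three address bytes, e.g.
 *
 *	vha->d_id.b.domain = domain;
 *	vha->d_id.b.area = area;
 *	vha->d_id.b.al_pa = al_pa;
 *
 * while *top reports the current topology (loop, F_Port, etc.).
 */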
1786
1787/*
1788 * qla2x00_get_retry_cnt
1789 * Get current firmware login retry count and delay.
1790 *
1791 * Input:
1792 * ha = adapter block pointer.
1793 * retry_cnt = pointer to login retry count.
1794 * tov = pointer to login timeout value.
1795 *
1796 * Returns:
1797 * qla2x00 local function return status code.
1798 *
1799 * Context:
1800 * Kernel context.
1801 */
1802int
1803qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1804 uint16_t *r_a_tov)
1805{
1806 int rval;
1807 uint16_t ratov;
1808 mbx_cmd_t mc;
1809 mbx_cmd_t *mcp = &mc;
1810
1811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1812 fmt: "Entered %s.\n", __func__);
1813
1814 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1815 mcp->out_mb = MBX_0;
1816 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1817 mcp->tov = MBX_TOV_SECONDS;
1818 mcp->flags = 0;
1819 rval = qla2x00_mailbox_command(vha, mcp);
1820
1821 if (rval != QLA_SUCCESS) {
1822 /*EMPTY*/
1823 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1824 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1825 } else {
1826 /* Convert returned data and check our values. */
1827 *r_a_tov = mcp->mb[3] / 2;
1828 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1829 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1830 /* Update to the larger values */
1831 *retry_cnt = (uint8_t)mcp->mb[1];
1832 *tov = ratov;
1833 }
1834
1835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1836 fmt: "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1837 }
1838
1839 return rval;
1840}
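/*
 * Worked example of the conversion above (illustrative): with mb[1] = 8 and
 * mb[3] = 200 (i.e. 20 s expressed in 100 ms units), *r_a_tov becomes 100
 * and ratov = 10 s, so retry_cnt/tov are only updated if 8 * 10 exceeds the
 * caller's current retry_cnt * tov.
 */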
1841
/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	vha = adapter block pointer.
 *	size = size of the initialization control block (ha->init_cb).
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
1859int
1860qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1861{
1862 int rval;
1863 mbx_cmd_t mc;
1864 mbx_cmd_t *mcp = &mc;
1865 struct qla_hw_data *ha = vha->hw;
1866
1867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1868 fmt: "Entered %s.\n", __func__);
1869
1870 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1871 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1872 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1873
1874 if (ha->flags.npiv_supported)
1875 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1876 else
1877 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1878
1879 mcp->mb[1] = 0;
1880 mcp->mb[2] = MSW(ha->init_cb_dma);
1881 mcp->mb[3] = LSW(ha->init_cb_dma);
1882 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1883 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1884 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1885 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1886 mcp->mb[1] = BIT_0;
1887 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1888 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1889 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1890 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1891 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1892 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1893 }
1894
1895 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1896 mcp->mb[1] |= BIT_1;
1897 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1898 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1899 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1900 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1901 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1902 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1903 }
1904
	/* mb[1] and mb[2] should normally be captured. */
1906 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1907 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1908 /* mb3 is additional info about the installed SFP. */
1909 mcp->in_mb |= MBX_3;
1910 mcp->buf_size = size;
1911 mcp->flags = MBX_DMA_OUT;
1912 mcp->tov = MBX_TOV_SECONDS;
1913 rval = qla2x00_mailbox_command(vha, mcp);
1914
1915 if (rval != QLA_SUCCESS) {
1916 /*EMPTY*/
1917 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1918 fmt: "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1919 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1920 if (ha->init_cb) {
1921 ql_dbg(ql_dbg_mbx, vha, 0x104d, fmt: "init_cb:\n");
1922 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1923 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1924 }
1925 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1926 ql_dbg(ql_dbg_mbx, vha, 0x104d, fmt: "ex_init_cb:\n");
1927 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1928 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1929 }
1930 } else {
1931 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1932 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1933 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1934 fmt: "Invalid SFP/Validation Failed\n");
1935 }
1936 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1937 fmt: "Done %s.\n", __func__);
1938 }
1939
1940 return rval;
1941}
1942
1943
1944/*
1945 * qla2x00_get_port_database
1946 * Issue normal/enhanced get port database mailbox command
1947 * and copy device name as necessary.
1948 *
1949 * Input:
1950 * ha = adapter state pointer.
1951 * dev = structure pointer.
1952 * opt = enhanced cmd option byte.
1953 *
1954 * Returns:
1955 * qla2x00 local function return status code.
1956 *
1957 * Context:
1958 * Kernel context.
1959 */
1960int
1961qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1962{
1963 int rval;
1964 mbx_cmd_t mc;
1965 mbx_cmd_t *mcp = &mc;
1966 port_database_t *pd;
1967 struct port_database_24xx *pd24;
1968 dma_addr_t pd_dma;
1969 struct qla_hw_data *ha = vha->hw;
1970
1971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
        "Entered %s.\n", __func__);
1973
1974 pd24 = NULL;
    pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1976 if (pd == NULL) {
1977 ql_log(ql_log_warn, vha, 0x1050,
            "Failed to allocate port database structure.\n");
1979 fcport->query = 0;
1980 return QLA_MEMORY_ALLOC_FAILED;
1981 }
1982
1983 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1984 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1985 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1986 mcp->mb[2] = MSW(pd_dma);
1987 mcp->mb[3] = LSW(pd_dma);
1988 mcp->mb[6] = MSW(MSD(pd_dma));
1989 mcp->mb[7] = LSW(MSD(pd_dma));
1990 mcp->mb[9] = vha->vp_idx;
1991 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1992 mcp->in_mb = MBX_0;
1993 if (IS_FWI2_CAPABLE(ha)) {
1994 mcp->mb[1] = fcport->loop_id;
1995 mcp->mb[10] = opt;
1996 mcp->out_mb |= MBX_10|MBX_1;
1997 mcp->in_mb |= MBX_1;
1998 } else if (HAS_EXTENDED_IDS(ha)) {
1999 mcp->mb[1] = fcport->loop_id;
2000 mcp->mb[10] = opt;
2001 mcp->out_mb |= MBX_10|MBX_1;
2002 } else {
2003 mcp->mb[1] = fcport->loop_id << 8 | opt;
2004 mcp->out_mb |= MBX_1;
2005 }
2006 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2007 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
2008 mcp->flags = MBX_DMA_IN;
2009 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2010 rval = qla2x00_mailbox_command(vha, mcp);
2011 if (rval != QLA_SUCCESS)
2012 goto gpd_error_out;
2013
2014 if (IS_FWI2_CAPABLE(ha)) {
2015 uint64_t zero = 0;
2016 u8 current_login_state, last_login_state;
2017
2018 pd24 = (struct port_database_24xx *) pd;
2019
2020 /* Check for logged in state. */
2021 if (NVME_TARGET(ha, fcport)) {
2022 current_login_state = pd24->current_login_state >> 4;
2023 last_login_state = pd24->last_login_state >> 4;
2024 } else {
2025 current_login_state = pd24->current_login_state & 0xf;
2026 last_login_state = pd24->last_login_state & 0xf;
2027 }
2028 fcport->current_login_state = pd24->current_login_state;
2029 fcport->last_login_state = pd24->last_login_state;
2030
2031 /* Check for logged in state. */
2032 if (current_login_state != PDS_PRLI_COMPLETE &&
2033 last_login_state != PDS_PRLI_COMPLETE) {
2034 ql_dbg(ql_dbg_mbx, vha, 0x119a,
                "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2036 current_login_state, last_login_state,
2037 fcport->loop_id);
2038 rval = QLA_FUNCTION_FAILED;
2039
2040 if (!fcport->query)
2041 goto gpd_error_out;
2042 }
2043
2044 if (fcport->loop_id == FC_NO_LOOP_ID ||
            (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
            memcmp(fcport->port_name, pd24->port_name, 8))) {
2047 /* We lost the device mid way. */
2048 rval = QLA_NOT_LOGGED_IN;
2049 goto gpd_error_out;
2050 }
2051
2052 /* Names are little-endian. */
2053 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2054 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2055
2056 /* Get port_id of device. */
2057 fcport->d_id.b.domain = pd24->port_id[0];
2058 fcport->d_id.b.area = pd24->port_id[1];
2059 fcport->d_id.b.al_pa = pd24->port_id[2];
2060 fcport->d_id.b.rsvd_1 = 0;
2061
2062 /* If not target must be initiator or unknown type. */
2063 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2064 fcport->port_type = FCT_INITIATOR;
2065 else
2066 fcport->port_type = FCT_TARGET;
2067
2068 /* Passback COS information. */
2069 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2070 FC_COS_CLASS2 : FC_COS_CLASS3;
2071
2072 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2073 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2074 } else {
2075 uint64_t zero = 0;
2076
2077 /* Check for logged in state. */
2078 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2079 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2080 ql_dbg(ql_dbg_mbx, vha, 0x100a,
                "Unable to verify login-state (%x/%x) - "
2082 "portid=%02x%02x%02x.\n", pd->master_state,
2083 pd->slave_state, fcport->d_id.b.domain,
2084 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2085 rval = QLA_FUNCTION_FAILED;
2086 goto gpd_error_out;
2087 }
2088
2089 if (fcport->loop_id == FC_NO_LOOP_ID ||
            (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
            memcmp(fcport->port_name, pd->port_name, 8))) {
2092 /* We lost the device mid way. */
2093 rval = QLA_NOT_LOGGED_IN;
2094 goto gpd_error_out;
2095 }
2096
2097 /* Names are little-endian. */
2098 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2099 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2100
2101 /* Get port_id of device. */
2102 fcport->d_id.b.domain = pd->port_id[0];
2103 fcport->d_id.b.area = pd->port_id[3];
2104 fcport->d_id.b.al_pa = pd->port_id[2];
2105 fcport->d_id.b.rsvd_1 = 0;
2106
2107 /* If not target must be initiator or unknown type. */
2108 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2109 fcport->port_type = FCT_INITIATOR;
2110 else
2111 fcport->port_type = FCT_TARGET;
2112
2113 /* Passback COS information. */
2114 fcport->supported_classes = (pd->options & BIT_4) ?
2115 FC_COS_CLASS2 : FC_COS_CLASS3;
2116 }
2117
2118gpd_error_out:
    dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2120 fcport->query = 0;
2121
2122 if (rval != QLA_SUCCESS) {
2123 ql_dbg(ql_dbg_mbx, vha, 0x1052,
            "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2125 mcp->mb[0], mcp->mb[1]);
2126 } else {
2127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
            "Done %s.\n", __func__);
2129 }
2130
2131 return rval;
2132}
2133
2134int
2135qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2136 struct port_database_24xx *pdb)
2137{
2138 mbx_cmd_t mc;
2139 mbx_cmd_t *mcp = &mc;
2140 dma_addr_t pdb_dma;
2141 int rval;
2142
2143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
        "Entered %s.\n", __func__);
2145
2146 memset(pdb, 0, sizeof(*pdb));
2147
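    /* Stream-map the caller's buffer for the firmware to DMA the port database into. */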
2148 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2149 sizeof(*pdb), DMA_FROM_DEVICE);
2150 if (!pdb_dma) {
        ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2152 return QLA_MEMORY_ALLOC_FAILED;
2153 }
2154
2155 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2156 mcp->mb[1] = nport_handle;
2157 mcp->mb[2] = MSW(LSD(pdb_dma));
2158 mcp->mb[3] = LSW(LSD(pdb_dma));
2159 mcp->mb[6] = MSW(MSD(pdb_dma));
2160 mcp->mb[7] = LSW(MSD(pdb_dma));
2161 mcp->mb[9] = 0;
2162 mcp->mb[10] = 0;
2163 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2164 mcp->in_mb = MBX_1|MBX_0;
2165 mcp->buf_size = sizeof(*pdb);
2166 mcp->flags = MBX_DMA_IN;
2167 mcp->tov = vha->hw->login_timeout * 2;
2168 rval = qla2x00_mailbox_command(vha, mcp);
2169
2170 if (rval != QLA_SUCCESS) {
2171 ql_dbg(ql_dbg_mbx, vha, 0x111a,
            "Failed=%x mb[0]=%x mb[1]=%x.\n",
2173 rval, mcp->mb[0], mcp->mb[1]);
2174 } else {
2175 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
            "Done %s.\n", __func__);
2177 }
2178
2179 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2180 sizeof(*pdb), DMA_FROM_DEVICE);
2181
2182 return rval;
2183}
2184
2185/*
2186 * qla2x00_get_firmware_state
2187 * Get adapter firmware state.
2188 *
2189 * Input:
2190 * ha = adapter block pointer.
2191 * dptr = pointer for firmware state.
2192 * TARGET_QUEUE_LOCK must be released.
2193 * ADAPTER_STATE_LOCK must be released.
2194 *
2195 * Returns:
2196 * qla2x00 local function return status code.
2197 *
2198 * Context:
2199 * Kernel context.
2200 */
2201int
2202qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2203{
2204 int rval;
2205 mbx_cmd_t mc;
2206 mbx_cmd_t *mcp = &mc;
2207 struct qla_hw_data *ha = vha->hw;
2208
2209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
        "Entered %s.\n", __func__);
2211
2212 if (!ha->flags.fw_started)
2213 return QLA_FUNCTION_FAILED;
2214
2215 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2216 mcp->out_mb = MBX_0;
2217 if (IS_FWI2_CAPABLE(vha->hw))
2218 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2219 else
2220 mcp->in_mb = MBX_1|MBX_0;
2221 mcp->tov = MBX_TOV_SECONDS;
2222 mcp->flags = 0;
2223 rval = qla2x00_mailbox_command(vha, mcp);
2224
2225 /* Return firmware states. */
2226 states[0] = mcp->mb[1];
2227 if (IS_FWI2_CAPABLE(vha->hw)) {
2228 states[1] = mcp->mb[2];
2229 states[2] = mcp->mb[3]; /* SFP info */
2230 states[3] = mcp->mb[4];
2231 states[4] = mcp->mb[5];
2232 states[5] = mcp->mb[6]; /* DPORT status */
2233 }
2234
2235 if (rval != QLA_SUCCESS) {
2236 /*EMPTY*/
        ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2238 } else {
2239 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2240 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2241 ql_dbg(ql_dbg_mbx, vha, 0x119e,
                    "Invalid SFP/Validation Failed\n");
2243 }
2244 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
            "Done %s.\n", __func__);
2246 }
2247
2248 return rval;
2249}
2250
2251/*
2252 * qla2x00_get_port_name
2253 * Issue get port name mailbox command.
2254 * Returned name is in big endian format.
2255 *
2256 * Input:
2257 * ha = adapter block pointer.
2258 * loop_id = loop ID of device.
2259 * name = pointer for name.
2260 * TARGET_QUEUE_LOCK must be released.
2261 * ADAPTER_STATE_LOCK must be released.
2262 *
2263 * Returns:
2264 * qla2x00 local function return status code.
2265 *
2266 * Context:
2267 * Kernel context.
2268 */
2269int
2270qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2271 uint8_t opt)
2272{
2273 int rval;
2274 mbx_cmd_t mc;
2275 mbx_cmd_t *mcp = &mc;
2276
2277 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
        "Entered %s.\n", __func__);
2279
2280 mcp->mb[0] = MBC_GET_PORT_NAME;
2281 mcp->mb[9] = vha->vp_idx;
2282 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2283 if (HAS_EXTENDED_IDS(vha->hw)) {
2284 mcp->mb[1] = loop_id;
2285 mcp->mb[10] = opt;
2286 mcp->out_mb |= MBX_10;
2287 } else {
2288 mcp->mb[1] = loop_id << 8 | opt;
2289 }
2290
2291 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2292 mcp->tov = MBX_TOV_SECONDS;
2293 mcp->flags = 0;
2294 rval = qla2x00_mailbox_command(vha, mcp);
2295
2296 if (rval != QLA_SUCCESS) {
2297 /*EMPTY*/
        ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2299 } else {
2300 if (name != NULL) {
2301 /* This function returns name in big endian. */
2302 name[0] = MSB(mcp->mb[2]);
2303 name[1] = LSB(mcp->mb[2]);
2304 name[2] = MSB(mcp->mb[3]);
2305 name[3] = LSB(mcp->mb[3]);
2306 name[4] = MSB(mcp->mb[6]);
2307 name[5] = LSB(mcp->mb[6]);
2308 name[6] = MSB(mcp->mb[7]);
2309 name[7] = LSB(mcp->mb[7]);
2310 }
2311
2312 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
            "Done %s.\n", __func__);
2314 }
2315
2316 return rval;
2317}
2318
2319/*
 * qla24xx_link_initialize
2321 * Issue link initialization mailbox command.
2322 *
2323 * Input:
2324 * ha = adapter block pointer.
2325 * TARGET_QUEUE_LOCK must be released.
2326 * ADAPTER_STATE_LOCK must be released.
2327 *
2328 * Returns:
2329 * qla2x00 local function return status code.
2330 *
2331 * Context:
2332 * Kernel context.
2333 */
2334int
2335qla24xx_link_initialize(scsi_qla_host_t *vha)
2336{
2337 int rval;
2338 mbx_cmd_t mc;
2339 mbx_cmd_t *mcp = &mc;
2340
2341 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
        "Entered %s.\n", __func__);
2343
2344 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2345 return QLA_FUNCTION_FAILED;
2346
2347 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2348 mcp->mb[1] = BIT_4;
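    /* Select the requested topology: BIT_6 for loop, BIT_5 for point-to-point. */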
2349 if (vha->hw->operating_mode == LOOP)
2350 mcp->mb[1] |= BIT_6;
2351 else
2352 mcp->mb[1] |= BIT_5;
2353 mcp->mb[2] = 0;
2354 mcp->mb[3] = 0;
2355 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2356 mcp->in_mb = MBX_0;
2357 mcp->tov = MBX_TOV_SECONDS;
2358 mcp->flags = 0;
2359 rval = qla2x00_mailbox_command(vha, mcp);
2360
2361 if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2363 } else {
2364 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
            "Done %s.\n", __func__);
2366 }
2367
2368 return rval;
2369}
2370
2371/*
2372 * qla2x00_lip_reset
2373 * Issue LIP reset mailbox command.
2374 *
2375 * Input:
2376 * ha = adapter block pointer.
2377 * TARGET_QUEUE_LOCK must be released.
2378 * ADAPTER_STATE_LOCK must be released.
2379 *
2380 * Returns:
2381 * qla2x00 local function return status code.
2382 *
2383 * Context:
2384 * Kernel context.
2385 */
2386int
2387qla2x00_lip_reset(scsi_qla_host_t *vha)
2388{
2389 int rval;
2390 mbx_cmd_t mc;
2391 mbx_cmd_t *mcp = &mc;
2392
2393 ql_dbg(ql_dbg_disc, vha, 0x105a,
        "Entered %s.\n", __func__);
2395
2396 if (IS_CNA_CAPABLE(vha->hw)) {
2397 /* Logout across all FCFs. */
2398 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2399 mcp->mb[1] = BIT_1;
2400 mcp->mb[2] = 0;
2401 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2402 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2403 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2404 mcp->mb[1] = BIT_4;
2405 mcp->mb[2] = 0;
2406 mcp->mb[3] = vha->hw->loop_reset_delay;
2407 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2408 } else {
2409 mcp->mb[0] = MBC_LIP_RESET;
2410 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2411 if (HAS_EXTENDED_IDS(vha->hw)) {
2412 mcp->mb[1] = 0x00ff;
2413 mcp->mb[10] = 0;
2414 mcp->out_mb |= MBX_10;
2415 } else {
2416 mcp->mb[1] = 0xff00;
2417 }
2418 mcp->mb[2] = vha->hw->loop_reset_delay;
2419 mcp->mb[3] = 0;
2420 }
2421 mcp->in_mb = MBX_0;
2422 mcp->tov = MBX_TOV_SECONDS;
2423 mcp->flags = 0;
2424 rval = qla2x00_mailbox_command(vha, mcp);
2425
2426 if (rval != QLA_SUCCESS) {
2427 /*EMPTY*/
        ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2429 } else {
2430 /*EMPTY*/
2431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
            "Done %s.\n", __func__);
2433 }
2434
2435 return rval;
2436}
2437
2438/*
2439 * qla2x00_send_sns
2440 * Send SNS command.
2441 *
2442 * Input:
2443 * ha = adapter block pointer.
2444 * sns = pointer for command.
2445 * cmd_size = command size.
2446 * buf_size = response/command size.
2447 * TARGET_QUEUE_LOCK must be released.
2448 * ADAPTER_STATE_LOCK must be released.
2449 *
2450 * Returns:
2451 * qla2x00 local function return status code.
2452 *
2453 * Context:
2454 * Kernel context.
2455 */
2456int
2457qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2458 uint16_t cmd_size, size_t buf_size)
2459{
2460 int rval;
2461 mbx_cmd_t mc;
2462 mbx_cmd_t *mcp = &mc;
2463
2464 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
        "Entered %s.\n", __func__);
2466
2467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
        "Retry cnt=%d ratov=%d total tov=%d.\n",
2469 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2470
2471 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2472 mcp->mb[1] = cmd_size;
2473 mcp->mb[2] = MSW(sns_phys_address);
2474 mcp->mb[3] = LSW(sns_phys_address);
2475 mcp->mb[6] = MSW(MSD(sns_phys_address));
2476 mcp->mb[7] = LSW(MSD(sns_phys_address));
2477 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2478 mcp->in_mb = MBX_0|MBX_1;
2479 mcp->buf_size = buf_size;
2480 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2481 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2482 rval = qla2x00_mailbox_command(vha, mcp);
2483
2484 if (rval != QLA_SUCCESS) {
2485 /*EMPTY*/
2486 ql_dbg(ql_dbg_mbx, vha, 0x105f,
            "Failed=%x mb[0]=%x mb[1]=%x.\n",
2488 rval, mcp->mb[0], mcp->mb[1]);
2489 } else {
2490 /*EMPTY*/
2491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
            "Done %s.\n", __func__);
2493 }
2494
2495 return rval;
2496}
2497
2498int
2499qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2500 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2501{
2502 int rval;
2503
2504 struct logio_entry_24xx *lg;
2505 dma_addr_t lg_dma;
2506 uint32_t iop[2];
2507 struct qla_hw_data *ha = vha->hw;
2508 struct req_que *req;
2509
2510 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
        "Entered %s.\n", __func__);
2512
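    /* NPIV ports issue the login IOCB on their own queue pair's request queue. */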
2513 if (vha->vp_idx && vha->qpair)
2514 req = vha->qpair->req;
2515 else
2516 req = ha->req_q_map[0];
2517
    lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2519 if (lg == NULL) {
2520 ql_log(ql_log_warn, vha, 0x1062,
            "Failed to allocate login IOCB.\n");
2522 return QLA_MEMORY_ALLOC_FAILED;
2523 }
2524
2525 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2526 lg->entry_count = 1;
    lg->handle = make_handle(req->id, lg->handle);
2528 lg->nport_handle = cpu_to_le16(loop_id);
2529 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2530 if (opt & BIT_0)
2531 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2532 if (opt & BIT_1)
2533 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2534 lg->port_id[0] = al_pa;
2535 lg->port_id[1] = area;
2536 lg->port_id[2] = domain;
2537 lg->vp_index = vha->vp_idx;
    rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
        (ha->r_a_tov / 10 * 2) + 2);
2540 if (rval != QLA_SUCCESS) {
2541 ql_dbg(ql_dbg_mbx, vha, 0x1063,
            "Failed to issue login IOCB (%x).\n", rval);
2543 } else if (lg->entry_status != 0) {
2544 ql_dbg(ql_dbg_mbx, vha, 0x1064,
            "Failed to complete IOCB -- error status (%x).\n",
2546 lg->entry_status);
2547 rval = QLA_FUNCTION_FAILED;
2548 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2549 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2550 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2551
2552 ql_dbg(ql_dbg_mbx, vha, 0x1065,
            "Failed to complete IOCB -- completion status (%x) "
2554 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2555 iop[0], iop[1]);
2556
2557 switch (iop[0]) {
2558 case LSC_SCODE_PORTID_USED:
2559 mb[0] = MBS_PORT_ID_USED;
2560 mb[1] = LSW(iop[1]);
2561 break;
2562 case LSC_SCODE_NPORT_USED:
2563 mb[0] = MBS_LOOP_ID_USED;
2564 break;
2565 case LSC_SCODE_NOLINK:
2566 case LSC_SCODE_NOIOCB:
2567 case LSC_SCODE_NOXCB:
2568 case LSC_SCODE_CMD_FAILED:
2569 case LSC_SCODE_NOFABRIC:
2570 case LSC_SCODE_FW_NOT_READY:
2571 case LSC_SCODE_NOT_LOGGED_IN:
2572 case LSC_SCODE_NOPCB:
2573 case LSC_SCODE_ELS_REJECT:
2574 case LSC_SCODE_CMD_PARAM_ERR:
2575 case LSC_SCODE_NONPORT:
2576 case LSC_SCODE_LOGGED_IN:
2577 case LSC_SCODE_NOFLOGI_ACC:
2578 default:
2579 mb[0] = MBS_COMMAND_ERROR;
2580 break;
2581 }
2582 } else {
2583 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
            "Done %s.\n", __func__);
2585
2586 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2587
2588 mb[0] = MBS_COMMAND_COMPLETE;
2589 mb[1] = 0;
2590 if (iop[0] & BIT_4) {
2591 if (iop[0] & BIT_8)
2592 mb[1] |= BIT_1;
2593 } else
2594 mb[1] = BIT_0;
2595
2596 /* Passback COS information. */
2597 mb[10] = 0;
2598 if (lg->io_parameter[7] || lg->io_parameter[8])
2599 mb[10] |= BIT_0; /* Class 2. */
2600 if (lg->io_parameter[9] || lg->io_parameter[10])
2601 mb[10] |= BIT_1; /* Class 3. */
2602 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2603 mb[10] |= BIT_7; /* Confirmed Completion
2604 * Allowed
2605 */
2606 }
2607
    dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2609
2610 return rval;
2611}
2612
2613/*
2614 * qla2x00_login_fabric
2615 * Issue login fabric port mailbox command.
2616 *
2617 * Input:
2618 * ha = adapter block pointer.
2619 * loop_id = device loop ID.
2620 * domain = device domain.
2621 * area = device area.
2622 * al_pa = device AL_PA.
 * mb = pointer for return mailbox status.
2624 * opt = command options.
2625 * TARGET_QUEUE_LOCK must be released.
2626 * ADAPTER_STATE_LOCK must be released.
2627 *
2628 * Returns:
2629 * qla2x00 local function return status code.
2630 *
2631 * Context:
2632 * Kernel context.
2633 */
2634int
2635qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2636 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2637{
2638 int rval;
2639 mbx_cmd_t mc;
2640 mbx_cmd_t *mcp = &mc;
2641 struct qla_hw_data *ha = vha->hw;
2642
2643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
        "Entered %s.\n", __func__);
2645
2646 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2647 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2648 if (HAS_EXTENDED_IDS(ha)) {
2649 mcp->mb[1] = loop_id;
2650 mcp->mb[10] = opt;
2651 mcp->out_mb |= MBX_10;
2652 } else {
2653 mcp->mb[1] = (loop_id << 8) | opt;
2654 }
2655 mcp->mb[2] = domain;
2656 mcp->mb[3] = area << 8 | al_pa;
2657
2658 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2659 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2660 mcp->flags = 0;
2661 rval = qla2x00_mailbox_command(vha, mcp);
2662
2663 /* Return mailbox statuses. */
2664 if (mb != NULL) {
2665 mb[0] = mcp->mb[0];
2666 mb[1] = mcp->mb[1];
2667 mb[2] = mcp->mb[2];
2668 mb[6] = mcp->mb[6];
2669 mb[7] = mcp->mb[7];
2670 /* COS retrieved from Get-Port-Database mailbox command. */
2671 mb[10] = 0;
2672 }
2673
2674 if (rval != QLA_SUCCESS) {
2675 /* RLU tmp code: need to change main mailbox_command function to
2676 * return ok even when the mailbox completion value is not
         * SUCCESS. The caller is responsible for interpreting the
         * return values of this mailbox command if we're not to
         * change too much of the existing code.
2680 */
2681 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2682 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2683 mcp->mb[0] == 0x4006)
2684 rval = QLA_SUCCESS;
2685
2686 /*EMPTY*/
2687 ql_dbg(ql_dbg_mbx, vha, 0x1068,
            "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2689 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2690 } else {
2691 /*EMPTY*/
2692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
            "Done %s.\n", __func__);
2694 }
2695
2696 return rval;
2697}
2698
2699/*
2700 * qla2x00_login_local_device
2701 * Issue login loop port mailbox command.
2702 *
2703 * Input:
2704 * ha = adapter block pointer.
2705 * loop_id = device loop ID.
2706 * opt = command options.
2707 *
2708 * Returns:
2709 * Return status code.
2710 *
2711 * Context:
2712 * Kernel context.
2713 *
2714 */
2715int
2716qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2717 uint16_t *mb_ret, uint8_t opt)
2718{
2719 int rval;
2720 mbx_cmd_t mc;
2721 mbx_cmd_t *mcp = &mc;
2722 struct qla_hw_data *ha = vha->hw;
2723
2724 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
        "Entered %s.\n", __func__);
2726
2727 if (IS_FWI2_CAPABLE(ha))
        return qla24xx_login_fabric(vha, fcport->loop_id,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mb_ret, opt);
2731
2732 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2733 if (HAS_EXTENDED_IDS(ha))
2734 mcp->mb[1] = fcport->loop_id;
2735 else
2736 mcp->mb[1] = fcport->loop_id << 8;
2737 mcp->mb[2] = opt;
2738 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2739 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2740 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2741 mcp->flags = 0;
2742 rval = qla2x00_mailbox_command(vha, mcp);
2743
2744 /* Return mailbox statuses. */
2745 if (mb_ret != NULL) {
2746 mb_ret[0] = mcp->mb[0];
2747 mb_ret[1] = mcp->mb[1];
2748 mb_ret[6] = mcp->mb[6];
2749 mb_ret[7] = mcp->mb[7];
2750 }
2751
2752 if (rval != QLA_SUCCESS) {
2753 /* AV tmp code: need to change main mailbox_command function to
2754 * return ok even when the mailbox completion value is not
         * SUCCESS. The caller is responsible for interpreting the
         * return values of this mailbox command if we're not to
         * change too much of the existing code.
2758 */
2759 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2760 rval = QLA_SUCCESS;
2761
2762 ql_dbg(ql_dbg_mbx, vha, 0x106b,
            "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2764 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2765 } else {
2766 /*EMPTY*/
2767 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
            "Done %s.\n", __func__);
2769 }
2770
2771 return (rval);
2772}
2773
2774int
2775qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2776 uint8_t area, uint8_t al_pa)
2777{
2778 int rval;
2779 struct logio_entry_24xx *lg;
2780 dma_addr_t lg_dma;
2781 struct qla_hw_data *ha = vha->hw;
2782 struct req_que *req;
2783
2784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
        "Entered %s.\n", __func__);
2786
    lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2788 if (lg == NULL) {
2789 ql_log(ql_log_warn, vha, 0x106e,
            "Failed to allocate logout IOCB.\n");
2791 return QLA_MEMORY_ALLOC_FAILED;
2792 }
2793
2794 req = vha->req;
2795 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2796 lg->entry_count = 1;
    lg->handle = make_handle(req->id, lg->handle);
2798 lg->nport_handle = cpu_to_le16(loop_id);
2799 lg->control_flags =
2800 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2801 LCF_FREE_NPORT);
2802 lg->port_id[0] = al_pa;
2803 lg->port_id[1] = area;
2804 lg->port_id[2] = domain;
2805 lg->vp_index = vha->vp_idx;
    rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
        (ha->r_a_tov / 10 * 2) + 2);
2808 if (rval != QLA_SUCCESS) {
2809 ql_dbg(ql_dbg_mbx, vha, 0x106f,
            "Failed to issue logout IOCB (%x).\n", rval);
2811 } else if (lg->entry_status != 0) {
2812 ql_dbg(ql_dbg_mbx, vha, 0x1070,
            "Failed to complete IOCB -- error status (%x).\n",
2814 lg->entry_status);
2815 rval = QLA_FUNCTION_FAILED;
2816 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2817 ql_dbg(ql_dbg_mbx, vha, 0x1071,
            "Failed to complete IOCB -- completion status (%x) "
2819 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2820 le32_to_cpu(lg->io_parameter[0]),
2821 le32_to_cpu(lg->io_parameter[1]));
2822 } else {
2823 /*EMPTY*/
2824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
            "Done %s.\n", __func__);
2826 }
2827
    dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2829
2830 return rval;
2831}
2832
2833/*
2834 * qla2x00_fabric_logout
2835 * Issue logout fabric port mailbox command.
2836 *
2837 * Input:
2838 * ha = adapter block pointer.
2839 * loop_id = device loop ID.
2840 * TARGET_QUEUE_LOCK must be released.
2841 * ADAPTER_STATE_LOCK must be released.
2842 *
2843 * Returns:
2844 * qla2x00 local function return status code.
2845 *
2846 * Context:
2847 * Kernel context.
2848 */
2849int
2850qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2851 uint8_t area, uint8_t al_pa)
2852{
2853 int rval;
2854 mbx_cmd_t mc;
2855 mbx_cmd_t *mcp = &mc;
2856
2857 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
        "Entered %s.\n", __func__);
2859
2860 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2861 mcp->out_mb = MBX_1|MBX_0;
2862 if (HAS_EXTENDED_IDS(vha->hw)) {
2863 mcp->mb[1] = loop_id;
2864 mcp->mb[10] = 0;
2865 mcp->out_mb |= MBX_10;
2866 } else {
2867 mcp->mb[1] = loop_id << 8;
2868 }
2869
2870 mcp->in_mb = MBX_1|MBX_0;
2871 mcp->tov = MBX_TOV_SECONDS;
2872 mcp->flags = 0;
2873 rval = qla2x00_mailbox_command(vha, mcp);
2874
2875 if (rval != QLA_SUCCESS) {
2876 /*EMPTY*/
2877 ql_dbg(ql_dbg_mbx, vha, 0x1074,
            "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2879 } else {
2880 /*EMPTY*/
2881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
            "Done %s.\n", __func__);
2883 }
2884
2885 return rval;
2886}
2887
2888/*
2889 * qla2x00_full_login_lip
2890 * Issue full login LIP mailbox command.
2891 *
2892 * Input:
2893 * ha = adapter block pointer.
2894 * TARGET_QUEUE_LOCK must be released.
2895 * ADAPTER_STATE_LOCK must be released.
2896 *
2897 * Returns:
2898 * qla2x00 local function return status code.
2899 *
2900 * Context:
2901 * Kernel context.
2902 */
2903int
2904qla2x00_full_login_lip(scsi_qla_host_t *vha)
2905{
2906 int rval;
2907 mbx_cmd_t mc;
2908 mbx_cmd_t *mcp = &mc;
2909
2910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
        "Entered %s.\n", __func__);
2912
2913 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2914 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2915 mcp->mb[2] = 0;
2916 mcp->mb[3] = 0;
2917 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2918 mcp->in_mb = MBX_0;
2919 mcp->tov = MBX_TOV_SECONDS;
2920 mcp->flags = 0;
2921 rval = qla2x00_mailbox_command(vha, mcp);
2922
2923 if (rval != QLA_SUCCESS) {
2924 /*EMPTY*/
        ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2926 } else {
2927 /*EMPTY*/
2928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
            "Done %s.\n", __func__);
2930 }
2931
2932 return rval;
2933}
2934
2935/*
2936 * qla2x00_get_id_list
2937 *
2938 * Input:
2939 * ha = adapter block pointer.
2940 *
2941 * Returns:
2942 * qla2x00 local function return status code.
2943 *
2944 * Context:
2945 * Kernel context.
2946 */
2947int
2948qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2949 uint16_t *entries)
2950{
2951 int rval;
2952 mbx_cmd_t mc;
2953 mbx_cmd_t *mcp = &mc;
2954
2955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
        "Entered %s.\n", __func__);
2957
2958 if (id_list == NULL)
2959 return QLA_FUNCTION_FAILED;
2960
2961 mcp->mb[0] = MBC_GET_ID_LIST;
2962 mcp->out_mb = MBX_0;
2963 if (IS_FWI2_CAPABLE(vha->hw)) {
2964 mcp->mb[2] = MSW(id_list_dma);
2965 mcp->mb[3] = LSW(id_list_dma);
2966 mcp->mb[6] = MSW(MSD(id_list_dma));
2967 mcp->mb[7] = LSW(MSD(id_list_dma));
2968 mcp->mb[8] = 0;
2969 mcp->mb[9] = vha->vp_idx;
2970 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2971 } else {
2972 mcp->mb[1] = MSW(id_list_dma);
2973 mcp->mb[2] = LSW(id_list_dma);
2974 mcp->mb[3] = MSW(MSD(id_list_dma));
2975 mcp->mb[6] = LSW(MSD(id_list_dma));
2976 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2977 }
2978 mcp->in_mb = MBX_1|MBX_0;
2979 mcp->tov = MBX_TOV_SECONDS;
2980 mcp->flags = 0;
2981 rval = qla2x00_mailbox_command(vha, mcp);
2982
2983 if (rval != QLA_SUCCESS) {
2984 /*EMPTY*/
        ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2986 } else {
2987 *entries = mcp->mb[1];
2988 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
            "Done %s.\n", __func__);
2990 }
2991
2992 return rval;
2993}
2994
2995/*
2996 * qla2x00_get_resource_cnts
2997 * Get current firmware resource counts.
2998 *
2999 * Input:
3000 * ha = adapter block pointer.
3001 *
3002 * Returns:
3003 * qla2x00 local function return status code.
3004 *
3005 * Context:
3006 * Kernel context.
3007 */
3008int
3009qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
3010{
3011 struct qla_hw_data *ha = vha->hw;
3012 int rval;
3013 mbx_cmd_t mc;
3014 mbx_cmd_t *mcp = &mc;
3015
3016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
        "Entered %s.\n", __func__);
3018
3019 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3020 mcp->out_mb = MBX_0;
3021 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3022 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3023 IS_QLA27XX(ha) || IS_QLA28XX(ha))
3024 mcp->in_mb |= MBX_12;
3025 mcp->tov = MBX_TOV_SECONDS;
3026 mcp->flags = 0;
3027 rval = qla2x00_mailbox_command(vha, mcp);
3028
3029 if (rval != QLA_SUCCESS) {
3030 /*EMPTY*/
3031 ql_dbg(ql_dbg_mbx, vha, 0x107d,
            "Failed mb[0]=%x.\n", mcp->mb[0]);
3033 } else {
3034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
            "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3036 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3037 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3038 mcp->mb[11], mcp->mb[12]);
3039
3040 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3041 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3042 ha->cur_fw_xcb_count = mcp->mb[3];
3043 ha->orig_fw_xcb_count = mcp->mb[6];
3044 ha->cur_fw_iocb_count = mcp->mb[7];
3045 ha->orig_fw_iocb_count = mcp->mb[10];
3046 if (ha->flags.npiv_supported)
3047 ha->max_npiv_vports = mcp->mb[11];
3048 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3049 ha->fw_max_fcf_count = mcp->mb[12];
3050 }
3051
3052 return (rval);
3053}
3054
3055/*
3056 * qla2x00_get_fcal_position_map
3057 * Get FCAL (LILP) position map using mailbox command
3058 *
3059 * Input:
3060 * ha = adapter state pointer.
3061 * pos_map = buffer pointer (can be NULL).
3062 *
3063 * Returns:
3064 * qla2x00 local function return status code.
3065 *
3066 * Context:
3067 * Kernel context.
3068 */
3069int
3070qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
3071 u8 *num_entries)
3072{
3073 int rval;
3074 mbx_cmd_t mc;
3075 mbx_cmd_t *mcp = &mc;
3076 char *pmap;
3077 dma_addr_t pmap_dma;
3078 struct qla_hw_data *ha = vha->hw;
3079
3080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
        "Entered %s.\n", __func__);
3082
    pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3084 if (pmap == NULL) {
3085 ql_log(ql_log_warn, vha, 0x1080,
            "Memory alloc failed.\n");
3087 return QLA_MEMORY_ALLOC_FAILED;
3088 }
3089
3090 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3091 mcp->mb[2] = MSW(pmap_dma);
3092 mcp->mb[3] = LSW(pmap_dma);
3093 mcp->mb[6] = MSW(MSD(pmap_dma));
3094 mcp->mb[7] = LSW(MSD(pmap_dma));
3095 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3096 mcp->in_mb = MBX_1|MBX_0;
3097 mcp->buf_size = FCAL_MAP_SIZE;
3098 mcp->flags = MBX_DMA_IN;
3099 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3100 rval = qla2x00_mailbox_command(vha, mcp);
3101
3102 if (rval == QLA_SUCCESS) {
3103 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
            "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3105 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3106 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3107 pmap, pmap[0] + 1);
3108
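        /* The first byte of the returned map holds the number of entries that follow. */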
3109 if (pos_map)
3110 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3111 if (num_entries)
3112 *num_entries = pmap[0];
3113 }
    dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3115
3116 if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3118 } else {
3119 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
            "Done %s.\n", __func__);
3121 }
3122
3123 return rval;
3124}
3125
3126/*
3127 * qla2x00_get_link_status
3128 *
3129 * Input:
3130 * ha = adapter block pointer.
3131 * loop_id = device loop ID.
3132 * ret_buf = pointer to link status return buffer.
3133 *
3134 * Returns:
3135 * 0 = success.
3136 * BIT_0 = mem alloc error.
3137 * BIT_1 = mailbox error.
3138 */
3139int
3140qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3141 struct link_statistics *stats, dma_addr_t stats_dma)
3142{
3143 int rval;
3144 mbx_cmd_t mc;
3145 mbx_cmd_t *mcp = &mc;
3146 uint32_t *iter = (uint32_t *)stats;
3147 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3148 struct qla_hw_data *ha = vha->hw;
3149
3150 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
        "Entered %s.\n", __func__);
3152
3153 mcp->mb[0] = MBC_GET_LINK_STATUS;
3154 mcp->mb[2] = MSW(LSD(stats_dma));
3155 mcp->mb[3] = LSW(LSD(stats_dma));
3156 mcp->mb[6] = MSW(MSD(stats_dma));
3157 mcp->mb[7] = LSW(MSD(stats_dma));
3158 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3159 mcp->in_mb = MBX_0;
3160 if (IS_FWI2_CAPABLE(ha)) {
3161 mcp->mb[1] = loop_id;
3162 mcp->mb[4] = 0;
3163 mcp->mb[10] = 0;
3164 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3165 mcp->in_mb |= MBX_1;
3166 } else if (HAS_EXTENDED_IDS(ha)) {
3167 mcp->mb[1] = loop_id;
3168 mcp->mb[10] = 0;
3169 mcp->out_mb |= MBX_10|MBX_1;
3170 } else {
3171 mcp->mb[1] = loop_id << 8;
3172 mcp->out_mb |= MBX_1;
3173 }
3174 mcp->tov = MBX_TOV_SECONDS;
3175 mcp->flags = IOCTL_CMD;
3176 rval = qla2x00_mailbox_command(vha, mcp);
3177
3178 if (rval == QLA_SUCCESS) {
3179 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3180 ql_dbg(ql_dbg_mbx, vha, 0x1085,
                "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3182 rval = QLA_FUNCTION_FAILED;
3183 } else {
3184 /* Re-endianize - firmware data is le32. */
3185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
                "Done %s.\n", __func__);
3187 for ( ; dwords--; iter++)
3188 le32_to_cpus(iter);
3189 }
3190 } else {
3191 /* Failed. */
        ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3193 }
3194
3195 return rval;
3196}
3197
3198int
3199qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3200 dma_addr_t stats_dma, uint16_t options)
3201{
3202 int rval;
3203 mbx_cmd_t mc;
3204 mbx_cmd_t *mcp = &mc;
3205 uint32_t *iter = (uint32_t *)stats;
3206 ushort dwords = sizeof(*stats)/sizeof(*iter);
3207
3208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
        "Entered %s.\n", __func__);
3210
3211 memset(&mc, 0, sizeof(mc));
3212 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3213 mc.mb[2] = MSW(LSD(stats_dma));
3214 mc.mb[3] = LSW(LSD(stats_dma));
3215 mc.mb[6] = MSW(MSD(stats_dma));
3216 mc.mb[7] = LSW(MSD(stats_dma));
3217 mc.mb[8] = dwords;
3218 mc.mb[9] = vha->vp_idx;
3219 mc.mb[10] = options;
3220
3221 rval = qla24xx_send_mb_cmd(vha, &mc);
3222
3223 if (rval == QLA_SUCCESS) {
3224 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3225 ql_dbg(ql_dbg_mbx, vha, 0x1089,
                "Failed mb[0]=%x.\n", mcp->mb[0]);
3227 rval = QLA_FUNCTION_FAILED;
3228 } else {
3229 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
                "Done %s.\n", __func__);
3231 /* Re-endianize - firmware data is le32. */
3232 for ( ; dwords--; iter++)
3233 le32_to_cpus(iter);
3234 }
3235 } else {
3236 /* Failed. */
        ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3238 }
3239
3240 return rval;
3241}
3242
3243int
3244qla24xx_abort_command(srb_t *sp)
3245{
3246 int rval;
3247 unsigned long flags = 0;
3248
3249 struct abort_entry_24xx *abt;
3250 dma_addr_t abt_dma;
3251 uint32_t handle;
3252 fc_port_t *fcport = sp->fcport;
3253 struct scsi_qla_host *vha = fcport->vha;
3254 struct qla_hw_data *ha = vha->hw;
3255 struct req_que *req;
3256 struct qla_qpair *qpair = sp->qpair;
3257
3258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
        "Entered %s.\n", __func__);
3260
3261 if (sp->qpair)
3262 req = sp->qpair->req;
3263 else
3264 return QLA_ERR_NO_QPAIR;
3265
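    /* Prefer the asynchronous abort path when async TMF handling is enabled. */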
3266 if (ql2xasynctmfenable)
3267 return qla24xx_async_abort_command(sp);
3268
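    /* Locate the firmware handle for this sp in the request queue's outstanding-command array. */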
3269 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3270 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3271 if (req->outstanding_cmds[handle] == sp)
3272 break;
3273 }
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3275 if (handle == req->num_outstanding_cmds) {
3276 /* Command not found. */
3277 return QLA_ERR_NOT_FOUND;
3278 }
3279
    abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3281 if (abt == NULL) {
3282 ql_log(ql_log_warn, vha, 0x108d,
            "Failed to allocate abort IOCB.\n");
3284 return QLA_MEMORY_ALLOC_FAILED;
3285 }
3286
3287 abt->entry_type = ABORT_IOCB_TYPE;
3288 abt->entry_count = 1;
    abt->handle = make_handle(req->id, abt->handle);
    abt->nport_handle = cpu_to_le16(fcport->loop_id);
    abt->handle_to_abort = make_handle(req->id, handle);
3292 abt->port_id[0] = fcport->d_id.b.al_pa;
3293 abt->port_id[1] = fcport->d_id.b.area;
3294 abt->port_id[2] = fcport->d_id.b.domain;
3295 abt->vp_index = fcport->vha->vp_idx;
3296
3297 abt->req_que_no = cpu_to_le16(req->id);
3298 /* Need to pass original sp */
3299 qla_nvme_abort_set_option(abt, sp);
3300
    rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3302 if (rval != QLA_SUCCESS) {
3303 ql_dbg(ql_dbg_mbx, vha, 0x108e,
            "Failed to issue IOCB (%x).\n", rval);
3305 } else if (abt->entry_status != 0) {
3306 ql_dbg(ql_dbg_mbx, vha, 0x108f,
            "Failed to complete IOCB -- error status (%x).\n",
3308 abt->entry_status);
3309 rval = QLA_FUNCTION_FAILED;
3310 } else if (abt->nport_handle != cpu_to_le16(0)) {
3311 ql_dbg(ql_dbg_mbx, vha, 0x1090,
            "Failed to complete IOCB -- completion status (%x).\n",
3313 le16_to_cpu(abt->nport_handle));
3314 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3315 rval = QLA_FUNCTION_PARAMETER_ERROR;
3316 else
3317 rval = QLA_FUNCTION_FAILED;
3318 } else {
3319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
            "Done %s.\n", __func__);
3321 }
3322 if (rval == QLA_SUCCESS)
3323 qla_nvme_abort_process_comp_status(abt, sp);
3324
3325 qla_wait_nvme_release_cmd_kref(sp);
3326
    dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3328
3329 return rval;
3330}
3331
3332struct tsk_mgmt_cmd {
3333 union {
3334 struct tsk_mgmt_entry tsk;
3335 struct sts_entry_24xx sts;
3336 } p;
3337};
3338
3339static int
3340__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3341 uint64_t l, int tag)
3342{
3343 int rval, rval2;
3344 struct tsk_mgmt_cmd *tsk;
3345 struct sts_entry_24xx *sts;
3346 dma_addr_t tsk_dma;
3347 scsi_qla_host_t *vha;
3348 struct qla_hw_data *ha;
3349 struct req_que *req;
3350 struct qla_qpair *qpair;
3351
3352 vha = fcport->vha;
3353 ha = vha->hw;
3354 req = vha->req;
3355
3356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
        "Entered %s.\n", __func__);
3358
3359 if (vha->vp_idx && vha->qpair) {
3360 /* NPIV port */
3361 qpair = vha->qpair;
3362 req = qpair->req;
3363 }
3364
    tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3366 if (tsk == NULL) {
3367 ql_log(ql_log_warn, vha, 0x1093,
            "Failed to allocate task management IOCB.\n");
3369 return QLA_MEMORY_ALLOC_FAILED;
3370 }
3371
3372 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3373 tsk->p.tsk.entry_count = 1;
    tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3375 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3376 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3377 tsk->p.tsk.control_flags = cpu_to_le32(type);
3378 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3379 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3380 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3381 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
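    /* The LUN is meaningful only for LUN resets and must be swapped into FCP (big-endian) layout. */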
3382 if (type == TCF_LUN_RESET) {
3383 int_to_scsilun(l, &tsk->p.tsk.lun);
        host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
            sizeof(tsk->p.tsk.lun));
3386 }
3387
3388 sts = &tsk->p.sts;
    rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3390 if (rval != QLA_SUCCESS) {
3391 ql_dbg(ql_dbg_mbx, vha, 0x1094,
            "Failed to issue %s reset IOCB (%x).\n", name, rval);
3393 } else if (sts->entry_status != 0) {
3394 ql_dbg(ql_dbg_mbx, vha, 0x1095,
            "Failed to complete IOCB -- error status (%x).\n",
3396 sts->entry_status);
3397 rval = QLA_FUNCTION_FAILED;
3398 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3399 ql_dbg(ql_dbg_mbx, vha, 0x1096,
            "Failed to complete IOCB -- completion status (%x).\n",
3401 le16_to_cpu(sts->comp_status));
3402 rval = QLA_FUNCTION_FAILED;
3403 } else if (le16_to_cpu(sts->scsi_status) &
3404 SS_RESPONSE_INFO_LEN_VALID) {
3405 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
                "Ignoring inconsistent data length -- not enough "
3408 "response info (%d).\n",
3409 le32_to_cpu(sts->rsp_data_len));
3410 } else if (sts->data[3]) {
3411 ql_dbg(ql_dbg_mbx, vha, 0x1098,
                "Failed to complete IOCB -- response (%x).\n",
3413 sts->data[3]);
3414 rval = QLA_FUNCTION_FAILED;
3415 }
3416 }
3417
3418 /* Issue marker IOCB. */
3419 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3420 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3421 if (rval2 != QLA_SUCCESS) {
3422 ql_dbg(ql_dbg_mbx, vha, 0x1099,
            "Failed to issue marker IOCB (%x).\n", rval2);
3424 } else {
3425 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
            "Done %s.\n", __func__);
3427 }
3428
    dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3430
3431 return rval;
3432}
3433
3434int
3435qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3436{
3437 struct qla_hw_data *ha = fcport->vha->hw;
3438
3439 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3440 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3441
    return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3443}
3444
3445int
3446qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3447{
3448 struct qla_hw_data *ha = fcport->vha->hw;
3449
3450 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3451 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3452
    return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3454}
3455
3456int
3457qla2x00_system_error(scsi_qla_host_t *vha)
3458{
3459 int rval;
3460 mbx_cmd_t mc;
3461 mbx_cmd_t *mcp = &mc;
3462 struct qla_hw_data *ha = vha->hw;
3463
3464 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3465 return QLA_FUNCTION_FAILED;
3466
3467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
        "Entered %s.\n", __func__);
3469
3470 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3471 mcp->out_mb = MBX_0;
3472 mcp->in_mb = MBX_0;
3473 mcp->tov = 5;
3474 mcp->flags = 0;
3475 rval = qla2x00_mailbox_command(vha, mcp);
3476
3477 if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3479 } else {
3480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
            "Done %s.\n", __func__);
3482 }
3483
3484 return rval;
3485}
3486
3487int
3488qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3489{
3490 int rval;
3491 mbx_cmd_t mc;
3492 mbx_cmd_t *mcp = &mc;
3493
3494 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3495 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3496 return QLA_FUNCTION_FAILED;
3497
3498 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
        "Entered %s.\n", __func__);
3500
3501 mcp->mb[0] = MBC_WRITE_SERDES;
3502 mcp->mb[1] = addr;
3503 if (IS_QLA2031(vha->hw))
3504 mcp->mb[2] = data & 0xff;
3505 else
3506 mcp->mb[2] = data;
3507
3508 mcp->mb[3] = 0;
3509 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3510 mcp->in_mb = MBX_0;
3511 mcp->tov = MBX_TOV_SECONDS;
3512 mcp->flags = 0;
3513 rval = qla2x00_mailbox_command(vha, mcp);
3514
3515 if (rval != QLA_SUCCESS) {
3516 ql_dbg(ql_dbg_mbx, vha, 0x1183,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3518 } else {
3519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
            "Done %s.\n", __func__);
3521 }
3522
3523 return rval;
3524}
3525
3526int
3527qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3528{
3529 int rval;
3530 mbx_cmd_t mc;
3531 mbx_cmd_t *mcp = &mc;
3532
3533 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3534 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3535 return QLA_FUNCTION_FAILED;
3536
3537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
        "Entered %s.\n", __func__);
3539
3540 mcp->mb[0] = MBC_READ_SERDES;
3541 mcp->mb[1] = addr;
3542 mcp->mb[3] = 0;
3543 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3544 mcp->in_mb = MBX_1|MBX_0;
3545 mcp->tov = MBX_TOV_SECONDS;
3546 mcp->flags = 0;
3547 rval = qla2x00_mailbox_command(vha, mcp);
3548
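    /* ISP2031 serdes data is only 8 bits wide; keep just the low byte. */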
3549 if (IS_QLA2031(vha->hw))
3550 *data = mcp->mb[1] & 0xff;
3551 else
3552 *data = mcp->mb[1];
3553
3554 if (rval != QLA_SUCCESS) {
3555 ql_dbg(ql_dbg_mbx, vha, 0x1186,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3557 } else {
3558 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
            "Done %s.\n", __func__);
3560 }
3561
3562 return rval;
3563}
3564
3565int
3566qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3567{
3568 int rval;
3569 mbx_cmd_t mc;
3570 mbx_cmd_t *mcp = &mc;
3571
3572 if (!IS_QLA8044(vha->hw))
3573 return QLA_FUNCTION_FAILED;
3574
3575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
        "Entered %s.\n", __func__);
3577
3578 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3579 mcp->mb[1] = HCS_WRITE_SERDES;
3580 mcp->mb[3] = LSW(addr);
3581 mcp->mb[4] = MSW(addr);
3582 mcp->mb[5] = LSW(data);
3583 mcp->mb[6] = MSW(data);
3584 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3585 mcp->in_mb = MBX_0;
3586 mcp->tov = MBX_TOV_SECONDS;
3587 mcp->flags = 0;
3588 rval = qla2x00_mailbox_command(vha, mcp);
3589
3590 if (rval != QLA_SUCCESS) {
3591 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3593 } else {
3594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
            "Done %s.\n", __func__);
3596 }
3597
3598 return rval;
3599}
3600
3601int
3602qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3603{
3604 int rval;
3605 mbx_cmd_t mc;
3606 mbx_cmd_t *mcp = &mc;
3607
3608 if (!IS_QLA8044(vha->hw))
3609 return QLA_FUNCTION_FAILED;
3610
3611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
        "Entered %s.\n", __func__);
3613
3614 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3615 mcp->mb[1] = HCS_READ_SERDES;
3616 mcp->mb[3] = LSW(addr);
3617 mcp->mb[4] = MSW(addr);
3618 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3619 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3620 mcp->tov = MBX_TOV_SECONDS;
3621 mcp->flags = 0;
3622 rval = qla2x00_mailbox_command(vha, mcp);
3623
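    /* The 32-bit serdes word is returned split across mb1 (low 16 bits) and mb2 (high 16 bits). */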
3624 *data = mcp->mb[2] << 16 | mcp->mb[1];
3625
3626 if (rval != QLA_SUCCESS) {
3627 ql_dbg(ql_dbg_mbx, vha, 0x118a,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3629 } else {
3630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
            "Done %s.\n", __func__);
3632 }
3633
3634 return rval;
3635}
3636
/**
 * qla2x00_set_serdes_params() - Set serial link (serdes) option words.
 * @vha: HA context
 * @sw_em_1g: 1G serial link options
 * @sw_em_2g: 2G serial link options
 * @sw_em_4g: 4G serial link options
 *
 * Returns qla2x00 local function return status code.
 */
3646int
3647qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3648 uint16_t sw_em_2g, uint16_t sw_em_4g)
3649{
3650 int rval;
3651 mbx_cmd_t mc;
3652 mbx_cmd_t *mcp = &mc;
3653
3654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
        "Entered %s.\n", __func__);
3656
3657 mcp->mb[0] = MBC_SERDES_PARAMS;
3658 mcp->mb[1] = BIT_0;
3659 mcp->mb[2] = sw_em_1g | BIT_15;
3660 mcp->mb[3] = sw_em_2g | BIT_15;
3661 mcp->mb[4] = sw_em_4g | BIT_15;
3662 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3663 mcp->in_mb = MBX_0;
3664 mcp->tov = MBX_TOV_SECONDS;
3665 mcp->flags = 0;
3666 rval = qla2x00_mailbox_command(vha, mcp);
3667
3668 if (rval != QLA_SUCCESS) {
3669 /*EMPTY*/
3670 ql_dbg(ql_dbg_mbx, vha, 0x109f,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3672 } else {
3673 /*EMPTY*/
3674 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
            "Done %s.\n", __func__);
3676 }
3677
3678 return rval;
3679}
3680
3681int
3682qla2x00_stop_firmware(scsi_qla_host_t *vha)
3683{
3684 int rval;
3685 mbx_cmd_t mc;
3686 mbx_cmd_t *mcp = &mc;
3687
3688 if (!IS_FWI2_CAPABLE(vha->hw))
3689 return QLA_FUNCTION_FAILED;
3690
3691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
        "Entered %s.\n", __func__);
3693
3694 mcp->mb[0] = MBC_STOP_FIRMWARE;
3695 mcp->mb[1] = 0;
3696 mcp->out_mb = MBX_1|MBX_0;
3697 mcp->in_mb = MBX_0;
3698 mcp->tov = 5;
3699 mcp->flags = 0;
3700 rval = qla2x00_mailbox_command(vha, mcp);
3701
3702 if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3704 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3705 rval = QLA_INVALID_COMMAND;
3706 } else {
3707 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
            "Done %s.\n", __func__);
3709 }
3710
3711 return rval;
3712}
3713
3714int
3715qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3716 uint16_t buffers)
3717{
3718 int rval;
3719 mbx_cmd_t mc;
3720 mbx_cmd_t *mcp = &mc;
3721
3722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
        "Entered %s.\n", __func__);
3724
3725 if (!IS_FWI2_CAPABLE(vha->hw))
3726 return QLA_FUNCTION_FAILED;
3727
3728 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3729 return QLA_FUNCTION_FAILED;
3730
3731 mcp->mb[0] = MBC_TRACE_CONTROL;
3732 mcp->mb[1] = TC_EFT_ENABLE;
3733 mcp->mb[2] = LSW(eft_dma);
3734 mcp->mb[3] = MSW(eft_dma);
3735 mcp->mb[4] = LSW(MSD(eft_dma));
3736 mcp->mb[5] = MSW(MSD(eft_dma));
3737 mcp->mb[6] = buffers;
3738 mcp->mb[7] = TC_AEN_DISABLE;
3739 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3740 mcp->in_mb = MBX_1|MBX_0;
3741 mcp->tov = MBX_TOV_SECONDS;
3742 mcp->flags = 0;
3743 rval = qla2x00_mailbox_command(vha, mcp);
3744 if (rval != QLA_SUCCESS) {
3745 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
            "Failed=%x mb[0]=%x mb[1]=%x.\n",
3747 rval, mcp->mb[0], mcp->mb[1]);
3748 } else {
3749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
            "Done %s.\n", __func__);
3751 }
3752
3753 return rval;
3754}
3755
3756int
3757qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3758{
3759 int rval;
3760 mbx_cmd_t mc;
3761 mbx_cmd_t *mcp = &mc;
3762
3763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
        "Entered %s.\n", __func__);
3765
3766 if (!IS_FWI2_CAPABLE(vha->hw))
3767 return QLA_FUNCTION_FAILED;
3768
3769 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3770 return QLA_FUNCTION_FAILED;
3771
3772 mcp->mb[0] = MBC_TRACE_CONTROL;
3773 mcp->mb[1] = TC_EFT_DISABLE;
3774 mcp->out_mb = MBX_1|MBX_0;
3775 mcp->in_mb = MBX_1|MBX_0;
3776 mcp->tov = MBX_TOV_SECONDS;
3777 mcp->flags = 0;
3778 rval = qla2x00_mailbox_command(vha, mcp);
3779 if (rval != QLA_SUCCESS) {
3780 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
            "Failed=%x mb[0]=%x mb[1]=%x.\n",
3782 rval, mcp->mb[0], mcp->mb[1]);
3783 } else {
3784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
            "Done %s.\n", __func__);
3786 }
3787
3788 return rval;
3789}
3790
3791int
3792qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3793 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3794{
3795 int rval;
3796 mbx_cmd_t mc;
3797 mbx_cmd_t *mcp = &mc;
3798
3799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
        "Entered %s.\n", __func__);
3801
3802 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3803 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3804 !IS_QLA28XX(vha->hw))
3805 return QLA_FUNCTION_FAILED;
3806
3807 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3808 return QLA_FUNCTION_FAILED;
3809
3810 mcp->mb[0] = MBC_TRACE_CONTROL;
3811 mcp->mb[1] = TC_FCE_ENABLE;
3812 mcp->mb[2] = LSW(fce_dma);
3813 mcp->mb[3] = MSW(fce_dma);
3814 mcp->mb[4] = LSW(MSD(fce_dma));
3815 mcp->mb[5] = MSW(MSD(fce_dma));
3816 mcp->mb[6] = buffers;
3817 mcp->mb[7] = TC_AEN_DISABLE;
3818 mcp->mb[8] = 0;
3819 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3820 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3821 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3822 MBX_1|MBX_0;
3823 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3824 mcp->tov = MBX_TOV_SECONDS;
3825 mcp->flags = 0;
3826 rval = qla2x00_mailbox_command(vha, mcp);
3827 if (rval != QLA_SUCCESS) {
3828 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
            "Failed=%x mb[0]=%x mb[1]=%x.\n",
3830 rval, mcp->mb[0], mcp->mb[1]);
3831 } else {
3832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
            "Done %s.\n", __func__);
3834
3835 if (mb)
3836 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3837 if (dwords)
3838 *dwords = buffers;
3839 }
3840
3841 return rval;
3842}
3843
3844int
3845qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3846{
3847 int rval;
3848 mbx_cmd_t mc;
3849 mbx_cmd_t *mcp = &mc;
3850
3851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
        "Entered %s.\n", __func__);
3853
3854 if (!IS_FWI2_CAPABLE(vha->hw))
3855 return QLA_FUNCTION_FAILED;
3856
3857 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3858 return QLA_FUNCTION_FAILED;
3859
3860 mcp->mb[0] = MBC_TRACE_CONTROL;
3861 mcp->mb[1] = TC_FCE_DISABLE;
3862 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3863 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3864 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3865 MBX_1|MBX_0;
3866 mcp->tov = MBX_TOV_SECONDS;
3867 mcp->flags = 0;
3868 rval = qla2x00_mailbox_command(vha, mcp);
3869 if (rval != QLA_SUCCESS) {
3870 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
            "Failed=%x mb[0]=%x mb[1]=%x.\n",
3872 rval, mcp->mb[0], mcp->mb[1]);
3873 } else {
3874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
            "Done %s.\n", __func__);
3876
3877 if (wr)
3878 *wr = (uint64_t) mcp->mb[5] << 48 |
3879 (uint64_t) mcp->mb[4] << 32 |
3880 (uint64_t) mcp->mb[3] << 16 |
3881 (uint64_t) mcp->mb[2];
3882 if (rd)
3883 *rd = (uint64_t) mcp->mb[9] << 48 |
3884 (uint64_t) mcp->mb[8] << 32 |
3885 (uint64_t) mcp->mb[7] << 16 |
3886 (uint64_t) mcp->mb[6];
3887 }
3888
3889 return rval;
3890}
3891
3892int
3893qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3894 uint16_t *port_speed, uint16_t *mb)
3895{
3896 int rval;
3897 mbx_cmd_t mc;
3898 mbx_cmd_t *mcp = &mc;
3899
3900 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
        "Entered %s.\n", __func__);
3902
3903 if (!IS_IIDMA_CAPABLE(vha->hw))
3904 return QLA_FUNCTION_FAILED;
3905
3906 mcp->mb[0] = MBC_PORT_PARAMS;
3907 mcp->mb[1] = loop_id;
3908 mcp->mb[2] = mcp->mb[3] = 0;
3909 mcp->mb[9] = vha->vp_idx;
3910 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3911 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3912 mcp->tov = MBX_TOV_SECONDS;
3913 mcp->flags = 0;
3914 rval = qla2x00_mailbox_command(vha, mcp);
3915
3916 /* Return mailbox statuses. */
3917 if (mb) {
3918 mb[0] = mcp->mb[0];
3919 mb[1] = mcp->mb[1];
3920 mb[3] = mcp->mb[3];
3921 }
3922
3923 if (rval != QLA_SUCCESS) {
3924 ql_dbg(ql_dbg_mbx, vha, 0x10b1, fmt: "Failed=%x.\n", rval);
3925 } else {
3926 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3927 fmt: "Done %s.\n", __func__);
3928 if (port_speed)
3929 *port_speed = mcp->mb[3];
3930 }
3931
3932 return rval;
3933}
3934
3935int
3936qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3937 uint16_t port_speed, uint16_t *mb)
3938{
3939 int rval;
3940 mbx_cmd_t mc;
3941 mbx_cmd_t *mcp = &mc;
3942
3943 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3944 fmt: "Entered %s.\n", __func__);
3945
3946 if (!IS_IIDMA_CAPABLE(vha->hw))
3947 return QLA_FUNCTION_FAILED;
3948
3949 mcp->mb[0] = MBC_PORT_PARAMS;
3950 mcp->mb[1] = loop_id;
3951 mcp->mb[2] = BIT_0;
3952 mcp->mb[3] = port_speed & 0x3F;
3953 mcp->mb[9] = vha->vp_idx;
3954 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3955 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3956 mcp->tov = MBX_TOV_SECONDS;
3957 mcp->flags = 0;
3958 rval = qla2x00_mailbox_command(vha, mcp);
3959
3960 /* Return mailbox statuses. */
3961 if (mb) {
3962 mb[0] = mcp->mb[0];
3963 mb[1] = mcp->mb[1];
3964 mb[3] = mcp->mb[3];
3965 }
3966
3967 if (rval != QLA_SUCCESS) {
3968 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3969 fmt: "Failed=%x.\n", rval);
3970 } else {
3971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3972 fmt: "Done %s.\n", __func__);
3973 }
3974
3975 return rval;
3976}
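
/*
 * Minimal sketch (not part of the upstream driver): query a remote port's
 * current iIDMA speed and then program the speed obtained via GPSC.  The
 * function name is hypothetical; the driver normally drives these calls
 * from its iIDMA handling in qla_init.c.
 */
static void qla2x00_iidma_example(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	uint16_t mb[4];
	uint16_t cur_speed = 0;

	if (qla2x00_get_idma_speed(vha, fcport->loop_id, &cur_speed, mb) !=
	    QLA_SUCCESS)
		return;

	/* Program the speed the name server reported for this port. */
	qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, mb);
}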
3977
3978void
3979qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3980 struct vp_rpt_id_entry_24xx *rptid_entry)
3981{
3982 struct qla_hw_data *ha = vha->hw;
3983 scsi_qla_host_t *vp = NULL;
3984 unsigned long flags;
3985 int found;
3986 port_id_t id;
3987 struct fc_port *fcport;
3988
3989 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3990 fmt: "Entered %s.\n", __func__);
3991
3992 if (rptid_entry->entry_status != 0)
3993 return;
3994
3995 id.b.domain = rptid_entry->port_id[2];
3996 id.b.area = rptid_entry->port_id[1];
3997 id.b.al_pa = rptid_entry->port_id[0];
3998 id.b.rsvd_1 = 0;
3999 ha->flags.n2n_ae = 0;
4000
4001 if (rptid_entry->format == 0) {
4002 /* loop */
4003 ql_dbg(ql_dbg_async, vha, 0x10b7,
4004 fmt: "Format 0 : Number of VPs setup %d, number of "
4005 "VPs acquired %d.\n", rptid_entry->vp_setup,
4006 rptid_entry->vp_acquired);
4007 ql_dbg(ql_dbg_async, vha, 0x10b8,
4008 fmt: "Primary port id %02x%02x%02x.\n",
4009 rptid_entry->port_id[2], rptid_entry->port_id[1],
4010 rptid_entry->port_id[0]);
4011 ha->current_topology = ISP_CFG_NL;
4012 qla_update_host_map(vha, id);
4013
4014 } else if (rptid_entry->format == 1) {
4015 /* fabric */
4016 ql_dbg(ql_dbg_async, vha, 0x10b9,
4017 fmt: "Format 1: VP[%d] enabled - status %d - with "
4018 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4019 rptid_entry->vp_status,
4020 rptid_entry->port_id[2], rptid_entry->port_id[1],
4021 rptid_entry->port_id[0]);
4022 ql_dbg(ql_dbg_async, vha, 0x5075,
4023 fmt: "Format 1: Remote WWPN %8phC.\n",
4024 rptid_entry->u.f1.port_name);
4025
4026 ql_dbg(ql_dbg_async, vha, 0x5075,
4027 fmt: "Format 1: WWPN %8phC.\n",
4028 vha->port_name);
4029
4030 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4031 case TOPO_N2N:
4032 ha->current_topology = ISP_CFG_N;
4033 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4034 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4035 fcport->scan_state = QLA_FCPORT_SCAN;
4036 fcport->n2n_flag = 0;
4037 }
4038 id.b24 = 0;
4039 if (wwn_to_u64(wwn: vha->port_name) >
4040 wwn_to_u64(wwn: rptid_entry->u.f1.port_name)) {
4041 vha->d_id.b24 = 0;
4042 vha->d_id.b.al_pa = 1;
4043 ha->flags.n2n_bigger = 1;
4044
4045 id.b.al_pa = 2;
4046 ql_dbg(ql_dbg_async, vha, 0x5075,
4047 fmt: "Format 1: assign local id %x remote id %x\n",
4048 vha->d_id.b24, id.b24);
4049 } else {
4050 ql_dbg(ql_dbg_async, vha, 0x5075,
4051 fmt: "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4052 rptid_entry->u.f1.port_name);
4053 ha->flags.n2n_bigger = 0;
4054 }
4055
4056 fcport = qla2x00_find_fcport_by_wwpn(vha,
4057 rptid_entry->u.f1.port_name, 1);
4058 spin_unlock_irqrestore(lock: &vha->hw->tgt.sess_lock, flags);
4059
4060
4061 if (fcport) {
4062 fcport->plogi_nack_done_deadline = jiffies + HZ;
4063 fcport->dm_login_expire = jiffies +
4064 QLA_N2N_WAIT_TIME * HZ;
4065 fcport->scan_state = QLA_FCPORT_FOUND;
4066 fcport->n2n_flag = 1;
4067 fcport->keep_nport_handle = 1;
4068 fcport->login_retry = vha->hw->login_retry_count;
4069 fcport->fc4_type = FS_FC4TYPE_FCP;
4070 if (vha->flags.nvme_enabled)
4071 fcport->fc4_type |= FS_FC4TYPE_NVME;
4072
4073 if (wwn_to_u64(wwn: vha->port_name) >
4074 wwn_to_u64(wwn: fcport->port_name)) {
4075 fcport->d_id = id;
4076 }
4077
4078 switch (fcport->disc_state) {
4079 case DSC_DELETED:
4080 set_bit(RELOGIN_NEEDED,
4081 addr: &vha->dpc_flags);
4082 break;
4083 case DSC_DELETE_PEND:
4084 break;
4085 default:
4086 qlt_schedule_sess_for_deletion(fcport);
4087 break;
4088 }
4089 } else {
4090 qla24xx_post_newsess_work(vha, &id,
4091 rptid_entry->u.f1.port_name,
4092 rptid_entry->u.f1.node_name,
4093 NULL,
4094 FS_FCP_IS_N2N);
4095 }
4096
			/* if our port name is higher, then initiate N2N login */
4098
4099 set_bit(N2N_LOGIN_NEEDED, addr: &vha->dpc_flags);
4100 return;
4101 case TOPO_FL:
4102 ha->current_topology = ISP_CFG_FL;
4103 break;
4104 case TOPO_F:
4105 ha->current_topology = ISP_CFG_F;
4106 break;
4107 default:
4108 break;
4109 }
4110
4111 ha->flags.gpsc_supported = 1;
4112 ha->current_topology = ISP_CFG_F;
4113 /* buffer to buffer credit flag */
4114 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4115
4116 if (rptid_entry->vp_idx == 0) {
4117 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4118 /* FA-WWN is only for physical port */
4119 if (qla_ini_mode_enabled(ha: vha) &&
4120 ha->flags.fawwpn_enabled &&
4121 (rptid_entry->u.f1.flags &
4122 BIT_6)) {
4123 memcpy(vha->port_name,
4124 rptid_entry->u.f1.port_name,
4125 WWN_SIZE);
4126 }
4127
4128 qla_update_host_map(vha, id);
4129 }
4130
4131 set_bit(REGISTER_FC4_NEEDED, addr: &vha->dpc_flags);
4132 set_bit(REGISTER_FDMI_NEEDED, addr: &vha->dpc_flags);
4133 } else {
4134 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4135 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4136 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4137 fmt: "Could not acquire ID for VP[%d].\n",
4138 rptid_entry->vp_idx);
4139 return;
4140 }
4141
4142 found = 0;
4143 spin_lock_irqsave(&ha->vport_slock, flags);
4144 list_for_each_entry(vp, &ha->vp_list, list) {
4145 if (rptid_entry->vp_idx == vp->vp_idx) {
4146 found = 1;
4147 break;
4148 }
4149 }
4150 spin_unlock_irqrestore(lock: &ha->vport_slock, flags);
4151
4152 if (!found)
4153 return;
4154
4155 qla_update_host_map(vp, id);
4156
4157 /*
4158 * Cannot configure here as we are still sitting on the
4159 * response queue. Handle it in dpc context.
4160 */
4161 set_bit(VP_IDX_ACQUIRED, addr: &vp->vp_flags);
4162 set_bit(REGISTER_FC4_NEEDED, addr: &vp->dpc_flags);
4163 set_bit(REGISTER_FDMI_NEEDED, addr: &vp->dpc_flags);
4164 }
4165 set_bit(VP_DPC_NEEDED, addr: &vha->dpc_flags);
4166 qla2xxx_wake_dpc(vha);
4167 } else if (rptid_entry->format == 2) {
4168 ql_dbg(ql_dbg_async, vha, 0x505f,
4169 fmt: "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4170 rptid_entry->port_id[2], rptid_entry->port_id[1],
4171 rptid_entry->port_id[0]);
4172
4173 ql_dbg(ql_dbg_async, vha, 0x5075,
4174 fmt: "N2N: Remote WWPN %8phC.\n",
4175 rptid_entry->u.f2.port_name);
4176
4177 /* N2N. direct connect */
4178 ha->current_topology = ISP_CFG_N;
4179 ha->flags.rida_fmt2 = 1;
4180 vha->d_id.b.domain = rptid_entry->port_id[2];
4181 vha->d_id.b.area = rptid_entry->port_id[1];
4182 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4183
4184 ha->flags.n2n_ae = 1;
4185 spin_lock_irqsave(&ha->vport_slock, flags);
4186 qla_update_vp_map(vha, SET_AL_PA);
4187 spin_unlock_irqrestore(lock: &ha->vport_slock, flags);
4188
4189 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4190 fcport->scan_state = QLA_FCPORT_SCAN;
4191 fcport->n2n_flag = 0;
4192 }
4193
4194 fcport = qla2x00_find_fcport_by_wwpn(vha,
4195 rptid_entry->u.f2.port_name, 1);
4196
4197 if (fcport) {
4198 fcport->login_retry = vha->hw->login_retry_count;
4199 fcport->plogi_nack_done_deadline = jiffies + HZ;
4200 fcport->scan_state = QLA_FCPORT_FOUND;
4201 fcport->keep_nport_handle = 1;
4202 fcport->n2n_flag = 1;
4203 fcport->d_id.b.domain =
4204 rptid_entry->u.f2.remote_nport_id[2];
4205 fcport->d_id.b.area =
4206 rptid_entry->u.f2.remote_nport_id[1];
4207 fcport->d_id.b.al_pa =
4208 rptid_entry->u.f2.remote_nport_id[0];
4209
			/*
			 * When the remote port sends a PRLO, the firmware
			 * reports RIDA Format 2 as an indication of session
			 * loss; in other words, the firmware state changes
			 * from PRLI complete back to PLOGI complete.  Delete
			 * the session and let relogin drive the reconnect.
			 */
4217 if (atomic_read(v: &fcport->state) == FCS_ONLINE)
4218 qlt_schedule_sess_for_deletion(fcport);
4219 }
4220 }
4221}
4222
4223/*
4224 * qla24xx_modify_vp_config
4225 * Change VP configuration for vha
4226 *
4227 * Input:
4228 * vha = adapter block pointer.
4229 *
4230 * Returns:
4231 * qla2xxx local function return status code.
4232 *
4233 * Context:
4234 * Kernel context.
4235 */
4236int
4237qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4238{
4239 int rval;
4240 struct vp_config_entry_24xx *vpmod;
4241 dma_addr_t vpmod_dma;
4242 struct qla_hw_data *ha = vha->hw;
4243 struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev);
4244
4245 /* This can be called by the parent */
4246
4247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4248 fmt: "Entered %s.\n", __func__);
4249
4250 vpmod = dma_pool_zalloc(pool: ha->s_dma_pool, GFP_KERNEL, handle: &vpmod_dma);
4251 if (!vpmod) {
4252 ql_log(ql_log_warn, vha, 0x10bc,
4253 fmt: "Failed to allocate modify VP IOCB.\n");
4254 return QLA_MEMORY_ALLOC_FAILED;
4255 }
4256
4257 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4258 vpmod->entry_count = 1;
4259 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4260 vpmod->vp_count = 1;
4261 vpmod->vp_index1 = vha->vp_idx;
4262 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4263
4264 qlt_modify_vp_config(vha, vpmod);
4265
4266 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4267 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4268 vpmod->entry_count = 1;
4269
4270 rval = qla2x00_issue_iocb(vha: base_vha, buffer: vpmod, phys_addr: vpmod_dma, size: 0);
4271 if (rval != QLA_SUCCESS) {
4272 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4273 fmt: "Failed to issue VP config IOCB (%x).\n", rval);
4274 } else if (vpmod->comp_status != 0) {
4275 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4276 fmt: "Failed to complete IOCB -- error status (%x).\n",
4277 vpmod->comp_status);
4278 rval = QLA_FUNCTION_FAILED;
4279 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4280 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4281 fmt: "Failed to complete IOCB -- completion status (%x).\n",
4282 le16_to_cpu(vpmod->comp_status));
4283 rval = QLA_FUNCTION_FAILED;
4284 } else {
4285 /* EMPTY */
4286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4287 fmt: "Done %s.\n", __func__);
4288 fc_vport_set_state(vport: vha->fc_vport, new_state: FC_VPORT_INITIALIZING);
4289 }
4290 dma_pool_free(pool: ha->s_dma_pool, vaddr: vpmod, addr: vpmod_dma);
4291
4292 return rval;
4293}
4294
4295/*
4296 * qla2x00_send_change_request
 *	Register with the fabric controller to receive RSCN requests, or
 *	clear the registration.
4298 *
4299 * Input:
4300 * ha = adapter block pointer
4301 * format = registration format:
4302 * 0 - Reserved
4303 * 1 - Fabric detected registration
4304 * 2 - N_port detected registration
4305 * 3 - Full registration
4306 * FF - clear registration
4307 * vp_idx = Virtual port index
4308 *
4309 * Returns:
4310 * qla2x00 local function return status code.
4311 *
4312 * Context:
4313 * Kernel Context
4314 */
4315
4316int
4317qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4318 uint16_t vp_idx)
4319{
4320 int rval;
4321 mbx_cmd_t mc;
4322 mbx_cmd_t *mcp = &mc;
4323
4324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4325 fmt: "Entered %s.\n", __func__);
4326
4327 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4328 mcp->mb[1] = format;
4329 mcp->mb[9] = vp_idx;
4330 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4331 mcp->in_mb = MBX_0|MBX_1;
4332 mcp->tov = MBX_TOV_SECONDS;
4333 mcp->flags = 0;
4334 rval = qla2x00_mailbox_command(vha, mcp);
4335
4336 if (rval == QLA_SUCCESS) {
4337 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4338 rval = BIT_1;
4339 }
4340 } else
4341 rval = BIT_1;
4342
4343 return rval;
4344}
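
/*
 * Minimal sketch (not part of the upstream driver): request full RSCN
 * registration (format 3 in the table above) for this host.  The wrapper
 * name is hypothetical; note the function returns BIT_1 rather than a
 * standard QLA_* code on mailbox failure.
 */
static int qla2x00_scr_full_registration_example(scsi_qla_host_t *vha)
{
	return qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
}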
4345
4346int
4347qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4348 uint32_t size)
4349{
4350 int rval;
4351 mbx_cmd_t mc;
4352 mbx_cmd_t *mcp = &mc;
4353
4354 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4355 fmt: "Entered %s.\n", __func__);
4356
4357 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4358 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4359 mcp->mb[8] = MSW(addr);
4360 mcp->mb[10] = 0;
4361 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4362 } else {
4363 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4364 mcp->out_mb = MBX_0;
4365 }
4366 mcp->mb[1] = LSW(addr);
4367 mcp->mb[2] = MSW(req_dma);
4368 mcp->mb[3] = LSW(req_dma);
4369 mcp->mb[6] = MSW(MSD(req_dma));
4370 mcp->mb[7] = LSW(MSD(req_dma));
4371 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4372 if (IS_FWI2_CAPABLE(vha->hw)) {
4373 mcp->mb[4] = MSW(size);
4374 mcp->mb[5] = LSW(size);
4375 mcp->out_mb |= MBX_5|MBX_4;
4376 } else {
4377 mcp->mb[4] = LSW(size);
4378 mcp->out_mb |= MBX_4;
4379 }
4380
4381 mcp->in_mb = MBX_0;
4382 mcp->tov = MBX_TOV_SECONDS;
4383 mcp->flags = 0;
4384 rval = qla2x00_mailbox_command(vha, mcp);
4385
4386 if (rval != QLA_SUCCESS) {
4387 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4388 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4389 } else {
4390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4391 fmt: "Done %s.\n", __func__);
4392 }
4393
4394 return rval;
4395}
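
/*
 * Minimal sketch (not part of the upstream driver): capture a block of
 * RISC RAM into a DMA-coherent buffer with qla2x00_dump_ram().  The
 * address and word count are placeholders; the firmware-dump code derives
 * the real values from the chip's dump layout.
 */
static int qla2x00_dump_ram_example(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	const uint32_t risc_addr = 0x100000;	/* placeholder address */
	const uint32_t words = 0x400;		/* placeholder word count */
	dma_addr_t buf_dma;
	void *buf;
	int rval;

	buf = dma_alloc_coherent(&ha->pdev->dev, words * sizeof(uint32_t),
	    &buf_dma, GFP_KERNEL);
	if (!buf)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla2x00_dump_ram(vha, buf_dma, risc_addr, words);

	/* ...post-process or save the dump here... */
	dma_free_coherent(&ha->pdev->dev, words * sizeof(uint32_t), buf,
	    buf_dma);
	return rval;
}
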
4396/* 84XX Support **************************************************************/
4397
4398struct cs84xx_mgmt_cmd {
4399 union {
4400 struct verify_chip_entry_84xx req;
4401 struct verify_chip_rsp_84xx rsp;
4402 } p;
4403};
4404
4405int
4406qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4407{
4408 int rval, retry;
4409 struct cs84xx_mgmt_cmd *mn;
4410 dma_addr_t mn_dma;
4411 uint16_t options;
4412 unsigned long flags;
4413 struct qla_hw_data *ha = vha->hw;
4414
4415 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4416 fmt: "Entered %s.\n", __func__);
4417
4418 mn = dma_pool_alloc(pool: ha->s_dma_pool, GFP_KERNEL, handle: &mn_dma);
4419 if (mn == NULL) {
4420 return QLA_MEMORY_ALLOC_FAILED;
4421 }
4422
4423 /* Force Update? */
4424 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4425 /* Diagnostic firmware? */
4426 /* options |= MENLO_DIAG_FW; */
4427 /* We update the firmware with only one data sequence. */
4428 options |= VCO_END_OF_DATA;
4429
4430 do {
4431 retry = 0;
4432 memset(mn, 0, sizeof(*mn));
4433 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4434 mn->p.req.entry_count = 1;
4435 mn->p.req.options = cpu_to_le16(options);
4436
4437 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4438 fmt: "Dump of Verify Request.\n");
4439 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4440 mn, sizeof(*mn));
4441
4442 rval = qla2x00_issue_iocb_timeout(vha, buffer: mn, phys_addr: mn_dma, size: 0, tov: 120);
4443 if (rval != QLA_SUCCESS) {
4444 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4445 fmt: "Failed to issue verify IOCB (%x).\n", rval);
4446 goto verify_done;
4447 }
4448
4449 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4450 fmt: "Dump of Verify Response.\n");
4451 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4452 mn, sizeof(*mn));
4453
4454 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4455 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4456 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4458 fmt: "cs=%x fc=%x.\n", status[0], status[1]);
4459
4460 if (status[0] != CS_COMPLETE) {
4461 rval = QLA_FUNCTION_FAILED;
4462 if (!(options & VCO_DONT_UPDATE_FW)) {
4463 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
				    "Firmware update failed. Retrying "
				    "without firmware update.\n");
4466 options |= VCO_DONT_UPDATE_FW;
4467 options &= ~VCO_FORCE_UPDATE;
4468 retry = 1;
4469 }
4470 } else {
4471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4472 fmt: "Firmware updated to %x.\n",
4473 le32_to_cpu(mn->p.rsp.fw_ver));
4474
4475 /* NOTE: we only update OP firmware. */
4476 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4477 ha->cs84xx->op_fw_version =
4478 le32_to_cpu(mn->p.rsp.fw_ver);
4479 spin_unlock_irqrestore(lock: &ha->cs84xx->access_lock,
4480 flags);
4481 }
4482 } while (retry);
4483
4484verify_done:
4485 dma_pool_free(pool: ha->s_dma_pool, vaddr: mn, addr: mn_dma);
4486
4487 if (rval != QLA_SUCCESS) {
4488 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4489 fmt: "Failed=%x.\n", rval);
4490 } else {
4491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4492 fmt: "Done %s.\n", __func__);
4493 }
4494
4495 return rval;
4496}
4497
4498int
4499qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4500{
4501 int rval;
4502 unsigned long flags;
4503 mbx_cmd_t mc;
4504 mbx_cmd_t *mcp = &mc;
4505 struct qla_hw_data *ha = vha->hw;
4506
4507 if (!ha->flags.fw_started)
4508 return QLA_SUCCESS;
4509
4510 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4511 fmt: "Entered %s.\n", __func__);
4512
4513 if (IS_SHADOW_REG_CAPABLE(ha))
4514 req->options |= BIT_13;
4515
4516 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4517 mcp->mb[1] = req->options;
4518 mcp->mb[2] = MSW(LSD(req->dma));
4519 mcp->mb[3] = LSW(LSD(req->dma));
4520 mcp->mb[6] = MSW(MSD(req->dma));
4521 mcp->mb[7] = LSW(MSD(req->dma));
4522 mcp->mb[5] = req->length;
4523 if (req->rsp)
4524 mcp->mb[10] = req->rsp->id;
4525 mcp->mb[12] = req->qos;
4526 mcp->mb[11] = req->vp_idx;
4527 mcp->mb[13] = req->rid;
4528 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4529 mcp->mb[15] = 0;
4530
4531 mcp->mb[4] = req->id;
4532 /* que in ptr index */
4533 mcp->mb[8] = 0;
4534 /* que out ptr index */
4535 mcp->mb[9] = *req->out_ptr = 0;
4536 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4537 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4538 mcp->in_mb = MBX_0;
4539 mcp->flags = MBX_DMA_OUT;
4540 mcp->tov = MBX_TOV_SECONDS * 2;
4541
4542 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4543 IS_QLA28XX(ha))
4544 mcp->in_mb |= MBX_1;
4545 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4546 mcp->out_mb |= MBX_15;
4547 /* debug q create issue in SR-IOV */
4548 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4549 }
4550
4551 spin_lock_irqsave(&ha->hardware_lock, flags);
4552 if (!(req->options & BIT_0)) {
4553 wrt_reg_dword(addr: req->req_q_in, data: 0);
4554 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4555 wrt_reg_dword(addr: req->req_q_out, data: 0);
4556 }
4557 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
4558
4559 rval = qla2x00_mailbox_command(vha, mcp);
4560 if (rval != QLA_SUCCESS) {
4561 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4562 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4563 } else {
4564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4565 fmt: "Done %s.\n", __func__);
4566 }
4567
4568 return rval;
4569}
4570
4571int
4572qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4573{
4574 int rval;
4575 unsigned long flags;
4576 mbx_cmd_t mc;
4577 mbx_cmd_t *mcp = &mc;
4578 struct qla_hw_data *ha = vha->hw;
4579
4580 if (!ha->flags.fw_started)
4581 return QLA_SUCCESS;
4582
4583 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4584 fmt: "Entered %s.\n", __func__);
4585
4586 if (IS_SHADOW_REG_CAPABLE(ha))
4587 rsp->options |= BIT_13;
4588
4589 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4590 mcp->mb[1] = rsp->options;
4591 mcp->mb[2] = MSW(LSD(rsp->dma));
4592 mcp->mb[3] = LSW(LSD(rsp->dma));
4593 mcp->mb[6] = MSW(MSD(rsp->dma));
4594 mcp->mb[7] = LSW(MSD(rsp->dma));
4595 mcp->mb[5] = rsp->length;
4596 mcp->mb[14] = rsp->msix->entry;
4597 mcp->mb[13] = rsp->rid;
4598 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4599 mcp->mb[15] = 0;
4600
4601 mcp->mb[4] = rsp->id;
4602 /* que in ptr index */
4603 mcp->mb[8] = *rsp->in_ptr = 0;
4604 /* que out ptr index */
4605 mcp->mb[9] = 0;
4606 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4607 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4608 mcp->in_mb = MBX_0;
4609 mcp->flags = MBX_DMA_OUT;
4610 mcp->tov = MBX_TOV_SECONDS * 2;
4611
4612 if (IS_QLA81XX(ha)) {
4613 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4614 mcp->in_mb |= MBX_1;
4615 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4616 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4617 mcp->in_mb |= MBX_1;
4618 /* debug q create issue in SR-IOV */
4619 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4620 }
4621
4622 spin_lock_irqsave(&ha->hardware_lock, flags);
4623 if (!(rsp->options & BIT_0)) {
4624 wrt_reg_dword(addr: rsp->rsp_q_out, data: 0);
4625 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4626 wrt_reg_dword(addr: rsp->rsp_q_in, data: 0);
4627 }
4628
4629 spin_unlock_irqrestore(lock: &ha->hardware_lock, flags);
4630
4631 rval = qla2x00_mailbox_command(vha, mcp);
4632 if (rval != QLA_SUCCESS) {
4633 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4634 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4635 } else {
4636 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4637 fmt: "Done %s.\n", __func__);
4638 }
4639
4640 return rval;
4641}
4642
4643int
4644qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4645{
4646 int rval;
4647 mbx_cmd_t mc;
4648 mbx_cmd_t *mcp = &mc;
4649
4650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4651 fmt: "Entered %s.\n", __func__);
4652
4653 mcp->mb[0] = MBC_IDC_ACK;
4654 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4655 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4656 mcp->in_mb = MBX_0;
4657 mcp->tov = MBX_TOV_SECONDS;
4658 mcp->flags = 0;
4659 rval = qla2x00_mailbox_command(vha, mcp);
4660
4661 if (rval != QLA_SUCCESS) {
4662 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4663 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4664 } else {
4665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4666 fmt: "Done %s.\n", __func__);
4667 }
4668
4669 return rval;
4670}
4671
4672int
4673qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4674{
4675 int rval;
4676 mbx_cmd_t mc;
4677 mbx_cmd_t *mcp = &mc;
4678
4679 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4680 fmt: "Entered %s.\n", __func__);
4681
4682 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4683 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4684 return QLA_FUNCTION_FAILED;
4685
4686 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4687 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4688 mcp->out_mb = MBX_1|MBX_0;
4689 mcp->in_mb = MBX_1|MBX_0;
4690 mcp->tov = MBX_TOV_SECONDS;
4691 mcp->flags = 0;
4692 rval = qla2x00_mailbox_command(vha, mcp);
4693
4694 if (rval != QLA_SUCCESS) {
4695 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4696 fmt: "Failed=%x mb[0]=%x mb[1]=%x.\n",
4697 rval, mcp->mb[0], mcp->mb[1]);
4698 } else {
4699 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4700 fmt: "Done %s.\n", __func__);
4701 *sector_size = mcp->mb[1];
4702 }
4703
4704 return rval;
4705}
4706
4707int
4708qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4709{
4710 int rval;
4711 mbx_cmd_t mc;
4712 mbx_cmd_t *mcp = &mc;
4713
4714 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4715 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4716 return QLA_FUNCTION_FAILED;
4717
4718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4719 fmt: "Entered %s.\n", __func__);
4720
4721 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4722 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4723 FAC_OPT_CMD_WRITE_PROTECT;
4724 mcp->out_mb = MBX_1|MBX_0;
4725 mcp->in_mb = MBX_1|MBX_0;
4726 mcp->tov = MBX_TOV_SECONDS;
4727 mcp->flags = 0;
4728 rval = qla2x00_mailbox_command(vha, mcp);
4729
4730 if (rval != QLA_SUCCESS) {
4731 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4732 fmt: "Failed=%x mb[0]=%x mb[1]=%x.\n",
4733 rval, mcp->mb[0], mcp->mb[1]);
4734 } else {
4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4736 fmt: "Done %s.\n", __func__);
4737 }
4738
4739 return rval;
4740}
4741
4742int
4743qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4744{
4745 int rval;
4746 mbx_cmd_t mc;
4747 mbx_cmd_t *mcp = &mc;
4748
4749 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4750 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4751 return QLA_FUNCTION_FAILED;
4752
4753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4754 fmt: "Entered %s.\n", __func__);
4755
4756 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4757 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4758 mcp->mb[2] = LSW(start);
4759 mcp->mb[3] = MSW(start);
4760 mcp->mb[4] = LSW(finish);
4761 mcp->mb[5] = MSW(finish);
4762 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4763 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4764 mcp->tov = MBX_TOV_SECONDS;
4765 mcp->flags = 0;
4766 rval = qla2x00_mailbox_command(vha, mcp);
4767
4768 if (rval != QLA_SUCCESS) {
4769 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4770 fmt: "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4771 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4772 } else {
4773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4774 fmt: "Done %s.\n", __func__);
4775 }
4776
4777 return rval;
4778}
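
/*
 * Minimal sketch (not part of the upstream driver): the usual FAC erase
 * sequence built from the three helpers above - query the sector size,
 * enable writes, erase the range, then re-protect the flash.  The caller
 * is assumed to pass sector-aligned addresses.
 */
static int qla81xx_fac_erase_example(scsi_qla_host_t *vha, uint32_t start,
    uint32_t finish)
{
	uint32_t sec_size = 0;
	int rval;

	rval = qla81xx_fac_get_sector_size(vha, &sec_size);
	if (rval != QLA_SUCCESS)
		return rval;

	rval = qla81xx_fac_do_write_enable(vha, 1);
	if (rval != QLA_SUCCESS)
		return rval;

	rval = qla81xx_fac_erase_sector(vha, start, finish);

	/* Drop write access again even if the erase failed. */
	qla81xx_fac_do_write_enable(vha, 0);
	return rval;
}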
4779
4780int
4781qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4782{
4783 int rval = QLA_SUCCESS;
4784 mbx_cmd_t mc;
4785 mbx_cmd_t *mcp = &mc;
4786 struct qla_hw_data *ha = vha->hw;
4787
4788 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4789 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4790 return rval;
4791
4792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4793 fmt: "Entered %s.\n", __func__);
4794
4795 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4796 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4797 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4798 mcp->out_mb = MBX_1|MBX_0;
4799 mcp->in_mb = MBX_1|MBX_0;
4800 mcp->tov = MBX_TOV_SECONDS;
4801 mcp->flags = 0;
4802 rval = qla2x00_mailbox_command(vha, mcp);
4803
4804 if (rval != QLA_SUCCESS) {
4805 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4806 fmt: "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4807 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4808 } else {
4809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4810 fmt: "Done %s.\n", __func__);
4811 }
4812
4813 return rval;
4814}
4815
4816int
4817qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4818{
4819 int rval = 0;
4820 mbx_cmd_t mc;
4821 mbx_cmd_t *mcp = &mc;
4822
4823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4824 fmt: "Entered %s.\n", __func__);
4825
4826 mcp->mb[0] = MBC_RESTART_MPI_FW;
4827 mcp->out_mb = MBX_0;
4828 mcp->in_mb = MBX_0|MBX_1;
4829 mcp->tov = MBX_TOV_SECONDS;
4830 mcp->flags = 0;
4831 rval = qla2x00_mailbox_command(vha, mcp);
4832
4833 if (rval != QLA_SUCCESS) {
4834 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4835 fmt: "Failed=%x mb[0]=%x mb[1]=%x.\n",
4836 rval, mcp->mb[0], mcp->mb[1]);
4837 } else {
4838 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4839 fmt: "Done %s.\n", __func__);
4840 }
4841
4842 return rval;
4843}
4844
4845int
4846qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4847{
4848 int rval;
4849 mbx_cmd_t mc;
4850 mbx_cmd_t *mcp = &mc;
4851 int i;
4852 int len;
4853 __le16 *str;
4854 struct qla_hw_data *ha = vha->hw;
4855
4856 if (!IS_P3P_TYPE(ha))
4857 return QLA_FUNCTION_FAILED;
4858
4859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4860 fmt: "Entered %s.\n", __func__);
4861
4862 str = (__force __le16 *)version;
4863 len = strlen(version);
4864
4865 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4866 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4867 mcp->out_mb = MBX_1|MBX_0;
4868 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4869 mcp->mb[i] = le16_to_cpup(p: str);
4870 mcp->out_mb |= 1<<i;
4871 }
4872 for (; i < 16; i++) {
4873 mcp->mb[i] = 0;
4874 mcp->out_mb |= 1<<i;
4875 }
4876 mcp->in_mb = MBX_1|MBX_0;
4877 mcp->tov = MBX_TOV_SECONDS;
4878 mcp->flags = 0;
4879 rval = qla2x00_mailbox_command(vha, mcp);
4880
4881 if (rval != QLA_SUCCESS) {
4882 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4883 fmt: "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4884 } else {
4885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4886 fmt: "Done %s.\n", __func__);
4887 }
4888
4889 return rval;
4890}
4891
4892int
4893qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4894{
4895 int rval;
4896 mbx_cmd_t mc;
4897 mbx_cmd_t *mcp = &mc;
4898 int len;
4899 uint16_t dwlen;
4900 uint8_t *str;
4901 dma_addr_t str_dma;
4902 struct qla_hw_data *ha = vha->hw;
4903
4904 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4905 IS_P3P_TYPE(ha))
4906 return QLA_FUNCTION_FAILED;
4907
4908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4909 fmt: "Entered %s.\n", __func__);
4910
4911 str = dma_pool_alloc(pool: ha->s_dma_pool, GFP_KERNEL, handle: &str_dma);
4912 if (!str) {
4913 ql_log(ql_log_warn, vha, 0x117f,
4914 fmt: "Failed to allocate driver version param.\n");
4915 return QLA_MEMORY_ALLOC_FAILED;
4916 }
4917
4918 memcpy(str, "\x7\x3\x11\x0", 4);
4919 dwlen = str[0];
4920 len = dwlen * 4 - 4;
4921 memset(str + 4, 0, len);
4922 if (len > strlen(version))
4923 len = strlen(version);
4924 memcpy(str + 4, version, len);
4925
4926 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4927 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4928 mcp->mb[2] = MSW(LSD(str_dma));
4929 mcp->mb[3] = LSW(LSD(str_dma));
4930 mcp->mb[6] = MSW(MSD(str_dma));
4931 mcp->mb[7] = LSW(MSD(str_dma));
4932 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4933 mcp->in_mb = MBX_1|MBX_0;
4934 mcp->tov = MBX_TOV_SECONDS;
4935 mcp->flags = 0;
4936 rval = qla2x00_mailbox_command(vha, mcp);
4937
4938 if (rval != QLA_SUCCESS) {
4939 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4940 fmt: "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4941 } else {
4942 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4943 fmt: "Done %s.\n", __func__);
4944 }
4945
4946 dma_pool_free(pool: ha->s_dma_pool, vaddr: str, addr: str_dma);
4947
4948 return rval;
4949}
4950
4951int
4952qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4953 void *buf, uint16_t bufsiz)
4954{
4955 int rval, i;
4956 mbx_cmd_t mc;
4957 mbx_cmd_t *mcp = &mc;
4958 uint32_t *bp;
4959
4960 if (!IS_FWI2_CAPABLE(vha->hw))
4961 return QLA_FUNCTION_FAILED;
4962
4963 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4964 fmt: "Entered %s.\n", __func__);
4965
4966 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4967 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4968 mcp->mb[2] = MSW(buf_dma);
4969 mcp->mb[3] = LSW(buf_dma);
4970 mcp->mb[6] = MSW(MSD(buf_dma));
4971 mcp->mb[7] = LSW(MSD(buf_dma));
4972 mcp->mb[8] = bufsiz/4;
4973 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4974 mcp->in_mb = MBX_1|MBX_0;
4975 mcp->tov = MBX_TOV_SECONDS;
4976 mcp->flags = 0;
4977 rval = qla2x00_mailbox_command(vha, mcp);
4978
4979 if (rval != QLA_SUCCESS) {
4980 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4981 fmt: "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4982 } else {
4983 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4984 fmt: "Done %s.\n", __func__);
4985 bp = (uint32_t *) buf;
4986 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4987 *bp = le32_to_cpu((__force __le32)*bp);
4988 }
4989
4990 return rval;
4991}
4992
4993#define PUREX_CMD_COUNT 4
4994int
4995qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4996{
4997 int rval;
4998 mbx_cmd_t mc;
4999 mbx_cmd_t *mcp = &mc;
5000 uint8_t *els_cmd_map;
5001 uint8_t active_cnt = 0;
5002 dma_addr_t els_cmd_map_dma;
5003 uint8_t cmd_opcode[PUREX_CMD_COUNT];
5004 uint8_t i, index, purex_bit;
5005 struct qla_hw_data *ha = vha->hw;
5006
5007 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
5008 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5009 return QLA_SUCCESS;
5010
5011 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
5012 fmt: "Entered %s.\n", __func__);
5013
5014 els_cmd_map = dma_alloc_coherent(dev: &ha->pdev->dev, ELS_CMD_MAP_SIZE,
5015 dma_handle: &els_cmd_map_dma, GFP_KERNEL);
5016 if (!els_cmd_map) {
5017 ql_log(ql_log_warn, vha, 0x7101,
5018 fmt: "Failed to allocate RDP els command param.\n");
5019 return QLA_MEMORY_ALLOC_FAILED;
5020 }
5021
5022 /* List of Purex ELS */
5023 if (ql2xrdpenable) {
5024 cmd_opcode[active_cnt] = ELS_RDP;
5025 active_cnt++;
5026 }
5027 if (ha->flags.scm_supported_f) {
5028 cmd_opcode[active_cnt] = ELS_FPIN;
5029 active_cnt++;
5030 }
5031 if (ha->flags.edif_enabled) {
5032 cmd_opcode[active_cnt] = ELS_AUTH_ELS;
5033 active_cnt++;
5034 }
5035
5036 for (i = 0; i < active_cnt; i++) {
5037 index = cmd_opcode[i] / 8;
5038 purex_bit = cmd_opcode[i] % 8;
5039 els_cmd_map[index] |= 1 << purex_bit;
5040 }
5041
5042 mcp->mb[0] = MBC_SET_RNID_PARAMS;
5043 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5044 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5045 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5046 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5047 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5048 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5049 mcp->in_mb = MBX_1|MBX_0;
5050 mcp->tov = MBX_TOV_SECONDS;
5051 mcp->flags = MBX_DMA_OUT;
5052 mcp->buf_size = ELS_CMD_MAP_SIZE;
5053 rval = qla2x00_mailbox_command(vha, mcp);
5054
5055 if (rval != QLA_SUCCESS) {
5056 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5057 fmt: "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5058 } else {
5059 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5060 fmt: "Done %s.\n", __func__);
5061 }
5062
5063 dma_free_coherent(dev: &ha->pdev->dev, ELS_CMD_MAP_SIZE,
5064 cpu_addr: els_cmd_map, dma_handle: els_cmd_map_dma);
5065
5066 return rval;
5067}
5068
5069static int
5070qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5071{
5072 int rval;
5073 mbx_cmd_t mc;
5074 mbx_cmd_t *mcp = &mc;
5075
5076 if (!IS_FWI2_CAPABLE(vha->hw))
5077 return QLA_FUNCTION_FAILED;
5078
5079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5080 fmt: "Entered %s.\n", __func__);
5081
5082 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5083 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5084 mcp->out_mb = MBX_1|MBX_0;
5085 mcp->in_mb = MBX_1|MBX_0;
5086 mcp->tov = MBX_TOV_SECONDS;
5087 mcp->flags = 0;
5088 rval = qla2x00_mailbox_command(vha, mcp);
5089 *temp = mcp->mb[1];
5090
5091 if (rval != QLA_SUCCESS) {
5092 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5093 fmt: "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5094 } else {
5095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5096 fmt: "Done %s.\n", __func__);
5097 }
5098
5099 return rval;
5100}
5101
5102int
5103qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5104 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5105{
5106 int rval;
5107 mbx_cmd_t mc;
5108 mbx_cmd_t *mcp = &mc;
5109 struct qla_hw_data *ha = vha->hw;
5110
5111 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5112 fmt: "Entered %s.\n", __func__);
5113
5114 if (!IS_FWI2_CAPABLE(ha))
5115 return QLA_FUNCTION_FAILED;
5116
5117 if (len == 1)
5118 opt |= BIT_0;
5119
5120 mcp->mb[0] = MBC_READ_SFP;
5121 mcp->mb[1] = dev;
5122 mcp->mb[2] = MSW(LSD(sfp_dma));
5123 mcp->mb[3] = LSW(LSD(sfp_dma));
5124 mcp->mb[6] = MSW(MSD(sfp_dma));
5125 mcp->mb[7] = LSW(MSD(sfp_dma));
5126 mcp->mb[8] = len;
5127 mcp->mb[9] = off;
5128 mcp->mb[10] = opt;
5129 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5130 mcp->in_mb = MBX_1|MBX_0;
5131 mcp->tov = MBX_TOV_SECONDS;
5132 mcp->flags = 0;
5133 rval = qla2x00_mailbox_command(vha, mcp);
5134
5135 if (opt & BIT_0)
5136 *sfp = mcp->mb[1];
5137
5138 if (rval != QLA_SUCCESS) {
5139 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5140 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5141 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5142 /* sfp is not there */
5143 rval = QLA_INTERFACE_ERROR;
5144 }
5145 } else {
5146 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5147 fmt: "Done %s.\n", __func__);
5148 }
5149
5150 return rval;
5151}
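
/*
 * Minimal sketch (not part of the upstream driver): read the first 64
 * bytes of the transceiver's serial-ID page (device address 0xa0) into a
 * caller-supplied buffer.  The offset, length and option value are
 * simplified placeholders for illustration only.
 */
static int qla2x00_read_sfp_example(scsi_qla_host_t *vha, uint8_t *out)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t sfp_dma;
	uint8_t *sfp;
	int rval;

	sfp = dma_alloc_coherent(&ha->pdev->dev, 64, &sfp_dma, GFP_KERNEL);
	if (!sfp)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0, 64, 0);
	if (rval == QLA_SUCCESS)
		memcpy(out, sfp, 64);

	dma_free_coherent(&ha->pdev->dev, 64, sfp, sfp_dma);
	return rval;
}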
5152
5153int
5154qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5155 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5156{
5157 int rval;
5158 mbx_cmd_t mc;
5159 mbx_cmd_t *mcp = &mc;
5160 struct qla_hw_data *ha = vha->hw;
5161
5162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5163 fmt: "Entered %s.\n", __func__);
5164
5165 if (!IS_FWI2_CAPABLE(ha))
5166 return QLA_FUNCTION_FAILED;
5167
5168 if (len == 1)
5169 opt |= BIT_0;
5170
5171 if (opt & BIT_0)
5172 len = *sfp;
5173
5174 mcp->mb[0] = MBC_WRITE_SFP;
5175 mcp->mb[1] = dev;
5176 mcp->mb[2] = MSW(LSD(sfp_dma));
5177 mcp->mb[3] = LSW(LSD(sfp_dma));
5178 mcp->mb[6] = MSW(MSD(sfp_dma));
5179 mcp->mb[7] = LSW(MSD(sfp_dma));
5180 mcp->mb[8] = len;
5181 mcp->mb[9] = off;
5182 mcp->mb[10] = opt;
5183 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5184 mcp->in_mb = MBX_1|MBX_0;
5185 mcp->tov = MBX_TOV_SECONDS;
5186 mcp->flags = 0;
5187 rval = qla2x00_mailbox_command(vha, mcp);
5188
5189 if (rval != QLA_SUCCESS) {
5190 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5191 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5192 } else {
5193 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5194 fmt: "Done %s.\n", __func__);
5195 }
5196
5197 return rval;
5198}
5199
5200int
5201qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5202 uint16_t size_in_bytes, uint16_t *actual_size)
5203{
5204 int rval;
5205 mbx_cmd_t mc;
5206 mbx_cmd_t *mcp = &mc;
5207
5208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5209 fmt: "Entered %s.\n", __func__);
5210
5211 if (!IS_CNA_CAPABLE(vha->hw))
5212 return QLA_FUNCTION_FAILED;
5213
5214 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5215 mcp->mb[2] = MSW(stats_dma);
5216 mcp->mb[3] = LSW(stats_dma);
5217 mcp->mb[6] = MSW(MSD(stats_dma));
5218 mcp->mb[7] = LSW(MSD(stats_dma));
5219 mcp->mb[8] = size_in_bytes >> 2;
5220 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5221 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5222 mcp->tov = MBX_TOV_SECONDS;
5223 mcp->flags = 0;
5224 rval = qla2x00_mailbox_command(vha, mcp);
5225
5226 if (rval != QLA_SUCCESS) {
5227 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5228 fmt: "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5229 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5230 } else {
5231 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5232 fmt: "Done %s.\n", __func__);
5233
5234
5235 *actual_size = mcp->mb[2] << 2;
5236 }
5237
5238 return rval;
5239}
5240
5241int
5242qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5243 uint16_t size)
5244{
5245 int rval;
5246 mbx_cmd_t mc;
5247 mbx_cmd_t *mcp = &mc;
5248
5249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5250 fmt: "Entered %s.\n", __func__);
5251
5252 if (!IS_CNA_CAPABLE(vha->hw))
5253 return QLA_FUNCTION_FAILED;
5254
5255 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5256 mcp->mb[1] = 0;
5257 mcp->mb[2] = MSW(tlv_dma);
5258 mcp->mb[3] = LSW(tlv_dma);
5259 mcp->mb[6] = MSW(MSD(tlv_dma));
5260 mcp->mb[7] = LSW(MSD(tlv_dma));
5261 mcp->mb[8] = size;
5262 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5263 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5264 mcp->tov = MBX_TOV_SECONDS;
5265 mcp->flags = 0;
5266 rval = qla2x00_mailbox_command(vha, mcp);
5267
5268 if (rval != QLA_SUCCESS) {
5269 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5270 fmt: "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5271 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5272 } else {
5273 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5274 fmt: "Done %s.\n", __func__);
5275 }
5276
5277 return rval;
5278}
5279
5280int
5281qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5282{
5283 int rval;
5284 mbx_cmd_t mc;
5285 mbx_cmd_t *mcp = &mc;
5286
5287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5288 fmt: "Entered %s.\n", __func__);
5289
5290 if (!IS_FWI2_CAPABLE(vha->hw))
5291 return QLA_FUNCTION_FAILED;
5292
5293 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5294 mcp->mb[1] = LSW(risc_addr);
5295 mcp->mb[8] = MSW(risc_addr);
5296 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5297 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5298 mcp->tov = MBX_TOV_SECONDS;
5299 mcp->flags = 0;
5300 rval = qla2x00_mailbox_command(vha, mcp);
5301 if (rval != QLA_SUCCESS) {
5302 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5303 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5304 } else {
5305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5306 fmt: "Done %s.\n", __func__);
5307 *data = mcp->mb[3] << 16 | mcp->mb[2];
5308 }
5309
5310 return rval;
5311}
5312
5313int
5314qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5315 uint16_t *mresp)
5316{
5317 int rval;
5318 mbx_cmd_t mc;
5319 mbx_cmd_t *mcp = &mc;
5320
5321 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5322 fmt: "Entered %s.\n", __func__);
5323
5324 memset(mcp->mb, 0 , sizeof(mcp->mb));
5325 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
5327
5328 /* transfer count */
5329 mcp->mb[10] = LSW(mreq->transfer_size);
5330 mcp->mb[11] = MSW(mreq->transfer_size);
5331
5332 /* send data address */
5333 mcp->mb[14] = LSW(mreq->send_dma);
5334 mcp->mb[15] = MSW(mreq->send_dma);
5335 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5336 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5337
5338 /* receive data address */
5339 mcp->mb[16] = LSW(mreq->rcv_dma);
5340 mcp->mb[17] = MSW(mreq->rcv_dma);
5341 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5342 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5343
5344 /* Iteration count */
5345 mcp->mb[18] = LSW(mreq->iteration_count);
5346 mcp->mb[19] = MSW(mreq->iteration_count);
5347
5348 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5349 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5350 if (IS_CNA_CAPABLE(vha->hw))
5351 mcp->out_mb |= MBX_2;
5352 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5353
5354 mcp->buf_size = mreq->transfer_size;
5355 mcp->tov = MBX_TOV_SECONDS;
5356 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5357
5358 rval = qla2x00_mailbox_command(vha, mcp);
5359
5360 if (rval != QLA_SUCCESS) {
5361 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5362 fmt: "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5363 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5364 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5365 } else {
5366 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5367 fmt: "Done %s.\n", __func__);
5368 }
5369
5370 /* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);
5372 return rval;
5373}
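
/*
 * Minimal sketch (not part of the upstream driver): run a single loopback
 * iteration over DMA buffers the caller has already mapped.  The zero
 * options value (internal loopback) and the helper name are assumptions;
 * the BSG diagnostic path is what normally builds struct msg_echo_lb.
 */
static int qla2x00_loopback_example(scsi_qla_host_t *vha, dma_addr_t send_dma,
    dma_addr_t rcv_dma, uint32_t len)
{
	struct msg_echo_lb elreq;
	uint16_t rsp_mb[32];	/* qla2x00_loopback_test() copies back 64 bytes */

	memset(&elreq, 0, sizeof(elreq));
	elreq.send_dma = send_dma;
	elreq.rcv_dma = rcv_dma;
	elreq.transfer_size = len;
	elreq.iteration_count = 1;
	elreq.options = 0;	/* assumed: internal loopback */

	return qla2x00_loopback_test(vha, &elreq, rsp_mb);
}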
5374
5375int
5376qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5377 uint16_t *mresp)
5378{
5379 int rval;
5380 mbx_cmd_t mc;
5381 mbx_cmd_t *mcp = &mc;
5382 struct qla_hw_data *ha = vha->hw;
5383
5384 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5385 fmt: "Entered %s.\n", __func__);
5386
5387 memset(mcp->mb, 0 , sizeof(mcp->mb));
5388 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5389 /* BIT_6 specifies 64bit address */
5390 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5391 if (IS_CNA_CAPABLE(ha)) {
5392 mcp->mb[2] = vha->fcoe_fcf_idx;
5393 }
5394 mcp->mb[16] = LSW(mreq->rcv_dma);
5395 mcp->mb[17] = MSW(mreq->rcv_dma);
5396 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5397 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5398
5399 mcp->mb[10] = LSW(mreq->transfer_size);
5400
5401 mcp->mb[14] = LSW(mreq->send_dma);
5402 mcp->mb[15] = MSW(mreq->send_dma);
5403 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5404 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5405
5406 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5407 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5408 if (IS_CNA_CAPABLE(ha))
5409 mcp->out_mb |= MBX_2;
5410
5411 mcp->in_mb = MBX_0;
5412 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5413 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5414 mcp->in_mb |= MBX_1;
5415 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5416 IS_QLA28XX(ha))
5417 mcp->in_mb |= MBX_3;
5418
5419 mcp->tov = MBX_TOV_SECONDS;
5420 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5421 mcp->buf_size = mreq->transfer_size;
5422
5423 rval = qla2x00_mailbox_command(vha, mcp);
5424
5425 if (rval != QLA_SUCCESS) {
5426 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5427 fmt: "Failed=%x mb[0]=%x mb[1]=%x.\n",
5428 rval, mcp->mb[0], mcp->mb[1]);
5429 } else {
5430 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5431 fmt: "Done %s.\n", __func__);
5432 }
5433
5434 /* Copy mailbox information */
5435 memcpy(mresp, mcp->mb, 64);
5436 return rval;
5437}
5438
5439int
5440qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5441{
5442 int rval;
5443 mbx_cmd_t mc;
5444 mbx_cmd_t *mcp = &mc;
5445
5446 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5447 fmt: "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5448
5449 mcp->mb[0] = MBC_ISP84XX_RESET;
5450 mcp->mb[1] = enable_diagnostic;
5451 mcp->out_mb = MBX_1|MBX_0;
5452 mcp->in_mb = MBX_1|MBX_0;
5453 mcp->tov = MBX_TOV_SECONDS;
5454 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5455 rval = qla2x00_mailbox_command(vha, mcp);
5456
5457 if (rval != QLA_SUCCESS)
5458 ql_dbg(ql_dbg_mbx, vha, 0x10fe, fmt: "Failed=%x.\n", rval);
5459 else
5460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5461 fmt: "Done %s.\n", __func__);
5462
5463 return rval;
5464}
5465
5466int
5467qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5468{
5469 int rval;
5470 mbx_cmd_t mc;
5471 mbx_cmd_t *mcp = &mc;
5472
5473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5474 fmt: "Entered %s.\n", __func__);
5475
5476 if (!IS_FWI2_CAPABLE(vha->hw))
5477 return QLA_FUNCTION_FAILED;
5478
5479 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5480 mcp->mb[1] = LSW(risc_addr);
5481 mcp->mb[2] = LSW(data);
5482 mcp->mb[3] = MSW(data);
5483 mcp->mb[8] = MSW(risc_addr);
5484 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5485 mcp->in_mb = MBX_1|MBX_0;
5486 mcp->tov = MBX_TOV_SECONDS;
5487 mcp->flags = 0;
5488 rval = qla2x00_mailbox_command(vha, mcp);
5489 if (rval != QLA_SUCCESS) {
5490 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5491 fmt: "Failed=%x mb[0]=%x mb[1]=%x.\n",
5492 rval, mcp->mb[0], mcp->mb[1]);
5493 } else {
5494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5495 fmt: "Done %s.\n", __func__);
5496 }
5497
5498 return rval;
5499}
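
/*
 * Minimal sketch (not part of the upstream driver): read-modify-write of a
 * single RISC RAM word using the two helpers above.  The bit being set is
 * a placeholder.
 */
static int qla2x00_rmw_ram_word_example(scsi_qla_host_t *vha,
    uint32_t risc_addr)
{
	uint32_t data = 0;
	int rval;

	rval = qla2x00_read_ram_word(vha, risc_addr, &data);
	if (rval != QLA_SUCCESS)
		return rval;

	return qla2x00_write_ram_word(vha, risc_addr, data | BIT_0);
}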
5500
5501int
5502qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5503{
5504 int rval;
5505 uint32_t stat, timer;
5506 uint16_t mb0 = 0;
5507 struct qla_hw_data *ha = vha->hw;
5508 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5509
5510 rval = QLA_SUCCESS;
5511
5512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5513 fmt: "Entered %s.\n", __func__);
5514
5515 clear_bit(MBX_INTERRUPT, addr: &ha->mbx_cmd_flags);
5516
5517 /* Write the MBC data to the registers */
5518 wrt_reg_word(addr: &reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5519 wrt_reg_word(addr: &reg->mailbox1, data: mb[0]);
5520 wrt_reg_word(addr: &reg->mailbox2, data: mb[1]);
5521 wrt_reg_word(addr: &reg->mailbox3, data: mb[2]);
5522 wrt_reg_word(addr: &reg->mailbox4, data: mb[3]);
5523
5524 wrt_reg_dword(addr: &reg->hccr, HCCRX_SET_HOST_INT);
5525
5526 /* Poll for MBC interrupt */
5527 for (timer = 6000000; timer; timer--) {
5528 /* Check for pending interrupts. */
5529 stat = rd_reg_dword(addr: &reg->host_status);
5530 if (stat & HSRX_RISC_INT) {
5531 stat &= 0xff;
5532
5533 if (stat == 0x1 || stat == 0x2 ||
5534 stat == 0x10 || stat == 0x11) {
5535 set_bit(MBX_INTERRUPT,
5536 addr: &ha->mbx_cmd_flags);
5537 mb0 = rd_reg_word(addr: &reg->mailbox0);
5538 wrt_reg_dword(addr: &reg->hccr,
5539 HCCRX_CLR_RISC_INT);
5540 rd_reg_dword(addr: &reg->hccr);
5541 break;
5542 }
5543 }
5544 udelay(5);
5545 }
5546
5547 if (test_and_clear_bit(MBX_INTERRUPT, addr: &ha->mbx_cmd_flags))
5548 rval = mb0 & MBS_MASK;
5549 else
5550 rval = QLA_FUNCTION_FAILED;
5551
5552 if (rval != QLA_SUCCESS) {
5553 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5554 fmt: "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5555 } else {
5556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5557 fmt: "Done %s.\n", __func__);
5558 }
5559
5560 return rval;
5561}
5562
5563/* Set the specified data rate */
5564int
5565qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5566{
5567 int rval;
5568 mbx_cmd_t mc;
5569 mbx_cmd_t *mcp = &mc;
5570 struct qla_hw_data *ha = vha->hw;
5571 uint16_t val;
5572
5573 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5574 fmt: "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5575 mode);
5576
5577 if (!IS_FWI2_CAPABLE(ha))
5578 return QLA_FUNCTION_FAILED;
5579
5580 memset(mcp, 0, sizeof(*mcp));
5581 switch (ha->set_data_rate) {
5582 case PORT_SPEED_AUTO:
5583 case PORT_SPEED_4GB:
5584 case PORT_SPEED_8GB:
5585 case PORT_SPEED_16GB:
5586 case PORT_SPEED_32GB:
5587 val = ha->set_data_rate;
5588 break;
5589 default:
5590 ql_log(ql_log_warn, vha, 0x1199,
5591 fmt: "Unrecognized speed setting:%d. Setting Autoneg\n",
5592 ha->set_data_rate);
5593 val = ha->set_data_rate = PORT_SPEED_AUTO;
5594 break;
5595 }
5596
5597 mcp->mb[0] = MBC_DATA_RATE;
5598 mcp->mb[1] = mode;
5599 mcp->mb[2] = val;
5600
5601 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5602 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5603 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5604 mcp->in_mb |= MBX_4|MBX_3;
5605 mcp->tov = MBX_TOV_SECONDS;
5606 mcp->flags = 0;
5607 rval = qla2x00_mailbox_command(vha, mcp);
5608 if (rval != QLA_SUCCESS) {
5609 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5610 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5611 } else {
5612 if (mcp->mb[1] != 0x7)
5613 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5614 fmt: "Speed set:0x%x\n", mcp->mb[1]);
5615
5616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5617 fmt: "Done %s.\n", __func__);
5618 }
5619
5620 return rval;
5621}
5622
5623int
5624qla2x00_get_data_rate(scsi_qla_host_t *vha)
5625{
5626 int rval;
5627 mbx_cmd_t mc;
5628 mbx_cmd_t *mcp = &mc;
5629 struct qla_hw_data *ha = vha->hw;
5630
5631 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5632 fmt: "Entered %s.\n", __func__);
5633
5634 if (!IS_FWI2_CAPABLE(ha))
5635 return QLA_FUNCTION_FAILED;
5636
5637 mcp->mb[0] = MBC_DATA_RATE;
5638 mcp->mb[1] = QLA_GET_DATA_RATE;
5639 mcp->out_mb = MBX_1|MBX_0;
5640 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5641 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5642 mcp->in_mb |= MBX_4|MBX_3;
5643 mcp->tov = MBX_TOV_SECONDS;
5644 mcp->flags = 0;
5645 rval = qla2x00_mailbox_command(vha, mcp);
5646 if (rval != QLA_SUCCESS) {
5647 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5648 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5649 } else {
5650 if (mcp->mb[1] != 0x7)
5651 ha->link_data_rate = mcp->mb[1];
5652
5653 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5654 if (mcp->mb[4] & BIT_0)
5655 ql_log(ql_log_info, vha, 0x11a2,
5656 fmt: "FEC=enabled (data rate).\n");
5657 }
5658
5659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5660 fmt: "Done %s.\n", __func__);
5663 }
5664
5665 return rval;
5666}
5667
5668int
5669qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5670{
5671 int rval;
5672 mbx_cmd_t mc;
5673 mbx_cmd_t *mcp = &mc;
5674 struct qla_hw_data *ha = vha->hw;
5675
5676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5677 fmt: "Entered %s.\n", __func__);
5678
5679 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5680 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5681 return QLA_FUNCTION_FAILED;
5682 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5683 mcp->out_mb = MBX_0;
5684 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5685 mcp->tov = MBX_TOV_SECONDS;
5686 mcp->flags = 0;
5687
5688 rval = qla2x00_mailbox_command(vha, mcp);
5689
5690 if (rval != QLA_SUCCESS) {
5691 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5692 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5693 } else {
5694 /* Copy all bits to preserve original value */
5695 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5696
5697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5698 fmt: "Done %s.\n", __func__);
5699 }
5700 return rval;
5701}
5702
5703int
5704qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5705{
5706 int rval;
5707 mbx_cmd_t mc;
5708 mbx_cmd_t *mcp = &mc;
5709
5710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5711 fmt: "Entered %s.\n", __func__);
5712
5713 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5714 /* Copy all bits to preserve original setting */
5715 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5716 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5717 mcp->in_mb = MBX_0;
5718 mcp->tov = MBX_TOV_SECONDS;
5719 mcp->flags = 0;
5720 rval = qla2x00_mailbox_command(vha, mcp);
5721
5722 if (rval != QLA_SUCCESS) {
5723 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5724 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5725 } else
5726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5727 fmt: "Done %s.\n", __func__);
5728
5729 return rval;
5730}
5731
5732
5733int
5734qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5735 uint16_t *mb)
5736{
5737 int rval;
5738 mbx_cmd_t mc;
5739 mbx_cmd_t *mcp = &mc;
5740 struct qla_hw_data *ha = vha->hw;
5741
5742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5743 fmt: "Entered %s.\n", __func__);
5744
5745 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5746 return QLA_FUNCTION_FAILED;
5747
5748 mcp->mb[0] = MBC_PORT_PARAMS;
5749 mcp->mb[1] = loop_id;
5750 if (ha->flags.fcp_prio_enabled)
5751 mcp->mb[2] = BIT_1;
5752 else
5753 mcp->mb[2] = BIT_2;
5754 mcp->mb[4] = priority & 0xf;
5755 mcp->mb[9] = vha->vp_idx;
5756 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5757 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5758 mcp->tov = MBX_TOV_SECONDS;
5759 mcp->flags = 0;
5760 rval = qla2x00_mailbox_command(vha, mcp);
5761 if (mb != NULL) {
5762 mb[0] = mcp->mb[0];
5763 mb[1] = mcp->mb[1];
5764 mb[3] = mcp->mb[3];
5765 mb[4] = mcp->mb[4];
5766 }
5767
5768 if (rval != QLA_SUCCESS) {
5769 ql_dbg(ql_dbg_mbx, vha, 0x10cd, fmt: "Failed=%x.\n", rval);
5770 } else {
5771 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5772 fmt: "Done %s.\n", __func__);
5773 }
5774
5775 return rval;
5776}
5777
5778int
5779qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5780{
5781 int rval = QLA_FUNCTION_FAILED;
5782 struct qla_hw_data *ha = vha->hw;
5783 uint8_t byte;
5784
5785 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5786 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5787 fmt: "Thermal not supported by this card.\n");
5788 return rval;
5789 }
5790
5791 if (IS_QLA25XX(ha)) {
5792 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5793 ha->pdev->subsystem_device == 0x0175) {
5794 rval = qla2x00_read_sfp(vha, sfp_dma: 0, sfp: &byte,
5795 dev: 0x98, off: 0x1, len: 1, BIT_13|BIT_0);
5796 *temp = byte;
5797 return rval;
5798 }
5799 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5800 ha->pdev->subsystem_device == 0x338e) {
5801 rval = qla2x00_read_sfp(vha, sfp_dma: 0, sfp: &byte,
5802 dev: 0x98, off: 0x1, len: 1, BIT_15|BIT_14|BIT_0);
5803 *temp = byte;
5804 return rval;
5805 }
5806 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5807 fmt: "Thermal not supported by this card.\n");
5808 return rval;
5809 }
5810
5811 if (IS_QLA82XX(ha)) {
5812 *temp = qla82xx_read_temperature(vha);
5813 rval = QLA_SUCCESS;
5814 return rval;
5815 } else if (IS_QLA8044(ha)) {
5816 *temp = qla8044_read_temperature(vha);
5817 rval = QLA_SUCCESS;
5818 return rval;
5819 }
5820
5821 rval = qla2x00_read_asic_temperature(vha, temp);
5822 return rval;
5823}
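
/*
 * Minimal sketch (not part of the upstream driver): log the board
 * temperature using the helper above.  The debug message code 0x7102 is
 * an arbitrary placeholder, not an assigned qla2xxx message ID.
 */
static void qla2x00_log_temperature_example(scsi_qla_host_t *vha)
{
	uint16_t temp = 0;

	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
		ql_log(ql_log_info, vha, 0x7102,
		    "Board temperature: %d C.\n", temp);
}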
5824
5825int
5826qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5827{
5828 int rval;
5829 struct qla_hw_data *ha = vha->hw;
5830 mbx_cmd_t mc;
5831 mbx_cmd_t *mcp = &mc;
5832
5833 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5834 fmt: "Entered %s.\n", __func__);
5835
5836 if (!IS_FWI2_CAPABLE(ha))
5837 return QLA_FUNCTION_FAILED;
5838
5839 memset(mcp, 0, sizeof(mbx_cmd_t));
5840 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5841 mcp->mb[1] = 1;
5842
5843 mcp->out_mb = MBX_1|MBX_0;
5844 mcp->in_mb = MBX_0;
5845 mcp->tov = MBX_TOV_SECONDS;
5846 mcp->flags = 0;
5847
5848 rval = qla2x00_mailbox_command(vha, mcp);
5849 if (rval != QLA_SUCCESS) {
5850 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5851 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5852 } else {
5853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5854 fmt: "Done %s.\n", __func__);
5855 }
5856
5857 return rval;
5858}
5859
5860int
5861qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5862{
5863 int rval;
5864 struct qla_hw_data *ha = vha->hw;
5865 mbx_cmd_t mc;
5866 mbx_cmd_t *mcp = &mc;
5867
5868 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5869 fmt: "Entered %s.\n", __func__);
5870
5871 if (!IS_P3P_TYPE(ha))
5872 return QLA_FUNCTION_FAILED;
5873
5874 memset(mcp, 0, sizeof(mbx_cmd_t));
5875 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5876 mcp->mb[1] = 0;
5877
5878 mcp->out_mb = MBX_1|MBX_0;
5879 mcp->in_mb = MBX_0;
5880 mcp->tov = MBX_TOV_SECONDS;
5881 mcp->flags = 0;
5882
5883 rval = qla2x00_mailbox_command(vha, mcp);
5884 if (rval != QLA_SUCCESS) {
5885 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5886 fmt: "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5887 } else {
5888 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5889 "Done %s.\n", __func__);
5890 }
5891
5892 return rval;
5893}
5894
5895int
5896qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5897{
5898 struct qla_hw_data *ha = vha->hw;
5899 mbx_cmd_t mc;
5900 mbx_cmd_t *mcp = &mc;
5901 int rval = QLA_FUNCTION_FAILED;
5902
5903 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5904 "Entered %s.\n", __func__);
5905
5906 memset(mcp->mb, 0, sizeof(mcp->mb));
5907 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5908 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5909 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5910 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5911
5912 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5913 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5914 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5915
5916 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5917 mcp->tov = MBX_TOV_SECONDS;
5918 rval = qla2x00_mailbox_command(vha, mcp);
5919
5920 /* Always copy back return mailbox values. */
5921 if (rval != QLA_SUCCESS) {
5922 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5923 "mailbox command FAILED=0x%x, subcode=%x.\n",
5924 (mcp->mb[1] << 16) | mcp->mb[0],
5925 (mcp->mb[3] << 16) | mcp->mb[2]);
5926 } else {
5927 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5928 "Done %s.\n", __func__);
5929 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5930 if (!ha->md_template_size) {
5931 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5932 "Null template size obtained.\n");
5933 rval = QLA_FUNCTION_FAILED;
5934 }
5935 }
5936 return rval;
5937}
5938
5939int
5940qla82xx_md_get_template(scsi_qla_host_t *vha)
5941{
5942 struct qla_hw_data *ha = vha->hw;
5943 mbx_cmd_t mc;
5944 mbx_cmd_t *mcp = &mc;
5945 int rval = QLA_FUNCTION_FAILED;
5946
5947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5948 "Entered %s.\n", __func__);
5949
5950 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5951 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5952 if (!ha->md_tmplt_hdr) {
5953 ql_log(ql_log_warn, vha, 0x1124,
5954 "Unable to allocate memory for Minidump template.\n");
5955 return rval;
5956 }
5957
5958 memset(mcp->mb, 0, sizeof(mcp->mb));
5959 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5960 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5961 mcp->mb[2] = LSW(RQST_TMPLT);
5962 mcp->mb[3] = MSW(RQST_TMPLT);
5963 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5964 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5965 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5966 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5967 mcp->mb[8] = LSW(ha->md_template_size);
5968 mcp->mb[9] = MSW(ha->md_template_size);
5969
5970 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5971 mcp->tov = MBX_TOV_SECONDS;
5972 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5973 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5974 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5975 rval = qla2x00_mailbox_command(vha, mcp);
5976
5977 if (rval != QLA_SUCCESS) {
5978 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5979 "mailbox command FAILED=0x%x, subcode=%x.\n",
5980 ((mcp->mb[1] << 16) | mcp->mb[0]),
5981 ((mcp->mb[3] << 16) | mcp->mb[2]));
5982 } else
5983 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5984 "Done %s.\n", __func__);
5985 return rval;
5986}
5987
5988int
5989qla8044_md_get_template(scsi_qla_host_t *vha)
5990{
5991 struct qla_hw_data *ha = vha->hw;
5992 mbx_cmd_t mc;
5993 mbx_cmd_t *mcp = &mc;
5994 int rval = QLA_FUNCTION_FAILED;
5995 int offset = 0, size = MINIDUMP_SIZE_36K;
5996
5997 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5998 "Entered %s.\n", __func__);
5999
6000 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
6001 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
6002 if (!ha->md_tmplt_hdr) {
6003 ql_log(ql_log_warn, vha, 0xb11b,
6004 "Unable to allocate memory for Minidump template.\n");
6005 return rval;
6006 }
6007
6008 memset(mcp->mb, 0, sizeof(mcp->mb));
6009 while (offset < ha->md_template_size) {
6010 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6011 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6012 mcp->mb[2] = LSW(RQST_TMPLT);
6013 mcp->mb[3] = MSW(RQST_TMPLT);
6014 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6015 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6016 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6017 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6018 mcp->mb[8] = LSW(size);
6019 mcp->mb[9] = MSW(size);
6020 mcp->mb[10] = offset & 0x0000FFFF;
6021 mcp->mb[11] = offset & 0xFFFF0000;
6022 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6023 mcp->tov = MBX_TOV_SECONDS;
6024 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6025 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6026 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6027 rval = qla2x00_mailbox_command(vha, mcp);
6028
6029 if (rval != QLA_SUCCESS) {
6030 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6031 "mailbox command FAILED=0x%x, subcode=%x.\n",
6032 ((mcp->mb[1] << 16) | mcp->mb[0]),
6033 ((mcp->mb[3] << 16) | mcp->mb[2]));
6034 return rval;
6035 } else
6036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6037 "Done %s.\n", __func__);
6038 offset = offset + size;
6039 }
6040 return rval;
6041}
6042
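/*
 * Editorial example (not part of the upstream driver): a hedged sketch of
 * the two-step ISP82xx minidump template fetch implemented above - query
 * the size first, then pull the template into the coherent buffer that
 * qla82xx_md_get_template() allocates.  The wrapper name is illustrative.
 */
static int example_fetch_md_template(scsi_qla_host_t *vha)
{
	int rval;

	/* Step 1: firmware reports the size into ha->md_template_size. */
	rval = qla82xx_md_get_template_size(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	/* Step 2: allocate ha->md_tmplt_hdr and DMA the template into it. */
	return qla82xx_md_get_template(vha);
}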
6043int
6044qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6045{
6046 int rval;
6047 struct qla_hw_data *ha = vha->hw;
6048 mbx_cmd_t mc;
6049 mbx_cmd_t *mcp = &mc;
6050
6051 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6052 return QLA_FUNCTION_FAILED;
6053
6054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6055 "Entered %s.\n", __func__);
6056
6057 memset(mcp, 0, sizeof(mbx_cmd_t));
6058 mcp->mb[0] = MBC_SET_LED_CONFIG;
6059 mcp->mb[1] = led_cfg[0];
6060 mcp->mb[2] = led_cfg[1];
6061 if (IS_QLA8031(ha)) {
6062 mcp->mb[3] = led_cfg[2];
6063 mcp->mb[4] = led_cfg[3];
6064 mcp->mb[5] = led_cfg[4];
6065 mcp->mb[6] = led_cfg[5];
6066 }
6067
6068 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6069 if (IS_QLA8031(ha))
6070 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6071 mcp->in_mb = MBX_0;
6072 mcp->tov = MBX_TOV_SECONDS;
6073 mcp->flags = 0;
6074
6075 rval = qla2x00_mailbox_command(vha, mcp);
6076 if (rval != QLA_SUCCESS) {
6077 ql_dbg(ql_dbg_mbx, vha, 0x1134,
6078 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6079 } else {
6080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6081 "Done %s.\n", __func__);
6082 }
6083
6084 return rval;
6085}
6086
6087int
6088qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6089{
6090 int rval;
6091 struct qla_hw_data *ha = vha->hw;
6092 mbx_cmd_t mc;
6093 mbx_cmd_t *mcp = &mc;
6094
6095 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6096 return QLA_FUNCTION_FAILED;
6097
6098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6099 "Entered %s.\n", __func__);
6100
6101 memset(mcp, 0, sizeof(mbx_cmd_t));
6102 mcp->mb[0] = MBC_GET_LED_CONFIG;
6103
6104 mcp->out_mb = MBX_0;
6105 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6106 if (IS_QLA8031(ha))
6107 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6108 mcp->tov = MBX_TOV_SECONDS;
6109 mcp->flags = 0;
6110
6111 rval = qla2x00_mailbox_command(vha, mcp);
6112 if (rval != QLA_SUCCESS) {
6113 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6114 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6115 } else {
6116 led_cfg[0] = mcp->mb[1];
6117 led_cfg[1] = mcp->mb[2];
6118 if (IS_QLA8031(ha)) {
6119 led_cfg[2] = mcp->mb[3];
6120 led_cfg[3] = mcp->mb[4];
6121 led_cfg[4] = mcp->mb[5];
6122 led_cfg[5] = mcp->mb[6];
6123 }
6124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6125 "Done %s.\n", __func__);
6126 }
6127
6128 return rval;
6129}
6130
6131int
6132qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6133{
6134 int rval;
6135 struct qla_hw_data *ha = vha->hw;
6136 mbx_cmd_t mc;
6137 mbx_cmd_t *mcp = &mc;
6138
6139 if (!IS_P3P_TYPE(ha))
6140 return QLA_FUNCTION_FAILED;
6141
6142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6143 "Entered %s.\n", __func__);
6144
6145 memset(mcp, 0, sizeof(mbx_cmd_t));
6146 mcp->mb[0] = MBC_SET_LED_CONFIG;
6147 if (enable)
6148 mcp->mb[7] = 0xE;
6149 else
6150 mcp->mb[7] = 0xD;
6151
6152 mcp->out_mb = MBX_7|MBX_0;
6153 mcp->in_mb = MBX_0;
6154 mcp->tov = MBX_TOV_SECONDS;
6155 mcp->flags = 0;
6156
6157 rval = qla2x00_mailbox_command(vha, mcp);
6158 if (rval != QLA_SUCCESS) {
6159 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6160 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6161 } else {
6162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6163 "Done %s.\n", __func__);
6164 }
6165
6166 return rval;
6167}
6168
6169int
6170qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6171{
6172 int rval;
6173 struct qla_hw_data *ha = vha->hw;
6174 mbx_cmd_t mc;
6175 mbx_cmd_t *mcp = &mc;
6176
6177 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6178 return QLA_FUNCTION_FAILED;
6179
6180 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6181 "Entered %s.\n", __func__);
6182
6183 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6184 mcp->mb[1] = LSW(reg);
6185 mcp->mb[2] = MSW(reg);
6186 mcp->mb[3] = LSW(data);
6187 mcp->mb[4] = MSW(data);
6188 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6189
6190 mcp->in_mb = MBX_1|MBX_0;
6191 mcp->tov = MBX_TOV_SECONDS;
6192 mcp->flags = 0;
6193 rval = qla2x00_mailbox_command(vha, mcp);
6194
6195 if (rval != QLA_SUCCESS) {
6196 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6197 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6198 } else {
6199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6200 "Done %s.\n", __func__);
6201 }
6202
6203 return rval;
6204}
6205
6206int
6207qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6208{
6209 int rval;
6210 struct qla_hw_data *ha = vha->hw;
6211 mbx_cmd_t mc;
6212 mbx_cmd_t *mcp = &mc;
6213
6214 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6215 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6216 "Implicit LOGO Unsupported.\n");
6217 return QLA_FUNCTION_FAILED;
6218 }
6219
6220
6221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6222 "Entering %s.\n", __func__);
6223
6224 /* Perform Implicit LOGO. */
6225 mcp->mb[0] = MBC_PORT_LOGOUT;
6226 mcp->mb[1] = fcport->loop_id;
6227 mcp->mb[10] = BIT_15;
6228 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6229 mcp->in_mb = MBX_0;
6230 mcp->tov = MBX_TOV_SECONDS;
6231 mcp->flags = 0;
6232 rval = qla2x00_mailbox_command(vha, mcp);
6233 if (rval != QLA_SUCCESS)
6234 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6235 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6236 else
6237 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6238 "Done %s.\n", __func__);
6239
6240 return rval;
6241}
6242
6243int
6244qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6245{
6246 int rval;
6247 mbx_cmd_t mc;
6248 mbx_cmd_t *mcp = &mc;
6249 struct qla_hw_data *ha = vha->hw;
6250 unsigned long retry_max_time = jiffies + (2 * HZ);
6251
6252 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6253 return QLA_FUNCTION_FAILED;
6254
6255 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6256
6257retry_rd_reg:
6258 mcp->mb[0] = MBC_READ_REMOTE_REG;
6259 mcp->mb[1] = LSW(reg);
6260 mcp->mb[2] = MSW(reg);
6261 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6262 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6263 mcp->tov = MBX_TOV_SECONDS;
6264 mcp->flags = 0;
6265 rval = qla2x00_mailbox_command(vha, mcp);
6266
6267 if (rval != QLA_SUCCESS) {
6268 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6269 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6270 rval, mcp->mb[0], mcp->mb[1]);
6271 } else {
6272 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6273 if (*data == QLA8XXX_BAD_VALUE) {
6274 /*
6275 * During soft-reset CAMRAM register reads might
6276 * return 0xbad0bad0. So retry for MAX of 2 sec
6277 * while reading camram registers.
6278 */
6279 if (time_after(jiffies, retry_max_time)) {
6280 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6281 "Failure to read CAMRAM register. "
6282 "data=0x%x.\n", *data);
6283 return QLA_FUNCTION_FAILED;
6284 }
6285 msleep(100);
6286 goto retry_rd_reg;
6287 }
6288 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6289 }
6290
6291 return rval;
6292}
6293
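/*
 * Editorial example (not part of the upstream driver): a hedged
 * read-modify-write sketch built on qla83xx_rd_reg()/qla83xx_wr_reg()
 * above; the register offset and bit mask come from the caller and are
 * purely illustrative here.
 */
static int example_rmw_remote_reg(scsi_qla_host_t *vha, uint32_t reg,
    uint32_t set_bits)
{
	uint32_t data = 0;
	int rval;

	rval = qla83xx_rd_reg(vha, reg, &data);
	if (rval != QLA_SUCCESS)
		return rval;

	return qla83xx_wr_reg(vha, reg, data | set_bits);
}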
6294int
6295qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6296{
6297 int rval;
6298 mbx_cmd_t mc;
6299 mbx_cmd_t *mcp = &mc;
6300 struct qla_hw_data *ha = vha->hw;
6301
6302 if (!IS_QLA83XX(ha))
6303 return QLA_FUNCTION_FAILED;
6304
6305 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6306
6307 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6308 mcp->out_mb = MBX_0;
6309 mcp->in_mb = MBX_1|MBX_0;
6310 mcp->tov = MBX_TOV_SECONDS;
6311 mcp->flags = 0;
6312 rval = qla2x00_mailbox_command(vha, mcp);
6313
6314 if (rval != QLA_SUCCESS) {
6315 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6316 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6317 rval, mcp->mb[0], mcp->mb[1]);
6318 qla2xxx_dump_fw(vha);
6319 } else {
6320 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6321 }
6322
6323 return rval;
6324}
6325
6326int
6327qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6328 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6329{
6330 int rval;
6331 mbx_cmd_t mc;
6332 mbx_cmd_t *mcp = &mc;
6333 uint8_t subcode = (uint8_t)options;
6334 struct qla_hw_data *ha = vha->hw;
6335
6336 if (!IS_QLA8031(ha))
6337 return QLA_FUNCTION_FAILED;
6338
6339 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6340
6341 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6342 mcp->mb[1] = options;
6343 mcp->out_mb = MBX_1|MBX_0;
6344 if (subcode & BIT_2) {
6345 mcp->mb[2] = LSW(start_addr);
6346 mcp->mb[3] = MSW(start_addr);
6347 mcp->mb[4] = LSW(end_addr);
6348 mcp->mb[5] = MSW(end_addr);
6349 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6350 }
6351 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6352 if (!(subcode & (BIT_2 | BIT_5)))
6353 mcp->in_mb |= MBX_4|MBX_3;
6354 mcp->tov = MBX_TOV_SECONDS;
6355 mcp->flags = 0;
6356 rval = qla2x00_mailbox_command(vha, mcp);
6357
6358 if (rval != QLA_SUCCESS) {
6359 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6360 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6361 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6362 mcp->mb[4]);
6363 qla2xxx_dump_fw(vha);
6364 } else {
6365 if (subcode & BIT_5)
6366 *sector_size = mcp->mb[1];
6367 else if (subcode & (BIT_6 | BIT_7)) {
6368 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6369 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6370 } else if (subcode & (BIT_3 | BIT_4)) {
6371 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6372 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6373 }
6374 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6375 }
6376
6377 return rval;
6378}
6379
6380int
6381qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6382 uint32_t size)
6383{
6384 int rval;
6385 mbx_cmd_t mc;
6386 mbx_cmd_t *mcp = &mc;
6387
6388 if (!IS_MCTP_CAPABLE(vha->hw))
6389 return QLA_FUNCTION_FAILED;
6390
6391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6392 "Entered %s.\n", __func__);
6393
6394 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6395 mcp->mb[1] = LSW(addr);
6396 mcp->mb[2] = MSW(req_dma);
6397 mcp->mb[3] = LSW(req_dma);
6398 mcp->mb[4] = MSW(size);
6399 mcp->mb[5] = LSW(size);
6400 mcp->mb[6] = MSW(MSD(req_dma));
6401 mcp->mb[7] = LSW(MSD(req_dma));
6402 mcp->mb[8] = MSW(addr);
6403 /* Setting RAM ID to valid */
6404 /* For MCTP RAM ID is 0x40 */
6405 mcp->mb[10] = BIT_7 | 0x40;
6406
6407 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6408 MBX_0;
6409
6410 mcp->in_mb = MBX_0;
6411 mcp->tov = MBX_TOV_SECONDS;
6412 mcp->flags = 0;
6413 rval = qla2x00_mailbox_command(vha, mcp);
6414
6415 if (rval != QLA_SUCCESS) {
6416 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6417 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6418 } else {
6419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6420 "Done %s.\n", __func__);
6421 }
6422
6423 return rval;
6424}
6425
6426int
6427qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6428 void *dd_buf, uint size, uint options)
6429{
6430 int rval;
6431 mbx_cmd_t mc;
6432 mbx_cmd_t *mcp = &mc;
6433 dma_addr_t dd_dma;
6434
6435 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6436 !IS_QLA28XX(vha->hw))
6437 return QLA_FUNCTION_FAILED;
6438
6439 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6440 "Entered %s.\n", __func__);
6441
6442 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6443 dd_buf, size, DMA_FROM_DEVICE);
6444 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6445 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6446 return QLA_MEMORY_ALLOC_FAILED;
6447 }
6448
6449 memset(dd_buf, 0, size);
6450
6451 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6452 mcp->mb[1] = options;
6453 mcp->mb[2] = MSW(LSD(dd_dma));
6454 mcp->mb[3] = LSW(LSD(dd_dma));
6455 mcp->mb[6] = MSW(MSD(dd_dma));
6456 mcp->mb[7] = LSW(MSD(dd_dma));
6457 mcp->mb[8] = size;
6458 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6459 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6460 mcp->buf_size = size;
6461 mcp->flags = MBX_DMA_IN;
6462 mcp->tov = MBX_TOV_SECONDS * 4;
6463 rval = qla2x00_mailbox_command(vha, mcp);
6464
6465 if (rval != QLA_SUCCESS) {
6466 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6467 } else {
6468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6469 "Done %s.\n", __func__);
6470 }
6471
6472 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6473 size, DMA_FROM_DEVICE);
6474
6475 return rval;
6476}
6477
6478int
6479qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
6480 struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
6481{
6482 int rval;
6483 dma_addr_t dd_dma;
6484 uint size = sizeof(dd->buf);
6485 uint16_t options = dd->options;
6486
6487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6488 "Entered %s.\n", __func__);
6489
6490 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6491 dd->buf, size, DMA_FROM_DEVICE);
6492 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6493 ql_log(ql_log_warn, vha, 0x1194,
6494 "Failed to map dma buffer.\n");
6495 return QLA_MEMORY_ALLOC_FAILED;
6496 }
6497
6498 memset(dd->buf, 0, size);
6499
6500 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6501 mcp->mb[1] = options;
6502 mcp->mb[2] = MSW(LSD(dd_dma));
6503 mcp->mb[3] = LSW(LSD(dd_dma));
6504 mcp->mb[6] = MSW(MSD(dd_dma));
6505 mcp->mb[7] = LSW(MSD(dd_dma));
6506 mcp->mb[8] = size;
6507 mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
6508 mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
6509 mcp->buf_size = size;
6510 mcp->flags = MBX_DMA_IN;
6511 mcp->tov = MBX_TOV_SECONDS * 4;
6512 rval = qla2x00_mailbox_command(vha, mcp);
6513
6514 if (rval != QLA_SUCCESS) {
6515 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6516 } else {
6517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6518 "Done %s.\n", __func__);
6519 }
6520
6521 dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
6522
6523 return rval;
6524}
6525
6526static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6527{
6528 sp->u.iocb_cmd.u.mbx.rc = res;
6529
6530 complete(&sp->u.iocb_cmd.u.mbx.comp);
6531 /* don't free sp here. Let the caller do the free */
6532}
6533
6534/*
6535 * This mailbox routine sends a MB command through the IOCB interface.
6536 * This allows non-critical (non chip-setup) commands to go
6537 * out in parallel.
6538 */
6539int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6540{
6541 int rval = QLA_FUNCTION_FAILED;
6542 srb_t *sp;
6543 struct srb_iocb *c;
6544
6545 if (!vha->hw->flags.fw_started)
6546 goto done;
6547
6548 /* ref: INIT */
6549 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6550 if (!sp)
6551 goto done;
6552
6553 c = &sp->u.iocb_cmd;
6554 init_completion(&c->u.mbx.comp);
6555
6556 sp->type = SRB_MB_IOCB;
6557 sp->name = mb_to_str(mcp->mb[0]);
6558 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6559 qla2x00_async_mb_sp_done);
6560
6561 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6562
6563 rval = qla2x00_start_sp(sp);
6564 if (rval != QLA_SUCCESS) {
6565 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6566 "%s: %s Failed submission. %x.\n",
6567 __func__, sp->name, rval);
6568 goto done_free_sp;
6569 }
6570
6571 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6572 sp->name, sp->handle);
6573
6574 wait_for_completion(&c->u.mbx.comp);
6575 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6576
6577 rval = c->u.mbx.rc;
6578 switch (rval) {
6579 case QLA_FUNCTION_TIMEOUT:
6580 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6581 __func__, sp->name, rval);
6582 break;
6583 case QLA_SUCCESS:
6584 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6585 __func__, sp->name);
6586 break;
6587 default:
6588 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6589 __func__, sp->name, rval);
6590 break;
6591 }
6592
6593done_free_sp:
6594 /* ref: INIT */
6595 kref_put(&sp->cmd_kref, qla2x00_sp_release);
6596done:
6597 return rval;
6598}
6599
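/*
 * Editorial example (not part of the upstream driver): a hedged sketch of a
 * process-context caller driving a mailbox command through the IOCB path
 * above, mirroring the *_wait() helpers later in this file.  The command
 * choice (MBC_GET_FIRMWARE_STATE) is only illustrative.
 */
static int example_mb_via_iocb(struct scsi_qla_host *vha, uint16_t *fw_state)
{
	mbx_cmd_t mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_FIRMWARE_STATE;

	/* Sleeps on a completion; must not be called from the DPC thread. */
	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval == QLA_SUCCESS)
		*fw_state = mc.mb[1];

	return rval;
}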
6600/*
6601 * qla24xx_gpdb_wait
6602 * NOTE: Do not call this routine from DPC thread
6603 */
6604int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6605{
6606 int rval = QLA_FUNCTION_FAILED;
6607 dma_addr_t pd_dma;
6608 struct port_database_24xx *pd;
6609 struct qla_hw_data *ha = vha->hw;
6610 mbx_cmd_t mc;
6611
6612 if (!vha->hw->flags.fw_started)
6613 goto done;
6614
6615 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6616 if (pd == NULL) {
6617 ql_log(ql_log_warn, vha, 0xd047,
6618 "Failed to allocate port database structure.\n");
6619 goto done_free_sp;
6620 }
6621
6622 memset(&mc, 0, sizeof(mc));
6623 mc.mb[0] = MBC_GET_PORT_DATABASE;
6624 mc.mb[1] = fcport->loop_id;
6625 mc.mb[2] = MSW(pd_dma);
6626 mc.mb[3] = LSW(pd_dma);
6627 mc.mb[6] = MSW(MSD(pd_dma));
6628 mc.mb[7] = LSW(MSD(pd_dma));
6629 mc.mb[9] = vha->vp_idx;
6630 mc.mb[10] = opt;
6631
6632 rval = qla24xx_send_mb_cmd(vha, &mc);
6633 if (rval != QLA_SUCCESS) {
6634 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6635 "%s: %8phC fail\n", __func__, fcport->port_name);
6636 goto done_free_sp;
6637 }
6638
6639 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6640
6641 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6642 __func__, fcport->port_name);
6643
6644done_free_sp:
6645 if (pd)
6646 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6647done:
6648 return rval;
6649}
6650
6651int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6652 struct port_database_24xx *pd)
6653{
6654 int rval = QLA_SUCCESS;
6655 uint64_t zero = 0;
6656 u8 current_login_state, last_login_state;
6657
6658 if (NVME_TARGET(vha->hw, fcport)) {
6659 current_login_state = pd->current_login_state >> 4;
6660 last_login_state = pd->last_login_state >> 4;
6661 } else {
6662 current_login_state = pd->current_login_state & 0xf;
6663 last_login_state = pd->last_login_state & 0xf;
6664 }
6665
6666 /* Check for logged in state. */
6667 if (current_login_state != PDS_PRLI_COMPLETE) {
6668 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6669 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6670 current_login_state, last_login_state, fcport->loop_id);
6671 rval = QLA_FUNCTION_FAILED;
6672 goto gpd_error_out;
6673 }
6674
6675 if (fcport->loop_id == FC_NO_LOOP_ID ||
6676 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6677 memcmp(fcport->port_name, pd->port_name, 8))) {
6678 /* We lost the device mid way. */
6679 rval = QLA_NOT_LOGGED_IN;
6680 goto gpd_error_out;
6681 }
6682
6683 /* Names are little-endian. */
6684 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6685 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6686
6687 /* Get port_id of device. */
6688 fcport->d_id.b.domain = pd->port_id[0];
6689 fcport->d_id.b.area = pd->port_id[1];
6690 fcport->d_id.b.al_pa = pd->port_id[2];
6691 fcport->d_id.b.rsvd_1 = 0;
6692
6693 ql_dbg(ql_dbg_disc, vha, 0x2062,
6694 "%8phC SVC Param w3 %02x%02x",
6695 fcport->port_name,
6696 pd->prli_svc_param_word_3[1],
6697 pd->prli_svc_param_word_3[0]);
6698
6699 if (NVME_TARGET(vha->hw, fcport)) {
6700 fcport->port_type = FCT_NVME;
6701 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6702 fcport->port_type |= FCT_NVME_INITIATOR;
6703 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6704 fcport->port_type |= FCT_NVME_TARGET;
6705 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6706 fcport->port_type |= FCT_NVME_DISCOVERY;
6707 } else {
6708 /* If not target must be initiator or unknown type. */
6709 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6710 fcport->port_type = FCT_INITIATOR;
6711 else
6712 fcport->port_type = FCT_TARGET;
6713 }
6714 /* Passback COS information. */
6715 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6716 FC_COS_CLASS2 : FC_COS_CLASS3;
6717
6718 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6719 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6720 fcport->conf_compl_supported = 1;
6721 }
6722
6723gpd_error_out:
6724 return rval;
6725}
6726
6727/*
6728 * qla24xx_gidlist_wait
6729 * NOTE: don't call this routine from DPC thread.
6730 */
6731int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6732 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6733{
6734 int rval = QLA_FUNCTION_FAILED;
6735 mbx_cmd_t mc;
6736
6737 if (!vha->hw->flags.fw_started)
6738 goto done;
6739
6740 memset(&mc, 0, sizeof(mc));
6741 mc.mb[0] = MBC_GET_ID_LIST;
6742 mc.mb[2] = MSW(id_list_dma);
6743 mc.mb[3] = LSW(id_list_dma);
6744 mc.mb[6] = MSW(MSD(id_list_dma));
6745 mc.mb[7] = LSW(MSD(id_list_dma));
6746 mc.mb[8] = 0;
6747 mc.mb[9] = vha->vp_idx;
6748
6749 rval = qla24xx_send_mb_cmd(vha, &mc);
6750 if (rval != QLA_SUCCESS) {
6751 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6752 "%s: fail\n", __func__);
6753 } else {
6754 *entries = mc.mb[1];
6755 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6756 "%s: done\n", __func__);
6757 }
6758done:
6759 return rval;
6760}
6761
6762int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6763{
6764 int rval;
6765 mbx_cmd_t mc;
6766 mbx_cmd_t *mcp = &mc;
6767
6768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6769 "Entered %s\n", __func__);
6770
6771 memset(mcp->mb, 0, sizeof(mcp->mb));
6772 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6773 mcp->mb[1] = 1;
6774 mcp->mb[2] = value;
6775 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6776 mcp->in_mb = MBX_2 | MBX_0;
6777 mcp->tov = MBX_TOV_SECONDS;
6778 mcp->flags = 0;
6779
6780 rval = qla2x00_mailbox_command(vha, mcp);
6781
6782 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6783 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6784
6785 return rval;
6786}
6787
6788int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6789{
6790 int rval;
6791 mbx_cmd_t mc;
6792 mbx_cmd_t *mcp = &mc;
6793
6794 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6795 "Entered %s\n", __func__);
6796
6797 memset(mcp->mb, 0, sizeof(mcp->mb));
6798 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6799 mcp->mb[1] = 0;
6800 mcp->out_mb = MBX_1 | MBX_0;
6801 mcp->in_mb = MBX_2 | MBX_0;
6802 mcp->tov = MBX_TOV_SECONDS;
6803 mcp->flags = 0;
6804
6805 rval = qla2x00_mailbox_command(vha, mcp);
6806 if (rval == QLA_SUCCESS)
6807 *value = mc.mb[2];
6808
6809 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6810 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6811
6812 return rval;
6813}
6814
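/*
 * Editorial example (not part of the upstream driver): a hedged sketch that
 * pairs the two ZIO-threshold helpers above - read the current value, then
 * program a new one only when it differs.  The wrapper name is illustrative.
 */
static int example_update_zio_threshold(scsi_qla_host_t *vha, uint16_t new_val)
{
	uint16_t cur = 0;
	int rval;

	rval = qla27xx_get_zio_threshold(vha, &cur);
	if (rval != QLA_SUCCESS || cur == new_val)
		return rval;

	return qla27xx_set_zio_threshold(vha, new_val);
}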
6815int
6816qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6817{
6818 struct qla_hw_data *ha = vha->hw;
6819 uint16_t iter, addr, offset;
6820 dma_addr_t phys_addr;
6821 int rval, c;
6822 u8 *sfp_data;
6823
6824 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6825 addr = 0xa0;
6826 phys_addr = ha->sfp_data_dma;
6827 sfp_data = ha->sfp_data;
6828 offset = c = 0;
6829
6830 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6831 if (iter == 4) {
6832 /* Skip to next device address. */
6833 addr = 0xa2;
6834 offset = 0;
6835 }
6836
6837 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6838 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6839 if (rval != QLA_SUCCESS) {
6840 ql_log(ql_log_warn, vha, 0x706d,
6841 "Unable to read SFP data (%x/%x/%x).\n", rval,
6842 addr, offset);
6843
6844 return rval;
6845 }
6846
6847 if (buf && (c < count)) {
6848 u16 sz;
6849
6850 if ((count - c) >= SFP_BLOCK_SIZE)
6851 sz = SFP_BLOCK_SIZE;
6852 else
6853 sz = count - c;
6854
6855 memcpy(buf, sfp_data, sz);
6856 buf += SFP_BLOCK_SIZE;
6857 c += sz;
6858 }
6859 phys_addr += SFP_BLOCK_SIZE;
6860 sfp_data += SFP_BLOCK_SIZE;
6861 offset += SFP_BLOCK_SIZE;
6862 }
6863
6864 return rval;
6865}
6866
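/*
 * Editorial example (not part of the upstream driver): a hedged sketch of
 * reading the whole SFP device map (pages 0xa0/0xa2) through
 * qla2x00_read_sfp_dev() above into a caller-supplied buffer.
 */
static int example_dump_sfp(scsi_qla_host_t *vha, char *buf, int len)
{
	/* The helper copies at most @len bytes; SFP_DEV_SIZE covers both pages. */
	if (len > SFP_DEV_SIZE)
		len = SFP_DEV_SIZE;

	return qla2x00_read_sfp_dev(vha, buf, len);
}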
6867int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6868 uint16_t *out_mb, int out_mb_sz)
6869{
6870 int rval = QLA_FUNCTION_FAILED;
6871 mbx_cmd_t mc;
6872
6873 if (!vha->hw->flags.fw_started)
6874 goto done;
6875
6876 memset(&mc, 0, sizeof(mc));
6877 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6878
6879 rval = qla24xx_send_mb_cmd(vha, &mc);
6880 if (rval != QLA_SUCCESS) {
6881 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6882 "%s: fail\n", __func__);
6883 } else {
6884 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6885 memcpy(out_mb, mc.mb, out_mb_sz);
6886 else
6887 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6888
6889 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6890 "%s: done\n", __func__);
6891 }
6892done:
6893 return rval;
6894}
6895
6896int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6897 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6898 uint32_t sfub_len)
6899{
6900 int rval;
6901 mbx_cmd_t mc;
6902 mbx_cmd_t *mcp = &mc;
6903
6904 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6905 mcp->mb[1] = opts;
6906 mcp->mb[2] = region;
6907 mcp->mb[3] = MSW(len);
6908 mcp->mb[4] = LSW(len);
6909 mcp->mb[5] = MSW(sfub_dma_addr);
6910 mcp->mb[6] = LSW(sfub_dma_addr);
6911 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6912 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6913 mcp->mb[9] = sfub_len;
6914 mcp->out_mb =
6915 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6916 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6917 mcp->tov = MBX_TOV_SECONDS;
6918 mcp->flags = 0;
6919 rval = qla2x00_mailbox_command(vha, mcp);
6920
6921 if (rval != QLA_SUCCESS) {
6922 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6923 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6924 mcp->mb[2]);
6925 }
6926
6927 return rval;
6928}
6929
6930int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6931 uint32_t data)
6932{
6933 int rval;
6934 mbx_cmd_t mc;
6935 mbx_cmd_t *mcp = &mc;
6936
6937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6938 "Entered %s.\n", __func__);
6939
6940 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6941 mcp->mb[1] = LSW(addr);
6942 mcp->mb[2] = MSW(addr);
6943 mcp->mb[3] = LSW(data);
6944 mcp->mb[4] = MSW(data);
6945 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6946 mcp->in_mb = MBX_1|MBX_0;
6947 mcp->tov = MBX_TOV_SECONDS;
6948 mcp->flags = 0;
6949 rval = qla2x00_mailbox_command(vha, mcp);
6950
6951 if (rval != QLA_SUCCESS) {
6952 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6953 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6954 } else {
6955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6956 "Done %s.\n", __func__);
6957 }
6958
6959 return rval;
6960}
6961
6962int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6963 uint32_t *data)
6964{
6965 int rval;
6966 mbx_cmd_t mc;
6967 mbx_cmd_t *mcp = &mc;
6968
6969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6970 "Entered %s.\n", __func__);
6971
6972 mcp->mb[0] = MBC_READ_REMOTE_REG;
6973 mcp->mb[1] = LSW(addr);
6974 mcp->mb[2] = MSW(addr);
6975 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6976 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6977 mcp->tov = MBX_TOV_SECONDS;
6978 mcp->flags = 0;
6979 rval = qla2x00_mailbox_command(vha, mcp);
6980
6981 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6982
6983 if (rval != QLA_SUCCESS) {
6984 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6985 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6986 } else {
6987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6988 "Done %s.\n", __func__);
6989 }
6990
6991 return rval;
6992}
6993
6994int
6995ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6996{
6997 struct qla_hw_data *ha = vha->hw;
6998 mbx_cmd_t mc;
6999 mbx_cmd_t *mcp = &mc;
7000 int rval;
7001
7002 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7003 return QLA_FUNCTION_FAILED;
7004
7005 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
7006 __func__, options);
7007
7008 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
7009 mcp->mb[1] = options;
7010 mcp->out_mb = MBX_1|MBX_0;
7011 mcp->in_mb = MBX_1|MBX_0;
7012 if (options & BIT_0) {
7013 if (options & BIT_1) {
7014 mcp->mb[2] = led[2];
7015 mcp->out_mb |= MBX_2;
7016 }
7017 if (options & BIT_2) {
7018 mcp->mb[3] = led[0];
7019 mcp->out_mb |= MBX_3;
7020 }
7021 if (options & BIT_3) {
7022 mcp->mb[4] = led[1];
7023 mcp->out_mb |= MBX_4;
7024 }
7025 } else {
7026 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
7027 }
7028 mcp->tov = MBX_TOV_SECONDS;
7029 mcp->flags = 0;
7030 rval = qla2x00_mailbox_command(vha, mcp);
7031 if (rval) {
7032 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
7033 __func__, rval, mcp->mb[0], mcp->mb[1]);
7034 return rval;
7035 }
7036
7037 if (options & BIT_0) {
7038 ha->beacon_blink_led = 0;
7039 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
7040 } else {
7041 led[2] = mcp->mb[2];
7042 led[0] = mcp->mb[3];
7043 led[1] = mcp->mb[4];
7044 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
7045 __func__, led[0], led[1], led[2]);
7046 }
7047
7048 return rval;
7049}
7050
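/*
 * Editorial example (not part of the upstream driver): a hedged sketch of
 * the "get" form of ql26xx_led_config() above.  With bit 0 of options clear
 * the firmware returns the three LED control words in led[].
 */
static int example_get_led_config(scsi_qla_host_t *vha, uint16_t led[3])
{
	/* options = 0: read back; BIT_0 (plus BIT_1..BIT_3) would write. */
	return ql26xx_led_config(vha, 0, led);
}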
7051/**
7052 * qla_no_op_mb(): This MB command is used to check if FW is still alive and
7053 * able to generate an interrupt. Otherwise, a timeout will trigger a
7054 * FW dump + reset.
7055 * @vha: host adapter pointer
7056 * Return: None
7057 */
7058void qla_no_op_mb(struct scsi_qla_host *vha)
7059{
7060 mbx_cmd_t mc;
7061 mbx_cmd_t *mcp = &mc;
7062 int rval;
7063
7064 memset(&mc, 0, sizeof(mc));
7065 mcp->mb[0] = 0; // noop cmd= 0
7066 mcp->out_mb = MBX_0;
7067 mcp->in_mb = MBX_0;
7068 mcp->tov = 5;
7069 mcp->flags = 0;
7070 rval = qla2x00_mailbox_command(vha, mcp);
7071
7072 if (rval) {
7073 ql_dbg(ql_dbg_async, vha, 0x7071,
7074 "Failed %s %x\n", __func__, rval);
7075 }
7076}
7077
7078int qla_mailbox_passthru(scsi_qla_host_t *vha,
7079 uint16_t *mbx_in, uint16_t *mbx_out)
7080{
7081 mbx_cmd_t mc;
7082 mbx_cmd_t *mcp = &mc;
7083 int rval = -EINVAL;
7084
7085 memset(&mc, 0, sizeof(mc));
7086 /* Receive all 32 registers' contents. */
7087 memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
7088
7089 mcp->out_mb = 0xFFFFFFFF;
7090 mcp->in_mb = 0xFFFFFFFF;
7091
7092 mcp->tov = MBX_TOV_SECONDS;
7093 mcp->flags = 0;
7094 mcp->bufp = NULL;
7095
7096 rval = qla2x00_mailbox_command(vha, mcp);
7097
7098 if (rval != QLA_SUCCESS) {
7099 ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
7100 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7101 } else {
7102 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
7103 __func__);
7104 /* Pass back all 32 registers' contents. */
7105 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
7106 }
7107
7108 return rval;
7109}
7110
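/*
 * Editorial example (not part of the upstream driver): a hedged sketch of
 * the raw mailbox passthrough above.  All 32 mailbox registers are handed
 * in and copied back out, so the caller owns both arrays; the no-op command
 * (mb[0] = 0) is used here purely for illustration.
 */
static int example_mb_passthru_noop(scsi_qla_host_t *vha)
{
	uint16_t mb_in[32] = { 0 };
	uint16_t mb_out[32] = { 0 };
	int rval;

	rval = qla_mailbox_passthru(vha, mb_in, mb_out);
	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x0000,	/* placeholder debug id */
		    "Passthru completion status mb[0]=%x.\n", mb_out[0]);

	return rval;
}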
