/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
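
/* Return 1 if the ndlp should be registered with the transport: the
 * node either has a known FC4 type or is a fabric node.
 */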
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
        if (ndlp->nlp_fc4_type ||
            ndlp->nlp_type & NLP_FABRIC)
                return 1;
        return 0;
}
/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host. While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event. This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
 */
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;

        if (!rport) {
                pr_err("**** %s: NULL rport, exit.\n", __func__);
                return -EINVAL;
        }

        rdata = rport->dd_data;
        if (!rdata) {
                pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
                       __func__, rport, rport->scsi_target_id);
                return -EINVAL;
        }

        ndlp = rdata->pnode;
        if (!ndlp) {
                pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
                        __func__, rport, rport->scsi_target_id);
                return -EINVAL;
        }

        if (!ndlp->vport) {
                pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
                       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
                       rport->scsi_target_id);
                return -EINVAL;
        }
        return 0;
}
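
/* terminate_rport_io callback for the FC transport. Aborts all
 * outstanding target I/O for this rport's node if it has a SCSI ID.
 */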
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *vport;

        if (lpfc_rport_invalid(rport))
                return;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;
        vport = ndlp->vport;
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                              "rport terminate: sid:x%x did:x%x flg:x%x",
                              ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        if (ndlp->nlp_sid != NLP_NO_SID)
                lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
146 | |
147 | /* |
148 | * This function will be called when dev_loss_tmo fire. |
149 | */ |
150 | void |
151 | lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) |
152 | { |
153 | struct lpfc_nodelist *ndlp; |
154 | struct lpfc_vport *vport; |
155 | struct lpfc_hba *phba; |
156 | struct lpfc_work_evt *evtp; |
157 | unsigned long iflags; |
158 | |
159 | ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; |
160 | if (!ndlp) |
161 | return; |
162 | |
163 | vport = ndlp->vport; |
164 | phba = vport->phba; |
165 | |
166 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
167 | "rport devlosscb: sid:x%x did:x%x flg:x%x" , |
168 | ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); |
169 | |
170 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, |
171 | "3181 dev_loss_callbk x%06x, rport x%px flg x%x " |
172 | "load_flag x%lx refcnt %u state %d xpt x%x\n" , |
173 | ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, |
174 | vport->load_flag, kref_read(&ndlp->kref), |
175 | ndlp->nlp_state, ndlp->fc4_xpt_flags); |
176 | |
177 | /* Don't schedule a worker thread event if the vport is going down. */ |
178 | if (test_bit(FC_UNLOADING, &vport->load_flag)) { |
179 | spin_lock_irqsave(&ndlp->lock, iflags); |
180 | ndlp->rport = NULL; |
181 | |
182 | /* The scsi_transport is done with the rport so lpfc cannot |
183 | * call to unregister. Remove the scsi transport reference |
184 | * and clean up the SCSI transport node details. |
185 | */ |
186 | if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) { |
187 | ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; |
188 | |
189 | /* NVME transport-registered rports need the |
190 | * NLP_XPT_REGD flag to complete an unregister. |
191 | */ |
192 | if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) |
193 | ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; |
194 | spin_unlock_irqrestore(lock: &ndlp->lock, flags: iflags); |
195 | lpfc_nlp_put(ndlp); |
196 | spin_lock_irqsave(&ndlp->lock, iflags); |
197 | } |
198 | |
199 | /* Only 1 thread can drop the initial node reference. If |
200 | * another thread has set NLP_DROPPED, this thread is done. |
201 | */ |
202 | if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && |
203 | !(ndlp->nlp_flag & NLP_DROPPED)) { |
204 | ndlp->nlp_flag |= NLP_DROPPED; |
205 | spin_unlock_irqrestore(lock: &ndlp->lock, flags: iflags); |
206 | lpfc_nlp_put(ndlp); |
207 | return; |
208 | } |
209 | |
210 | spin_unlock_irqrestore(lock: &ndlp->lock, flags: iflags); |
211 | return; |
212 | } |
213 | |
214 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) |
215 | return; |
216 | |
217 | if (rport->port_name != wwn_to_u64(wwn: ndlp->nlp_portname.u.wwn)) |
218 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
219 | "6789 rport name %llx != node port name %llx" , |
220 | rport->port_name, |
221 | wwn_to_u64(ndlp->nlp_portname.u.wwn)); |
222 | |
223 | evtp = &ndlp->dev_loss_evt; |
224 | |
225 | if (!list_empty(head: &evtp->evt_listp)) { |
226 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
227 | "6790 rport name %llx dev_loss_evt pending\n" , |
228 | rport->port_name); |
229 | return; |
230 | } |
231 | |
232 | spin_lock_irqsave(&ndlp->lock, iflags); |
233 | ndlp->nlp_flag |= NLP_IN_DEV_LOSS; |
234 | |
235 | /* If there is a PLOGI in progress, and we are in a |
236 | * NLP_NPR_2B_DISC state, don't turn off the flag. |
237 | */ |
238 | if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) |
239 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
240 | |
241 | /* |
242 | * The backend does not expect any more calls associated with this |
243 | * rport. Remove the association between rport and ndlp. |
244 | */ |
245 | ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; |
246 | ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL; |
247 | ndlp->rport = NULL; |
248 | spin_unlock_irqrestore(lock: &ndlp->lock, flags: iflags); |
249 | |
250 | if (phba->worker_thread) { |
251 | /* We need to hold the node by incrementing the reference |
252 | * count until this queued work is done |
253 | */ |
254 | evtp->evt_arg1 = lpfc_nlp_get(ndlp); |
255 | |
256 | spin_lock_irqsave(&phba->hbalock, iflags); |
257 | if (evtp->evt_arg1) { |
258 | evtp->evt = LPFC_EVT_DEV_LOSS; |
259 | list_add_tail(new: &evtp->evt_listp, head: &phba->work_list); |
260 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
261 | lpfc_worker_wake_up(phba); |
262 | return; |
263 | } |
264 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
265 | } else { |
266 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, |
267 | "3188 worker thread is stopped %s x%06x, " |
268 | " rport x%px flg x%x load_flag x%lx refcnt " |
269 | "%d\n" , __func__, ndlp->nlp_DID, |
270 | ndlp->rport, ndlp->nlp_flag, |
271 | vport->load_flag, kref_read(&ndlp->kref)); |
272 | if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { |
273 | spin_lock_irqsave(&ndlp->lock, iflags); |
274 | /* Node is in dev loss. No further transaction. */ |
275 | ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; |
276 | spin_unlock_irqrestore(lock: &ndlp->lock, flags: iflags); |
277 | lpfc_disc_state_machine(vport, ndlp, NULL, |
278 | NLP_EVT_DEVICE_RM); |
279 | } |
280 | } |
281 | } |

/**
 * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
 * @vport: Pointer to vport context object.
 *
 * This function checks for idle VMID entries related to a particular vport
 * and frees any entries found unused/idle.
 **/
static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
{
        u16 keep;
        u32 difftime = 0, r, bucket;
        u64 *lta;
        int cpu;
        struct lpfc_vmid *vmp;

        write_lock(&vport->vmid_lock);

        if (!vport->cur_vmid_cnt)
                goto out;

        /* iterate through the table */
        hash_for_each(vport->hash_table, bucket, vmp, hnode) {
                keep = 0;
                if (vmp->flag & LPFC_VMID_REGISTERED) {
                        /* check if the particular VMID is in use on any
                         * of the available per-cpu variables
                         */
                        for_each_possible_cpu(cpu) {
                                /* if last access time is less than timeout */
                                lta = per_cpu_ptr(vmp->last_io_time, cpu);
                                if (!lta)
                                        continue;
                                difftime = (jiffies) - (*lta);
                                if ((vport->vmid_inactivity_timeout *
                                     JIFFIES_PER_HR) > difftime) {
                                        keep = 1;
                                        break;
                                }
                        }

                        /* if none of the cpus have been used by the vm,
                         * remove the entry if already registered
                         */
                        if (!keep) {
                                /* mark the entry for deregistration */
                                vmp->flag = LPFC_VMID_DE_REGISTER;
                                write_unlock(&vport->vmid_lock);
                                if (vport->vmid_priority_tagging)
                                        r = lpfc_vmid_uvem(vport, vmp, false);
                                else
                                        r = lpfc_vmid_cmd(vport,
                                                          SLI_CTAS_DAPP_IDENT,
                                                          vmp);

                                /* decrement number of active vms and mark
                                 * entry in slot as free
                                 */
                                write_lock(&vport->vmid_lock);
                                if (!r) {
                                        struct lpfc_vmid *ht = vmp;

                                        vport->cur_vmid_cnt--;
                                        ht->flag = LPFC_VMID_SLOT_FREE;
                                        free_percpu(ht->last_io_time);
                                        ht->last_io_time = NULL;
                                        hash_del(&ht->hnode);
                                }
                        }
                }
        }
 out:
        write_unlock(&vport->vmid_lock);
}

/**
 * lpfc_check_inactive_vmid - VMID inactivity checker
 * @phba: Pointer to hba context object.
 *
 * This function is called from the worker thread to determine if an entry in
 * the VMID table can be released because no I/O activity has been seen from
 * that particular VM for the specified time. When this happens, the entry in
 * the table is released and the resources on the switch are cleared as well.
 **/
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (!vports)
                return;

        for (i = 0; i <= phba->max_vports; i++) {
                if ((!vports[i]) && (i == 0))
                        vport = phba->pport;
                else
                        vport = vports[i];
                if (!vport)
                        break;

                lpfc_check_inactive_vmid_one(vport);
        }
        lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to remote node object.
 *
 * If the NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery
 * of a node during dev_loss_tmo processing, this function restores the
 * nlp_put kref decrement from lpfc_dev_loss_tmo_handler.
 **/
void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
                            struct lpfc_nodelist *ndlp)
{
        unsigned long iflags;

        spin_lock_irqsave(&ndlp->lock, iflags);
        if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
                ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
                spin_unlock_irqrestore(&ndlp->lock, iflags);
                lpfc_nlp_get(ndlp);
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
                                 "8438 Devloss timeout reversed on DID x%x "
                                 "refcnt %d ndlp %p flag x%x "
                                 "port_state = x%x\n",
                                 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
                                 ndlp->nlp_flag, vport->port_state);
                return;
        }
        spin_unlock_irqrestore(&ndlp->lock, iflags);
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise, it
 * returns 0 when no remote node is still using the FCF when the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        uint8_t *name;
        int warn_on = 0;
        int fcf_inuse = 0;
        bool recovering = false;
        struct fc_vport *fc_vport = NULL;
        unsigned long iflags;

        vport = ndlp->vport;
        name = (uint8_t *)&ndlp->nlp_portname;
        phba = vport->phba;

        if (phba->sli_rev == LPFC_SLI_REV4)
                fcf_inuse = lpfc_fcf_inuse(phba);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                              "rport devlosstmo:did:x%x type:x%x id:x%x",
                              ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
                         __func__, ndlp->nlp_DID, ndlp->nlp_flag,
                         ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

        /* If the driver is recovering the rport, ignore devloss. */
        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0284 Devloss timeout Ignored on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID);

                spin_lock_irqsave(&ndlp->lock, iflags);
                ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
                spin_unlock_irqrestore(&ndlp->lock, iflags);
                return fcf_inuse;
        }

        /* Fabric nodes are done. */
        if (ndlp->nlp_type & NLP_FABRIC) {
                spin_lock_irqsave(&ndlp->lock, iflags);

                /* The driver has to account for a race where a fabric
                 * node may be in recovery when dev_loss_tmo expires.
                 * When this happens, the driver has to allow node recovery.
                 */
                switch (ndlp->nlp_DID) {
                case Fabric_DID:
                        fc_vport = vport->fc_vport;
                        if (fc_vport) {
                                /* NPIV path. */
                                if (fc_vport->vport_state ==
                                    FC_VPORT_INITIALIZING)
                                        recovering = true;
                        } else {
                                /* Physical port path. */
                                if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
                                        recovering = true;
                        }
                        break;
                case Fabric_Cntl_DID:
                        if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
                                recovering = true;
                        break;
                case FDMI_DID:
                        fallthrough;
                case NameServer_DID:
                        if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
                            ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
                                recovering = true;
                        break;
                default:
                        /* Ensure the nlp_DID at least has the correct prefix.
                         * The fabric domain controller's last three nibbles
                         * vary so we handle it in the default case.
                         */
                        if (ndlp->nlp_DID & Fabric_DID_MASK) {
                                if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
                                    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
                                        recovering = true;
                        }
                        break;
                }
                spin_unlock_irqrestore(&ndlp->lock, iflags);

                /* Mark the NLP_IN_RECOV_POST_DEV_LOSS flag to know whether
                 * reversing the following lpfc_nlp_put is necessary after
                 * the fabric node is recovered.
                 */
                if (recovering) {
                        lpfc_printf_vlog(vport, KERN_INFO,
                                         LOG_DISCOVERY | LOG_NODE,
                                         "8436 Devloss timeout marked on "
                                         "DID x%x refcnt %d ndlp %p "
                                         "flag x%x port_state = x%x\n",
                                         ndlp->nlp_DID, kref_read(&ndlp->kref),
                                         ndlp, ndlp->nlp_flag,
                                         vport->port_state);
                        spin_lock_irqsave(&ndlp->lock, iflags);
                        ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
                        spin_unlock_irqrestore(&ndlp->lock, iflags);
                } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                        /* Fabric node fully recovered before this dev_loss_tmo
                         * queue work is processed. Thus, ignore the
                         * dev_loss_tmo event.
                         */
                        lpfc_printf_vlog(vport, KERN_INFO,
                                         LOG_DISCOVERY | LOG_NODE,
                                         "8437 Devloss timeout ignored on "
                                         "DID x%x refcnt %d ndlp %p "
                                         "flag x%x port_state = x%x\n",
                                         ndlp->nlp_DID, kref_read(&ndlp->kref),
                                         ndlp, ndlp->nlp_flag,
                                         vport->port_state);
                        return fcf_inuse;
                }

                spin_lock_irqsave(&ndlp->lock, iflags);
                ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
                spin_unlock_irqrestore(&ndlp->lock, iflags);
                lpfc_nlp_put(ndlp);
                return fcf_inuse;
        }

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi,
                                 kref_read(&ndlp->kref));
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
                                 "0204 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }
        spin_lock_irqsave(&ndlp->lock, iflags);
        ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
        spin_unlock_irqrestore(&ndlp->lock, iflags);

        /* If we are devloss, but we are in the process of rediscovering the
         * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
         */
        if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
            ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
                return fcf_inuse;
        }

        if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

        return fcf_inuse;
}

static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (!vports)
                return;

        for (i = 0; i <= phba->max_vports; i++) {
                if ((!vports[i]) && (i == 0))
                        vport = phba->pport;
                else
                        vport = vports[i];
                if (!vport)
                        break;

                if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
                        if (!lpfc_issue_els_qfpa(vport))
                                vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node that had been using the FCF, when this
 * routine is invoked, it is guaranteed that no remote node is still using
 * the FCF. In that case, if the FIP engine is neither in the FCF table scan
 * process nor the roundrobin failover process, the in-use FCF is
 * unregistered. If the FIP engine is in the FCF discovery process, the
 * devloss timeout state is set for either the FCF table scan process or the
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
                                    uint32_t nlp_did)
{
        /* If the devloss timeout happened to a remote node while the FCF
         * was no longer in use, do nothing.
         */
        if (!fcf_inuse)
                return;

        if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
                spin_lock_irq(&phba->hbalock);
                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                        if (phba->hba_flag & HBA_DEVLOSS_TMO) {
                                spin_unlock_irq(&phba->hbalock);
                                return;
                        }
                        phba->hba_flag |= HBA_DEVLOSS_TMO;
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2847 Last remote node (x%x) using "
                                        "FCF devloss tmo\n", nlp_did);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2868 Devloss tmo to FCF rediscovery "
                                        "in progress\n");
                        return;
                }
                if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2869 Devloss tmo to idle FIP engine, "
                                        "unreg in-use FCF and rescan.\n");
                        /* Unregister in-use FCF and rescan */
                        lpfc_unregister_fcf_rescan(phba);
                        return;
                }
                spin_unlock_irq(&phba->hbalock);
                if (phba->hba_flag & FCF_TS_INPROG)
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2870 FCF table scan in progress\n");
                if (phba->hba_flag & FCF_RR_INPROG)
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2871 FLOGI roundrobin FCF failover "
                                        "in progress\n");
        }
        lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions that need to post events from
 * interrupt context. It allocates the data structure required for posting
 * an event. It also keeps track of the number of pending events and
 * prevents an event storm when there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba)
{
        struct lpfc_fast_path_event *ret;

        /* If there are a lot of fast events, do not exhaust memory */
        if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
                return NULL;

        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
                      GFP_ATOMIC);
        if (ret) {
                atomic_inc(&phba->fast_event_count);
                INIT_LIST_HEAD(&ret->work_evt.evt_listp);
                ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
        }
        return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object to be freed.
 *
 * This function frees the data structure allocated for posting events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
                   struct lpfc_fast_path_event *evt)
{
        atomic_dec(&phba->fast_event_count);
        kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc transport
 * netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
                       struct lpfc_work_evt *evtp)
{
        unsigned long evt_category, evt_sub_category;
        struct lpfc_fast_path_event *fast_evt_data;
        char *evt_data;
        uint32_t evt_data_size;
        struct Scsi_Host *shost;

        fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
                                     work_evt);

        evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
        evt_sub_category = (unsigned long) fast_evt_data->un.
                fabric_evt.subcategory;
        shost = lpfc_shost_from_vport(fast_evt_data->vport);
        if (evt_category == FC_REG_FABRIC_EVENT) {
                if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
                        evt_data = (char *) &fast_evt_data->un.read_check_error;
                        evt_data_size = sizeof(fast_evt_data->un.
                                               read_check_error);
                } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
                           (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
                        evt_data = (char *) &fast_evt_data->un.fabric_evt;
                        evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
                } else {
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else if (evt_category == FC_REG_SCSI_EVENT) {
                switch (evt_sub_category) {
                case LPFC_EVENT_QFULL:
                case LPFC_EVENT_DEVBSY:
                        evt_data = (char *) &fast_evt_data->un.scsi_evt;
                        evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
                        break;
                case LPFC_EVENT_CHECK_COND:
                        evt_data = (char *) &fast_evt_data->un.check_cond_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                               check_cond_evt);
                        break;
                case LPFC_EVENT_VARQUEDEPTH:
                        evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                               queue_depth_evt);
                        break;
                default:
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else {
                lpfc_free_fast_evt(phba, fast_evt_data);
                return;
        }

        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_vendor_event(shost,
                                          fc_get_event_number(),
                                          evt_data_size,
                                          evt_data,
                                          LPFC_NL_VENDOR_ID);

        lpfc_free_fast_evt(phba, fast_evt_data);
}
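
/* Drain the HBA work list, dispatching each queued worker event and
 * releasing any node references taken when the events were queued.
 */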
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt *evtp = NULL;
        struct lpfc_nodelist *ndlp;
        int free_evt;
        int fcf_inuse;
        uint32_t nlp_did;
        bool hba_pci_err;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        if (!hba_pci_err) {
                                lpfc_els_retry_delay_handler(ndlp);
                                free_evt = 0; /* evt is part of ndlp */
                        }
                        /* decrement the node reference count held
                         * for this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        /* decrement the node reference count held for
                         * this queued work
                         */
                        nlp_did = ndlp->nlp_DID;
                        lpfc_nlp_put(ndlp);
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                lpfc_sli4_post_dev_loss_tmo_handler(phba,
                                                                    fcf_inuse,
                                                                    nlp_did);
                        break;
                case LPFC_EVT_RECOVER_PORT:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        if (!hba_pci_err) {
                                lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
                                free_evt = 0;
                        }
                        /* decrement the node reference count held for
                         * this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_FASTPATH_MGMT_EVT:
                        lpfc_send_fastpath_evt(phba, evtp);
                        free_evt = 0;
                        break;
                case LPFC_EVT_RESET_HBA:
                        if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
                                lpfc_reset_hba(phba);
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);
}
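
/* Service routine for the worker thread: handle host attention events,
 * SLI4 events, and per-vport timeout events, then drain the work list.
 */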
static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        uint32_t ha_copy, status, control, work_port_events;
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;
        bool hba_pci_err;

        hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);
        if (hba_pci_err)
                ha_copy = 0;

        /* First, try to post the next mailbox command to SLI4 device */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
                lpfc_sli4_post_async_mbox(phba);

        if (ha_copy & HA_ERATT) {
                /* Handle the error attention event */
                lpfc_handle_eratt(phba);

                if (phba->fw_dump_cmpl) {
                        complete(phba->fw_dump_cmpl);
                        phba->fw_dump_cmpl = NULL;
                }
        }

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);

        /* Handle VMID Events */
        if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
                if (phba->pport->work_port_events &
                    WORKER_CHECK_VMID_ISSUE_QFPA) {
                        lpfc_check_vmid_qfpa_issue(phba);
                        phba->pport->work_port_events &=
                                ~WORKER_CHECK_VMID_ISSUE_QFPA;
                }
                if (phba->pport->work_port_events &
                    WORKER_CHECK_INACTIVE_VMID) {
                        lpfc_check_inactive_vmid(phba);
                        phba->pport->work_port_events &=
                                ~WORKER_CHECK_INACTIVE_VMID;
                }
        }

        /* Process SLI4 events */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
                if (phba->hba_flag & HBA_RRQ_ACTIVE)
                        lpfc_handle_rrq_active(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
                        lpfc_sli4_async_event_proc(phba);
                if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
                        spin_lock_irq(&phba->hbalock);
                        phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
                        lpfc_sli4_fcf_redisc_event_proc(phba);
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
                         */
                        if (vports[i] == NULL && i == 0)
                                vport = phba->pport;
                        else
                                vport = vports[i];
                        if (vport == NULL)
                                break;
                        spin_lock_irq(&vport->work_port_lock);
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
                        if (hba_pci_err)
                                continue;
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
                                lpfc_els_timeout_handler(vport);
                        if (work_port_events & WORKER_HB_TMO)
                                lpfc_hb_timeout_handler(phba);
                        if (work_port_events & WORKER_MBOX_TMO)
                                lpfc_mbox_timeout_handler(phba);
                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                                lpfc_unblock_fabric_iocbs(phba);
                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_DELAYED_DISC_TMO)
                                lpfc_delayed_disc_timeout_handler(vport);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        pring = lpfc_phba_elsring(phba);
        status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
        if (pring && (status & HA_RXMASK ||
                      pring->flag & LPFC_DEFERRED_RING_EVENT ||
                      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Preserve legacy behavior. */
                        if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
                                set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        /* The driver could have an abort request completed
                         * in the queue when the link goes down. Allow for
                         * this transition.
                         */
                        if (phba->link_state >= LPFC_LINK_DOWN ||
                            phba->link_flag & LS_MDS_LOOPBACK) {
                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                                lpfc_sli_handle_slow_ring_event(phba, pring,
                                                                (status &
                                                                 HA_RXMASK));
                        }
                }
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_drain_txq(phba);
                /*
                 * Turn on Ring interrupts
                 */
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        spin_lock_irq(&phba->hbalock);
                        control = readl(phba->HCregaddr);
                        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Enable ring: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);

                                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                        } else {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Ring ok: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }
        lpfc_work_list_done(phba);
}
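
/* Worker thread entry point. Sleeps on phba->work_waitq until either
 * LPFC_DATA_READY is set or the kthread is asked to stop.
 */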
int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;

        set_user_nice(current, MIN_NICE);
        current->flags |= PF_NOFREEZE;
        phba->data_flags = 0;

        while (!kthread_should_stop()) {
                /* wait and check worker queue activities */
                rc = wait_event_interruptible(phba->work_waitq,
                                              (test_and_clear_bit(LPFC_DATA_READY,
                                                                  &phba->data_flags)
                                               || kthread_should_stop()));
                /* Signal wakeup shall terminate the worker thread */
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
                }

                /* Attend pending lpfc data processing */
                lpfc_work_done(phba);
        }
        phba->worker_thread = NULL;
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0432 Worker thread stopped.\n");
        return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt *evtp;
        unsigned long flags;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
         * will be queued to the worker thread for processing.
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;

        evtp->evt_arg1 = arg1;
        evtp->evt_arg2 = arg2;
        evtp->evt = evt;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        lpfc_worker_wake_up(phba);

        return 1;
}
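
/* Walk the vport node list on link failure: unregister RPIs where
 * required and run each node through the discovery state machine with
 * either a DEVICE_RM or DEVICE_RECOVERY event.
 */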
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                    ((vport->port_type == LPFC_NPIV_PORT) &&
                     ((ndlp->nlp_DID == NameServer_DID) ||
                      (ndlp->nlp_DID == FDMI_DID) ||
                      (ndlp->nlp_DID == Fabric_Cntl_DID))))
                        lpfc_unreg_rpi(vport, ndlp);

                /* Leave Fabric nodes alone on link down */
                if ((phba->sli_rev < LPFC_SLI_REV4) &&
                    (!remove && ndlp->nlp_type & NLP_FABRIC))
                        continue;

                /* Notify transport of connectivity loss to trigger cleanup. */
                if (phba->nvmet_support &&
                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
                        lpfc_nvmet_invalidate_host(phba, ndlp);

                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        remove
                                        ? NLP_EVT_DEVICE_RM
                                        : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_sli4_unreg_all_rpis(vport);
                lpfc_mbx_unreg_vpi(vport);
                set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
        }
}
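
/* Per-vport link failure cleanup: flush RSCN and ELS activity, clean
 * up RPIs, and stop the discovery timer.
 */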
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
        lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

        /* Cleanup any outstanding received buffers */
        lpfc_cleanup_rcv_buffers(vport);

        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        lpfc_cleanup_rpis(vport, 0);

        /* Turn off discovery timer if it's running */
        lpfc_can_disctmo(vport);
}
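
/* Handle link down for a single vport, including posting the
 * FCH_EVT_LINKDOWN event to the FC transport.
 */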
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_event(shost, fc_get_event_number(),
                                   FCH_EVT_LINKDOWN, 0);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Link Down: state:x%x rtry:x%x flg:x%x",
                              vport->port_state, vport->fc_ns_retry,
                              vport->fc_flag);

        lpfc_port_link_failure(vport);

        /* Stop delayed Nport discovery */
        clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
        del_timer_sync(&vport->delayed_disc_tmo);

        if (phba->sli_rev == LPFC_SLI_REV4 &&
            vport->port_type == LPFC_PHYSICAL_PORT &&
            phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
                /* Assume success on link up */
                phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
        }
}
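
/* Handle link down for the adapter: block SCSI I/O, reset link and FCF
 * state, and issue a link down to every vport.
 */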
int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t *mb;
        int i;
        int offline;

        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;

        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);
        offline = pci_channel_offline(phba->pcidev);

        phba->defer_flogi_acc_flag = false;

        /* Clear external loopback plug detected flag */
        phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
        spin_unlock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                if (phba->sli4_hba.conf_trunk) {
                        phba->trunk_link.link0.state = 0;
                        phba->trunk_link.link1.state = 0;
                        phba->trunk_link.link2.state = 0;
                        phba->trunk_link.link3.state = 0;
                        phba->trunk_link.phy_lnk_speed =
                                LPFC_LINK_SPEED_UNKNOWN;
                        phba->sli4_hba.link_state.logical_speed =
                                LPFC_LINK_SPEED_UNKNOWN;
                }
                clear_bit(FC_LBIT, &phba->pport->fc_flag);
        }
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);

                        vports[i]->fc_myDID = 0;

                        if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
                            (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
                                if (phba->nvmet_support)
                                        lpfc_nvmet_update_targetport(phba);
                                else
                                        lpfc_nvme_update_localport(vports[i]);
                        }
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);

        /* Clean up any SLI3 firmware default rpi's */
        if (phba->sli_rev > LPFC_SLI_REV3 || offline)
                goto skip_unreg_did;

        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

 skip_unreg_did:
        /* Setup myDID for link up if we are in pt2pt mode */
        if (test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        mb->vport = vport;
                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                clear_bit(FC_PT2PT, &phba->pport->fc_flag);
                clear_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag);
                spin_lock_irq(shost->host_lock);
                phba->pport->rcv_flogi_cnt = 0;
                spin_unlock_irq(shost->host_lock);
        }
        return 0;
}
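
/* On link up, reset each node's FC4 type and unregister RPIs for nodes
 * that must start over with PLOGI.
 */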
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);

                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if (ndlp->nlp_type & NLP_FABRIC) {
                        /* On link up it's safe to clean up the ndlp
                         * from Fabric connections.
                         */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        /* Fail outstanding IO now since this device is
                         * marked for PLOGI.
                         */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
}
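
/* Bring a single vport to link up state and reset its discovery
 * flags.
 */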
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;

        if (test_bit(FC_UNLOADING, &vport->load_flag))
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Link Up: top:x%x speed:x%x flg:x%x",
                              phba->fc_topology, phba->fc_linkspeed,
                              phba->link_flag);

        /* If NPIV is not enabled, only bring the physical port up */
        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            (vport != phba->pport))
                return;

        if (phba->defer_flogi_acc_flag) {
                clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
                clear_bit(FC_RSCN_MODE, &vport->fc_flag);
                clear_bit(FC_NLP_MORE, &vport->fc_flag);
                clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
        } else {
                clear_bit(FC_PT2PT, &vport->fc_flag);
                clear_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
                clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
                clear_bit(FC_RSCN_MODE, &vport->fc_flag);
                clear_bit(FC_NLP_MORE, &vport->fc_flag);
                clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
        }
        set_bit(FC_NDISC_ACTIVE, &vport->fc_flag);

        spin_lock_irq(shost->host_lock);
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);
        lpfc_setup_fdmi_mask(vport);

        lpfc_linkup_cleanup_nodes(vport);
}
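
/* Handle link up for the adapter: unblock fabric iocbs and bring every
 * vport to link up state.
 */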
static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;
        struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

        phba->link_state = LPFC_LINK_UP;

        /* Unblock fabric iocbs if they are blocked */
        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        del_timer_sync(&phba->fabric_block_timer);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);

        /* Clear the pport flogi counter in case the link down was
         * absorbed without an ACQE. No lock here - in worker thread
         * and discovery is synchronized.
         */
        spin_lock_irq(shost->host_lock);
        phba->pport->rcv_flogi_cnt = 0;
        spin_unlock_irq(shost->host_lock);

        /* reinitialize initial HBA flag */
        phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);

        return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct lpfc_sli *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->u.mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0320 CLEAR_LA mbxStatus error x%x hba "
                                 "state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->port_type == LPFC_PHYSICAL_PORT)
                phba->link_state = LPFC_HBA_READY;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0225 Device Discovery completes\n");
        mempool_free(pmb, phba->mbox_mem_pool);

        clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
}
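
/* Completion handler for the CONFIG_LINK mailbox command. On success,
 * either wait for FAN (public loop without FC_LBIT) or start discovery
 * by issuing a FLOGI.
 */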
1493 | void |
1494 | lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
1495 | { |
1496 | struct lpfc_vport *vport = pmb->vport; |
1497 | LPFC_MBOXQ_t *sparam_mb; |
1498 | u16 status = pmb->u.mb.mbxStatus; |
1499 | int rc; |
1500 | |
1501 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
1502 | |
1503 | if (status) |
1504 | goto out; |
1505 | |
1506 | /* don't perform discovery for SLI4 loopback diagnostic test */ |
1507 | if ((phba->sli_rev == LPFC_SLI_REV4) && |
1508 | !(phba->hba_flag & HBA_FCOE_MODE) && |
1509 | (phba->link_flag & LS_LOOPBACK_MODE)) |
1510 | return; |
1511 | |
1512 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && |
1513 | test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) && |
1514 | !test_bit(FC_LBIT, &vport->fc_flag)) { |
1515 | /* Need to wait for FAN - use discovery timer |
1516 | * for timeout. port_state is identically |
1517 | * LPFC_LOCAL_CFG_LINK while waiting for FAN |
1518 | */ |
1519 | lpfc_set_disctmo(vport); |
1520 | return; |
1521 | } |
1522 | |
1523 | /* Start discovery by sending a FLOGI. port_state is identically |
1524 | * LPFC_FLOGI while waiting for FLOGI cmpl. |
1525 | */ |
1526 | if (vport->port_state != LPFC_FLOGI) { |
1527 | /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if |
1528 | * bb-credit recovery is in place. |
1529 | */ |
1530 | if (phba->bbcredit_support && phba->cfg_enable_bbcr && |
1531 | !(phba->link_flag & LS_LOOPBACK_MODE)) { |
1532 | sparam_mb = mempool_alloc(pool: phba->mbox_mem_pool, |
1533 | GFP_KERNEL); |
1534 | if (!sparam_mb) |
1535 | goto sparam_out; |
1536 | |
1537 | rc = lpfc_read_sparam(phba, sparam_mb, 0); |
1538 | if (rc) { |
1539 | mempool_free(element: sparam_mb, pool: phba->mbox_mem_pool); |
1540 | goto sparam_out; |
1541 | } |
1542 | sparam_mb->vport = vport; |
1543 | sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; |
1544 | rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT); |
1545 | if (rc == MBX_NOT_FINISHED) { |
1546 | lpfc_mbox_rsrc_cleanup(phba, mbox: sparam_mb, |
1547 | locked: MBOX_THD_UNLOCKED); |
1548 | goto sparam_out; |
1549 | } |
1550 | |
1551 | phba->hba_flag |= HBA_DEFER_FLOGI; |
1552 | } else { |
1553 | lpfc_initial_flogi(vport); |
1554 | } |
1555 | } else { |
1556 | if (test_bit(FC_PT2PT, &vport->fc_flag)) |
1557 | lpfc_disc_start(vport); |
1558 | } |
1559 | return; |
1560 | |
1561 | out: |
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);
1565 | |
1566 | sparam_out: |
1567 | lpfc_linkdown(phba); |
1568 | |
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);
1572 | |
1573 | lpfc_issue_clear_la(phba, vport); |
1574 | return; |
1575 | } |
1576 | |
1577 | /** |
 * lpfc_sli4_clear_fcf_rr_bmask - Reset round robin bmask and clear fcf pri list
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the
1581 | * fcf priority list. The list deletions are done while holding the |
1582 | * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared |
1583 | * from the lpfc_fcf_pri record. |
1584 | **/ |
1585 | void |
1586 | lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba) |
1587 | { |
1588 | struct lpfc_fcf_pri *fcf_pri; |
1589 | struct lpfc_fcf_pri *next_fcf_pri; |
1590 | memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); |
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
1598 | } |
1599 | static void |
1600 | lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
1601 | { |
1602 | struct lpfc_vport *vport = mboxq->vport; |
1603 | |
1604 | if (mboxq->u.mb.mbxStatus) { |
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
1609 | goto fail_out; |
1610 | } |
1611 | |
1612 | /* Start FCoE discovery by sending a FLOGI. */ |
1613 | phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); |
1614 | /* Set the FCFI registered flag */ |
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
1618 | |
1619 | /* If there is a pending FCoE event, restart FCF table scan. */ |
1620 | if ((!(phba->hba_flag & FCF_RR_INPROG)) && |
1621 | lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) |
1622 | goto fail_out; |
1623 | |
1624 | /* Mark successful completion of FCF table scan */ |
	spin_lock_irq(&phba->hbalock);
1626 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1627 | phba->hba_flag &= ~FCF_TS_INPROG; |
1628 | if (vport->port_state != LPFC_FLOGI) { |
1629 | phba->hba_flag |= FCF_RR_INPROG; |
		spin_unlock_irq(&phba->hbalock);
1631 | lpfc_issue_init_vfi(vport); |
1632 | goto out; |
1633 | } |
	spin_unlock_irq(&phba->hbalock);
1635 | goto out; |
1636 | |
1637 | fail_out: |
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
1641 | out: |
	mempool_free(mboxq, phba->mbox_mem_pool);
1643 | } |
1644 | |
1645 | /** |
1646 | * lpfc_fab_name_match - Check if the fcf fabric name match. |
1647 | * @fab_name: pointer to fabric name. |
1648 | * @new_fcf_record: pointer to fcf record. |
1649 | * |
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
1653 | **/ |
1654 | static uint32_t |
1655 | lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) |
1656 | { |
1657 | if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) |
1658 | return 0; |
1659 | if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) |
1660 | return 0; |
1661 | if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) |
1662 | return 0; |
1663 | if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) |
1664 | return 0; |
1665 | if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) |
1666 | return 0; |
1667 | if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) |
1668 | return 0; |
1669 | if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) |
1670 | return 0; |
1671 | if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) |
1672 | return 0; |
1673 | return 1; |
1674 | } |
1675 | |
1676 | /** |
1677 | * lpfc_sw_name_match - Check if the fcf switch name match. |
1678 | * @sw_name: pointer to switch name. |
1679 | * @new_fcf_record: pointer to fcf record. |
1680 | * |
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
1684 | **/ |
1685 | static uint32_t |
1686 | lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) |
1687 | { |
1688 | if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) |
1689 | return 0; |
1690 | if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) |
1691 | return 0; |
1692 | if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) |
1693 | return 0; |
1694 | if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) |
1695 | return 0; |
1696 | if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) |
1697 | return 0; |
1698 | if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) |
1699 | return 0; |
1700 | if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) |
1701 | return 0; |
1702 | if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) |
1703 | return 0; |
1704 | return 1; |
1705 | } |
1706 | |
1707 | /** |
1708 | * lpfc_mac_addr_match - Check if the fcf mac address match. |
1709 | * @mac_addr: pointer to mac address. |
1710 | * @new_fcf_record: pointer to fcf record. |
1711 | * |
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
1715 | **/ |
1716 | static uint32_t |
1717 | lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record) |
1718 | { |
1719 | if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) |
1720 | return 0; |
1721 | if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) |
1722 | return 0; |
1723 | if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) |
1724 | return 0; |
1725 | if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) |
1726 | return 0; |
1727 | if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) |
1728 | return 0; |
1729 | if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record)) |
1730 | return 0; |
1731 | return 1; |
1732 | } |
1733 | |
1734 | static bool |
1735 | lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id) |
1736 | { |
1737 | return (curr_vlan_id == new_vlan_id); |
1738 | } |
1739 | |
1740 | /** |
1741 | * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. |
1742 | * @phba: pointer to lpfc hba data structure. |
1743 | * @fcf_index: Index for the lpfc_fcf_record. |
1744 | * @new_fcf_record: pointer to hba fcf record. |
1745 | * |
1746 | * This routine updates the driver FCF priority record from the new HBA FCF |
1747 | * record. The hbalock is asserted held in the code path calling this |
1748 | * routine. |
1749 | **/ |
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
1755 | struct lpfc_fcf_pri *fcf_pri; |
1756 | |
1757 | fcf_pri = &phba->fcf.fcf_pri[fcf_index]; |
1758 | fcf_pri->fcf_rec.fcf_index = fcf_index; |
1759 | /* FCF record priority */ |
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}
1763 | |
1764 | /** |
1765 | * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. |
1766 | * @fcf_rec: pointer to driver fcf record. |
1767 | * @new_fcf_record: pointer to fcf record. |
1768 | * |
1769 | * This routine copies the FCF information from the FCF |
1770 | * record to lpfc_hba data structure. |
1771 | **/ |
1772 | static void |
1773 | lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, |
1774 | struct fcf_record *new_fcf_record) |
1775 | { |
1776 | /* Fabric name */ |
1777 | fcf_rec->fabric_name[0] = |
1778 | bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); |
1779 | fcf_rec->fabric_name[1] = |
1780 | bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); |
1781 | fcf_rec->fabric_name[2] = |
1782 | bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); |
1783 | fcf_rec->fabric_name[3] = |
1784 | bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); |
1785 | fcf_rec->fabric_name[4] = |
1786 | bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); |
1787 | fcf_rec->fabric_name[5] = |
1788 | bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); |
1789 | fcf_rec->fabric_name[6] = |
1790 | bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); |
1791 | fcf_rec->fabric_name[7] = |
1792 | bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); |
1793 | /* Mac address */ |
1794 | fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); |
1795 | fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); |
1796 | fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); |
1797 | fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); |
1798 | fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); |
1799 | fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); |
1800 | /* FCF record index */ |
1801 | fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); |
1802 | /* FCF record priority */ |
1803 | fcf_rec->priority = new_fcf_record->fip_priority; |
1804 | /* Switch name */ |
1805 | fcf_rec->switch_name[0] = |
1806 | bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); |
1807 | fcf_rec->switch_name[1] = |
1808 | bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); |
1809 | fcf_rec->switch_name[2] = |
1810 | bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); |
1811 | fcf_rec->switch_name[3] = |
1812 | bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); |
1813 | fcf_rec->switch_name[4] = |
1814 | bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); |
1815 | fcf_rec->switch_name[5] = |
1816 | bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); |
1817 | fcf_rec->switch_name[6] = |
1818 | bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); |
1819 | fcf_rec->switch_name[7] = |
1820 | bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); |
1821 | } |
1822 | |
1823 | /** |
1824 | * __lpfc_update_fcf_record - Update driver fcf record |
1825 | * @phba: pointer to lpfc hba data structure. |
1826 | * @fcf_rec: pointer to driver fcf record. |
1827 | * @new_fcf_record: pointer to hba fcf record. |
1828 | * @addr_mode: address mode to be set to the driver fcf record. |
1829 | * @vlan_id: vlan tag to be set to the driver fcf record. |
1830 | * @flag: flag bits to be set to the driver fcf record. |
1831 | * |
1832 | * This routine updates the driver FCF record from the new HBA FCF record |
 * together with the address mode, vlan_id, and other information. This
1834 | * routine is called with the hbalock held. |
1835 | **/ |
1836 | static void |
1837 | __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, |
1838 | struct fcf_record *new_fcf_record, uint32_t addr_mode, |
1839 | uint16_t vlan_id, uint32_t flag) |
1840 | { |
1841 | lockdep_assert_held(&phba->hbalock); |
1842 | |
1843 | /* Copy the fields from the HBA's FCF record */ |
1844 | lpfc_copy_fcf_record(fcf_rec, new_fcf_record); |
1845 | /* Update other fields of driver FCF record */ |
1846 | fcf_rec->addr_mode = addr_mode; |
1847 | fcf_rec->vlan_id = vlan_id; |
1848 | fcf_rec->flag |= (flag | RECORD_VALID); |
1849 | __lpfc_update_fcf_record_pri(phba, |
1850 | bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), |
1851 | new_fcf_record); |
1852 | } |
1853 | |
1854 | /** |
1855 | * lpfc_register_fcf - Register the FCF with hba. |
1856 | * @phba: pointer to lpfc hba data structure. |
1857 | * |
1858 | * This routine issues a register fcfi mailbox command to register |
1859 | * the fcf with HBA. |
1860 | **/ |
1861 | static void |
1862 | lpfc_register_fcf(struct lpfc_hba *phba) |
1863 | { |
1864 | LPFC_MBOXQ_t *fcf_mbxq; |
1865 | int rc; |
1866 | |
	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
1872 | return; |
1873 | } |
1874 | |
1875 | /* The FCF is already registered, start discovery */ |
1876 | if (phba->fcf.fcf_flag & FCF_REGISTERED) { |
1877 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1878 | phba->hba_flag &= ~FCF_TS_INPROG; |
1879 | if (phba->pport->port_state != LPFC_FLOGI && |
1880 | test_bit(FC_FABRIC, &phba->pport->fc_flag)) { |
1881 | phba->hba_flag |= FCF_RR_INPROG; |
			spin_unlock_irq(&phba->hbalock);
1883 | lpfc_initial_flogi(phba->pport); |
1884 | return; |
1885 | } |
		spin_unlock_irq(&phba->hbalock);
1887 | return; |
1888 | } |
	spin_unlock_irq(&phba->hbalock);
1890 | |
	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
1896 | return; |
1897 | } |
1898 | |
1899 | lpfc_reg_fcfi(phba, fcf_mbxq); |
1900 | fcf_mbxq->vport = phba->pport; |
1901 | fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; |
1902 | rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); |
1903 | if (rc == MBX_NOT_FINISHED) { |
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1908 | } |
1909 | |
1910 | return; |
1911 | } |
1912 | |
1913 | /** |
1914 | * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. |
1915 | * @phba: pointer to lpfc hba data structure. |
1916 | * @new_fcf_record: pointer to fcf record. |
 * @boot_flag: Indicates if this record is used by the boot bios.
1918 | * @addr_mode: The address mode to be used by this FCF |
1919 | * @vlan_id: The vlan id to be used as vlan tagging by this FCF. |
1920 | * |
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns 0.
 * If this FCF record can be used for SAN discovery, boot_flag will indicate
 * if this FCF is used by the boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return of the function. If no VLAN tagging needs to be
 * used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
1930 | **/ |
1931 | static int |
1932 | lpfc_match_fcf_conn_list(struct lpfc_hba *phba, |
1933 | struct fcf_record *new_fcf_record, |
1934 | uint32_t *boot_flag, uint32_t *addr_mode, |
1935 | uint16_t *vlan_id) |
1936 | { |
1937 | struct lpfc_fcf_conn_entry *conn_entry; |
1938 | int i, j, fcf_vlan_id = 0; |
1939 | |
1940 | /* Find the lowest VLAN id in the FCF record */ |
1941 | for (i = 0; i < 512; i++) { |
1942 | if (new_fcf_record->vlan_bitmap[i]) { |
1943 | fcf_vlan_id = i * 8; |
1944 | j = 0; |
1945 | while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { |
1946 | j++; |
1947 | fcf_vlan_id++; |
1948 | } |
1949 | break; |
1950 | } |
1951 | } |
1952 | |
1953 | /* FCF not valid/available or solicitation in progress */ |
1954 | if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || |
1955 | !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || |
1956 | bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) |
1957 | return 0; |
1958 | |
1959 | if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { |
1960 | *boot_flag = 0; |
1961 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
1962 | new_fcf_record); |
1963 | if (phba->valid_vlan) |
1964 | *vlan_id = phba->vlan_id; |
1965 | else |
1966 | *vlan_id = LPFC_FCOE_NULL_VID; |
1967 | return 1; |
1968 | } |
1969 | |
1970 | /* |
1971 | * If there are no FCF connection table entry, driver connect to all |
1972 | * FCFs. |
1973 | */ |
	if (list_empty(&phba->fcf_conn_rec_list)) {
1975 | *boot_flag = 0; |
1976 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
1977 | new_fcf_record); |
1978 | |
1979 | /* |
1980 | * When there are no FCF connect entries, use driver's default |
1981 | * addressing mode - FPMA. |
1982 | */ |
1983 | if (*addr_mode & LPFC_FCF_FPMA) |
1984 | *addr_mode = LPFC_FCF_FPMA; |
1985 | |
1986 | /* If FCF record report a vlan id use that vlan id */ |
1987 | if (fcf_vlan_id) |
1988 | *vlan_id = fcf_vlan_id; |
1989 | else |
1990 | *vlan_id = LPFC_FCOE_NULL_VID; |
1991 | return 1; |
1992 | } |
1993 | |
1994 | list_for_each_entry(conn_entry, |
1995 | &phba->fcf_conn_rec_list, list) { |
1996 | if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) |
1997 | continue; |
1998 | |
1999 | if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && |
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
2001 | new_fcf_record)) |
2002 | continue; |
2003 | if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) && |
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
2005 | new_fcf_record)) |
2006 | continue; |
2007 | if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { |
2008 | /* |
2009 | * If the vlan bit map does not have the bit set for the |
2010 | * vlan id to be used, then it is not a match. |
2011 | */ |
2012 | if (!(new_fcf_record->vlan_bitmap |
2013 | [conn_entry->conn_rec.vlan_tag / 8] & |
2014 | (1 << (conn_entry->conn_rec.vlan_tag % 8)))) |
2015 | continue; |
2016 | } |
2017 | |
2018 | /* |
2019 | * If connection record does not support any addressing mode, |
2020 | * skip the FCF record. |
2021 | */ |
2022 | if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) |
2023 | & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) |
2024 | continue; |
2025 | |
2026 | /* |
2027 | * Check if the connection record specifies a required |
2028 | * addressing mode. |
2029 | */ |
2030 | if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2031 | !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { |
2032 | |
2033 | /* |
2034 | * If SPMA required but FCF not support this continue. |
2035 | */ |
2036 | if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2037 | !(bf_get(lpfc_fcf_record_mac_addr_prov, |
2038 | new_fcf_record) & LPFC_FCF_SPMA)) |
2039 | continue; |
2040 | |
2041 | /* |
2042 | * If FPMA required but FCF not support this continue. |
2043 | */ |
2044 | if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2045 | !(bf_get(lpfc_fcf_record_mac_addr_prov, |
2046 | new_fcf_record) & LPFC_FCF_FPMA)) |
2047 | continue; |
2048 | } |
2049 | |
2050 | /* |
2051 | * This fcf record matches filtering criteria. |
2052 | */ |
2053 | if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) |
2054 | *boot_flag = 1; |
2055 | else |
2056 | *boot_flag = 0; |
2057 | |
2058 | /* |
2059 | * If user did not specify any addressing mode, or if the |
2060 | * preferred addressing mode specified by user is not supported |
2061 | * by FCF, allow fabric to pick the addressing mode. |
2062 | */ |
2063 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
2064 | new_fcf_record); |
2065 | /* |
2066 | * If the user specified a required address mode, assign that |
2067 | * address mode |
2068 | */ |
2069 | if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2070 | (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) |
2071 | *addr_mode = (conn_entry->conn_rec.flags & |
2072 | FCFCNCT_AM_SPMA) ? |
2073 | LPFC_FCF_SPMA : LPFC_FCF_FPMA; |
2074 | /* |
2075 | * If the user specified a preferred address mode, use the |
2076 | * addr mode only if FCF support the addr_mode. |
2077 | */ |
2078 | else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2079 | (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && |
2080 | (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2081 | (*addr_mode & LPFC_FCF_SPMA)) |
2082 | *addr_mode = LPFC_FCF_SPMA; |
2083 | else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2084 | (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && |
2085 | !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2086 | (*addr_mode & LPFC_FCF_FPMA)) |
2087 | *addr_mode = LPFC_FCF_FPMA; |
2088 | |
2089 | /* If matching connect list has a vlan id, use it */ |
2090 | if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) |
2091 | *vlan_id = conn_entry->conn_rec.vlan_tag; |
2092 | /* |
2093 | * If no vlan id is specified in connect list, use the vlan id |
2094 | * in the FCF record |
2095 | */ |
2096 | else if (fcf_vlan_id) |
2097 | *vlan_id = fcf_vlan_id; |
2098 | else |
2099 | *vlan_id = LPFC_FCOE_NULL_VID; |
2100 | |
2101 | return 1; |
2102 | } |
2103 | |
2104 | return 0; |
2105 | } |
2106 | |
2107 | /** |
2108 | * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. |
2109 | * @phba: pointer to lpfc hba data structure. |
 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
2115 | */ |
2116 | int |
2117 | lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) |
2118 | { |
2119 | /* |
2120 | * If the Link is up and no FCoE events while in the |
2121 | * FCF discovery, no need to restart FCF discovery. |
2122 | */ |
2123 | if ((phba->link_state >= LPFC_LINK_UP) && |
2124 | (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) |
2125 | return 0; |
2126 | |
2127 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2128 | "2768 Pending link or FCF event during current " |
2129 | "handling of the previous event: link_state:x%x, " |
2130 | "evt_tag_at_scan:x%x, evt_tag_current:x%x\n" , |
2131 | phba->link_state, phba->fcoe_eventtag_at_fcf_scan, |
2132 | phba->fcoe_eventtag); |
2133 | |
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);
2137 | |
2138 | if (phba->link_state >= LPFC_LINK_UP) { |
2139 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
2140 | "2780 Restart FCF table scan due to " |
2141 | "pending FCF event:evt_tag_at_scan:x%x, " |
2142 | "evt_tag_current:x%x\n" , |
2143 | phba->fcoe_eventtag_at_fcf_scan, |
2144 | phba->fcoe_eventtag); |
2145 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
2146 | } else { |
2147 | /* |
2148 | * Do not continue FCF discovery and clear FCF_TS_INPROG |
2149 | * flag |
2150 | */ |
2151 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
2152 | "2833 Stop FCF discovery process due to link " |
2153 | "state change (x%x)\n" , phba->link_state); |
2154 | spin_lock_irq(lock: &phba->hbalock); |
2155 | phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); |
2156 | phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); |
2157 | spin_unlock_irq(lock: &phba->hbalock); |
2158 | } |
2159 | |
2160 | /* Unregister the currently registered FCF if required */ |
2161 | if (unreg_fcf) { |
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
2165 | lpfc_sli4_unregister_fcf(phba); |
2166 | } |
2167 | return 1; |
2168 | } |
2169 | |
2170 | /** |
2171 | * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record |
2172 | * @phba: pointer to lpfc hba data structure. |
2173 | * @fcf_cnt: number of eligible fcf record seen so far. |
2174 | * |
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, a 16-bit random number returned from get_random_u16() is
 * taken as the random number generated.
2180 | * |
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; returns false when the outcome is to keep the previously chosen
 * FCF record.
2184 | **/ |
2185 | static bool |
2186 | lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) |
2187 | { |
2188 | uint32_t rand_num; |
2189 | |
2190 | /* Get 16-bit uniform random number */ |
2191 | rand_num = get_random_u16(); |
2192 | |
2193 | /* Decision with probability 1/fcf_cnt */ |
2194 | if ((fcf_cnt * rand_num) < 0xFFFF) |
2195 | return true; |
2196 | else |
2197 | return false; |
2198 | } |
2199 | |
2200 | /** |
2201 | * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command. |
2202 | * @phba: pointer to lpfc hba data structure. |
2203 | * @mboxq: pointer to mailbox object. |
2204 | * @next_fcf_index: pointer to holder of next fcf index. |
2205 | * |
2206 | * This routine parses the non-embedded fcf mailbox command by performing the |
 * necessary error checking, non-embedded read FCF record mailbox command
2208 | * SGE parsing, and endianness swapping. |
2209 | * |
2210 | * Returns the pointer to the new FCF record in the non-embedded mailbox |
 * command DMA memory if successful, otherwise NULL.
2212 | */ |
2213 | static struct fcf_record * |
2214 | lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, |
2215 | uint16_t *next_fcf_index) |
2216 | { |
2217 | void *virt_addr; |
2218 | struct lpfc_mbx_sge sge; |
2219 | struct lpfc_mbx_read_fcf_tbl *read_fcf; |
2220 | uint32_t shdr_status, shdr_add_status, if_type; |
2221 | union lpfc_sli4_cfg_shdr *shdr; |
2222 | struct fcf_record *new_fcf_record; |
2223 | |
2224 | /* Get the first SGE entry from the non-embedded DMA memory. This |
2225 | * routine only uses a single SGE. |
2226 | */ |
2227 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); |
2228 | if (unlikely(!mboxq->sge_array)) { |
2229 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2230 | "2524 Failed to get the non-embedded SGE " |
2231 | "virtual address\n" ); |
2232 | return NULL; |
2233 | } |
2234 | virt_addr = mboxq->sge_array->addr[0]; |
2235 | |
2236 | shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; |
2237 | lpfc_sli_pcimem_bcopy(shdr, shdr, |
2238 | sizeof(union lpfc_sli4_cfg_shdr)); |
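	/*
	 * Note: lpfc_sli_pcimem_bcopy() is called with source equal to
	 * destination here and below; as used in this routine it performs
	 * an in-place little-endian to host-endian conversion of the
	 * header and record words rather than a plain copy.
	 */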
2239 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
2240 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
2241 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
2242 | if (shdr_status || shdr_add_status) { |
2243 | if (shdr_status == STATUS_FCF_TABLE_EMPTY || |
2244 | if_type == LPFC_SLI_INTF_IF_TYPE_2) |
2245 | lpfc_printf_log(phba, KERN_ERR, |
2246 | LOG_TRACE_EVENT, |
2247 | "2726 READ_FCF_RECORD Indicates empty " |
2248 | "FCF table.\n" ); |
2249 | else |
2250 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2251 | "2521 READ_FCF_RECORD mailbox failed " |
2252 | "with status x%x add_status x%x, " |
2253 | "mbx\n" , shdr_status, shdr_add_status); |
2254 | return NULL; |
2255 | } |
2256 | |
2257 | /* Interpreting the returned information of the FCF record */ |
2258 | read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; |
2259 | lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, |
2260 | sizeof(struct lpfc_mbx_read_fcf_tbl)); |
2261 | *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); |
2262 | new_fcf_record = (struct fcf_record *)(virt_addr + |
2263 | sizeof(struct lpfc_mbx_read_fcf_tbl)); |
2264 | lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, |
2265 | offsetof(struct fcf_record, vlan_bitmap)); |
2266 | new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); |
2267 | new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); |
2268 | |
2269 | return new_fcf_record; |
2270 | } |
2271 | |
2272 | /** |
2273 | * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record |
2274 | * @phba: pointer to lpfc hba data structure. |
2275 | * @fcf_record: pointer to the fcf record. |
2276 | * @vlan_id: the lowest vlan identifier associated to this fcf record. |
2277 | * @next_fcf_index: the index to the next fcf record in hba's fcf table. |
2278 | * |
 * This routine logs the detailed FCF record if LOG_FIP logging is
 * enabled.
2281 | **/ |
2282 | static void |
2283 | lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, |
2284 | struct fcf_record *fcf_record, |
2285 | uint16_t vlan_id, |
2286 | uint16_t next_fcf_index) |
2287 | { |
2288 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2289 | "2764 READ_FCF_RECORD:\n" |
2290 | "\tFCF_Index : x%x\n" |
2291 | "\tFCF_Avail : x%x\n" |
2292 | "\tFCF_Valid : x%x\n" |
2293 | "\tFCF_SOL : x%x\n" |
2294 | "\tFIP_Priority : x%x\n" |
2295 | "\tMAC_Provider : x%x\n" |
2296 | "\tLowest VLANID : x%x\n" |
2297 | "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" |
2298 | "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" |
2299 | "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" |
2300 | "\tNext_FCF_Index: x%x\n" , |
2301 | bf_get(lpfc_fcf_record_fcf_index, fcf_record), |
2302 | bf_get(lpfc_fcf_record_fcf_avail, fcf_record), |
2303 | bf_get(lpfc_fcf_record_fcf_valid, fcf_record), |
2304 | bf_get(lpfc_fcf_record_fcf_sol, fcf_record), |
2305 | fcf_record->fip_priority, |
2306 | bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), |
2307 | vlan_id, |
2308 | bf_get(lpfc_fcf_record_mac_0, fcf_record), |
2309 | bf_get(lpfc_fcf_record_mac_1, fcf_record), |
2310 | bf_get(lpfc_fcf_record_mac_2, fcf_record), |
2311 | bf_get(lpfc_fcf_record_mac_3, fcf_record), |
2312 | bf_get(lpfc_fcf_record_mac_4, fcf_record), |
2313 | bf_get(lpfc_fcf_record_mac_5, fcf_record), |
2314 | bf_get(lpfc_fcf_record_fab_name_0, fcf_record), |
2315 | bf_get(lpfc_fcf_record_fab_name_1, fcf_record), |
2316 | bf_get(lpfc_fcf_record_fab_name_2, fcf_record), |
2317 | bf_get(lpfc_fcf_record_fab_name_3, fcf_record), |
2318 | bf_get(lpfc_fcf_record_fab_name_4, fcf_record), |
2319 | bf_get(lpfc_fcf_record_fab_name_5, fcf_record), |
2320 | bf_get(lpfc_fcf_record_fab_name_6, fcf_record), |
2321 | bf_get(lpfc_fcf_record_fab_name_7, fcf_record), |
2322 | bf_get(lpfc_fcf_record_switch_name_0, fcf_record), |
2323 | bf_get(lpfc_fcf_record_switch_name_1, fcf_record), |
2324 | bf_get(lpfc_fcf_record_switch_name_2, fcf_record), |
2325 | bf_get(lpfc_fcf_record_switch_name_3, fcf_record), |
2326 | bf_get(lpfc_fcf_record_switch_name_4, fcf_record), |
2327 | bf_get(lpfc_fcf_record_switch_name_5, fcf_record), |
2328 | bf_get(lpfc_fcf_record_switch_name_6, fcf_record), |
2329 | bf_get(lpfc_fcf_record_switch_name_7, fcf_record), |
2330 | next_fcf_index); |
2331 | } |
2332 | |
2333 | /** |
2334 | * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF |
2335 | * @phba: pointer to lpfc hba data structure. |
2336 | * @fcf_rec: pointer to an existing FCF record. |
2337 | * @new_fcf_record: pointer to a new FCF record. |
2338 | * @new_vlan_id: vlan id from the new FCF record. |
2339 | * |
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * the vlan id will not be used as part of the FCF record matching criteria.
2343 | * |
 * Returns true if all the fields match, otherwise returns false.
2345 | */ |
2346 | static bool |
2347 | lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, |
2348 | struct lpfc_fcf_rec *fcf_rec, |
2349 | struct fcf_record *new_fcf_record, |
2350 | uint16_t new_vlan_id) |
2351 | { |
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
2360 | return false; |
2361 | if (fcf_rec->priority != new_fcf_record->fip_priority) |
2362 | return false; |
2363 | return true; |
2364 | } |
2365 | |
2366 | /** |
2367 | * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf |
2368 | * @vport: Pointer to vport object. |
2369 | * @fcf_index: index to next fcf. |
2370 | * |
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 to continue retrying flogi on the currently registered fcf;
 * 1 to stop flogi on the currently registered fcf.
2376 | */ |
2377 | int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) |
2378 | { |
2379 | struct lpfc_hba *phba = vport->phba; |
2380 | int rc; |
2381 | |
2382 | if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { |
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
2386 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2387 | "2872 Devloss tmo with no eligible " |
2388 | "FCF, unregister in-use FCF (x%x) " |
2389 | "and rescan FCF table\n" , |
2390 | phba->fcf.current_rec.fcf_indx); |
2391 | lpfc_unregister_fcf_rescan(phba); |
2392 | goto stop_flogi_current_fcf; |
2393 | } |
2394 | /* Mark the end to FLOGI roundrobin failover */ |
2395 | phba->hba_flag &= ~FCF_RR_INPROG; |
2396 | /* Allow action to new fcf asynchronous event */ |
2397 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); |
		spin_unlock_irq(&phba->hbalock);
2399 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2400 | "2865 No FCF available, stop roundrobin FCF " |
2401 | "failover and change port state:x%x/x%x\n" , |
2402 | phba->pport->port_state, LPFC_VPORT_UNKNOWN); |
2403 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
2404 | |
2405 | if (!phba->fcf.fcf_redisc_attempted) { |
2406 | lpfc_unregister_fcf(phba); |
2407 | |
2408 | rc = lpfc_sli4_redisc_fcf_table(phba); |
2409 | if (!rc) { |
2410 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2411 | "3195 Rediscover FCF table\n" ); |
2412 | phba->fcf.fcf_redisc_attempted = 1; |
2413 | lpfc_sli4_clear_fcf_rr_bmask(phba); |
2414 | } else { |
2415 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2416 | "3196 Rediscover FCF table " |
2417 | "failed. Status:x%x\n" , rc); |
2418 | } |
2419 | } else { |
2420 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2421 | "3197 Already rediscover FCF table " |
2422 | "attempted. No more retry\n" ); |
2423 | } |
2424 | goto stop_flogi_current_fcf; |
2425 | } else { |
2426 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, |
2427 | "2794 Try FLOGI roundrobin FCF failover to " |
2428 | "(x%x)\n" , fcf_index); |
2429 | rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); |
2430 | if (rc) |
2431 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, |
2432 | "2761 FLOGI roundrobin FCF failover " |
2433 | "failed (rc:x%x) to read FCF (x%x)\n" , |
2434 | rc, phba->fcf.current_rec.fcf_indx); |
2435 | else |
2436 | goto stop_flogi_current_fcf; |
2437 | } |
2438 | return 0; |
2439 | |
2440 | stop_flogi_current_fcf: |
2441 | lpfc_can_disctmo(vport); |
2442 | return 1; |
2443 | } |
2444 | |
2445 | /** |
 * lpfc_sli4_fcf_pri_list_del - Remove an fcf record from the fcf pri list
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
2453 | **/ |
2454 | static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, |
2455 | uint16_t fcf_index) |
2456 | { |
2457 | struct lpfc_fcf_pri *new_fcf_pri; |
2458 | |
2459 | new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; |
2460 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2461 | "3058 deleting idx x%x pri x%x flg x%x\n" , |
2462 | fcf_index, new_fcf_pri->fcf_rec.priority, |
2463 | new_fcf_pri->fcf_rec.flag); |
	spin_lock_irq(&phba->hbalock);
2465 | if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { |
2466 | if (phba->fcf.current_rec.priority == |
2467 | new_fcf_pri->fcf_rec.priority) |
2468 | phba->fcf.eligible_fcf_cnt--; |
		list_del_init(&new_fcf_pri->list);
2470 | new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; |
2471 | } |
	spin_unlock_irq(&phba->hbalock);
2473 | } |
2474 | |
2475 | /** |
 * lpfc_sli4_set_fcf_flogi_fail - Mark an fcf record as having failed FLOGI
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
2484 | **/ |
2485 | void |
2486 | lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) |
2487 | { |
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
2493 | } |
2494 | |
2495 | /** |
 * lpfc_sli4_fcf_pri_list_add - Add an fcf record to the fcf pri list
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to a new FCF record.
 *
 * This routine checks the priority of the fcf_index to be added (a lower
 * fip_priority value means a higher precedence).
 * If its precedence is below that of the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If its precedence equals that of the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher precedence than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
2508 | * returns: |
2509 | * 0=success 1=failure |
2510 | **/ |
2511 | static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, |
2512 | uint16_t fcf_index, |
2513 | struct fcf_record *new_fcf_record) |
2514 | { |
2515 | uint16_t current_fcf_pri; |
2516 | uint16_t last_index; |
2517 | struct lpfc_fcf_pri *fcf_pri; |
2518 | struct lpfc_fcf_pri *next_fcf_pri; |
2519 | struct lpfc_fcf_pri *new_fcf_pri; |
2520 | int ret; |
2521 | |
2522 | new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; |
2523 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2524 | "3059 adding idx x%x pri x%x flg x%x\n" , |
2525 | fcf_index, new_fcf_record->fip_priority, |
2526 | new_fcf_pri->fcf_rec.flag); |
2527 | spin_lock_irq(lock: &phba->hbalock); |
2528 | if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) |
2529 | list_del_init(entry: &new_fcf_pri->list); |
2530 | new_fcf_pri->fcf_rec.fcf_index = fcf_index; |
2531 | new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; |
2532 | if (list_empty(head: &phba->fcf.fcf_pri_list)) { |
2533 | list_add(new: &new_fcf_pri->list, head: &phba->fcf.fcf_pri_list); |
2534 | ret = lpfc_sli4_fcf_rr_index_set(phba, |
2535 | new_fcf_pri->fcf_rec.fcf_index); |
2536 | goto out; |
2537 | } |
2538 | |
	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2540 | LPFC_SLI4_FCF_TBL_INDX_MAX); |
2541 | if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
2542 | ret = 0; /* Empty rr list */ |
2543 | goto out; |
2544 | } |
2545 | current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; |
2546 | if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { |
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2548 | if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { |
2549 | memset(phba->fcf.fcf_rr_bmask, 0, |
2550 | sizeof(*phba->fcf.fcf_rr_bmask)); |
2551 | /* fcfs_at_this_priority_level = 1; */ |
2552 | phba->fcf.eligible_fcf_cnt = 1; |
2553 | } else |
2554 | /* fcfs_at_this_priority_level++; */ |
2555 | phba->fcf.eligible_fcf_cnt++; |
2556 | ret = lpfc_sli4_fcf_rr_index_set(phba, |
2557 | new_fcf_pri->fcf_rec.fcf_index); |
2558 | goto out; |
2559 | } |
2560 | |
2561 | list_for_each_entry_safe(fcf_pri, next_fcf_pri, |
2562 | &phba->fcf.fcf_pri_list, list) { |
2563 | if (new_fcf_pri->fcf_rec.priority <= |
2564 | fcf_pri->fcf_rec.priority) { |
2565 | if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) |
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					   fcf_pri->list.prev)->list);
2572 | ret = 0; |
2573 | goto out; |
2574 | } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list |
2575 | || new_fcf_pri->fcf_rec.priority < |
2576 | next_fcf_pri->fcf_rec.priority) { |
			list_add(&new_fcf_pri->list, &fcf_pri->list);
2578 | ret = 0; |
2579 | goto out; |
2580 | } |
2581 | if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) |
2582 | continue; |
2583 | |
2584 | } |
2585 | ret = 1; |
2586 | out: |
2587 | /* we use = instead of |= to clear the FLOGI_FAILED flag. */ |
2588 | new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; |
	spin_unlock_irq(&phba->hbalock);
2590 | return ret; |
2591 | } |
2592 | |
2593 | /** |
2594 | * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. |
2595 | * @phba: pointer to lpfc hba data structure. |
2596 | * @mboxq: pointer to mailbox object. |
2597 | * |
2598 | * This function iterates through all the fcf records available in |
2599 | * HBA and chooses the optimal FCF record for discovery. After finding |
 * the FCF for discovery it registers the FCF record and kick-starts
2601 | * discovery. |
2602 | * If FCF_IN_USE flag is set in currently used FCF, the routine tries to |
2603 | * use an FCF record which matches fabric name and mac address of the |
2604 | * currently used FCF record. |
2605 | * If the driver supports only one FCF, it will try to use the FCF record |
2606 | * used by BOOT_BIOS. |
2607 | */ |
2608 | void |
2609 | lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
2610 | { |
2611 | struct fcf_record *new_fcf_record; |
2612 | uint32_t boot_flag, addr_mode; |
2613 | uint16_t fcf_index, next_fcf_index; |
2614 | struct lpfc_fcf_rec *fcf_rec = NULL; |
2615 | uint16_t vlan_id = LPFC_FCOE_NULL_VID; |
2616 | bool select_new_fcf; |
2617 | int rc; |
2618 | |
2619 | /* If there is pending FCoE event restart FCF table scan */ |
2620 | if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { |
2621 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2622 | return; |
2623 | } |
2624 | |
2625 | /* Parse the FCF record from the non-embedded mailbox command */ |
2626 | new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, |
						      &next_fcf_index);
2628 | if (!new_fcf_record) { |
2629 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2630 | "2765 Mailbox command READ_FCF_RECORD " |
2631 | "failed to retrieve a FCF record.\n" ); |
2632 | /* Let next new FCF event trigger fast failover */ |
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
2636 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2637 | return; |
2638 | } |
2639 | |
2640 | /* Check the FCF record against the connection list */ |
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);
2643 | |
2644 | /* Log the FCF record information if turned on */ |
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2646 | next_fcf_index); |
2647 | |
2648 | /* |
2649 | * If the fcf record does not match with connect list entries |
2650 | * read the next entry; otherwise, this is an eligible FCF |
2651 | * record for roundrobin FCF failover. |
2652 | */ |
2653 | if (!rc) { |
2654 | lpfc_sli4_fcf_pri_list_del(phba, |
2655 | bf_get(lpfc_fcf_record_fcf_index, |
2656 | new_fcf_record)); |
2657 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2658 | "2781 FCF (x%x) failed connection " |
2659 | "list check: (x%x/x%x/%x)\n" , |
2660 | bf_get(lpfc_fcf_record_fcf_index, |
2661 | new_fcf_record), |
2662 | bf_get(lpfc_fcf_record_fcf_avail, |
2663 | new_fcf_record), |
2664 | bf_get(lpfc_fcf_record_fcf_valid, |
2665 | new_fcf_record), |
2666 | bf_get(lpfc_fcf_record_fcf_sol, |
2667 | new_fcf_record)); |
2668 | if ((phba->fcf.fcf_flag & FCF_IN_USE) && |
	    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2670 | new_fcf_record, LPFC_FCOE_IGNORE_VID)) { |
2671 | if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != |
2672 | phba->fcf.current_rec.fcf_indx) { |
2673 | lpfc_printf_log(phba, KERN_ERR, |
2674 | LOG_TRACE_EVENT, |
2675 | "2862 FCF (x%x) matches property " |
2676 | "of in-use FCF (x%x)\n" , |
2677 | bf_get(lpfc_fcf_record_fcf_index, |
2678 | new_fcf_record), |
2679 | phba->fcf.current_rec.fcf_indx); |
2680 | goto read_next_fcf; |
2681 | } |
2682 | /* |
2683 | * In case the current in-use FCF record becomes |
2684 | * invalid/unavailable during FCF discovery that |
2685 | * was not triggered by fast FCF failover process, |
2686 | * treat it as fast FCF failover. |
2687 | */ |
2688 | if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && |
2689 | !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { |
2690 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2691 | "2835 Invalid in-use FCF " |
2692 | "(x%x), enter FCF failover " |
2693 | "table scan.\n" , |
2694 | phba->fcf.current_rec.fcf_indx); |
2695 | spin_lock_irq(lock: &phba->hbalock); |
2696 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; |
2697 | spin_unlock_irq(lock: &phba->hbalock); |
2698 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2699 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
2700 | LPFC_FCOE_FCF_GET_FIRST); |
2701 | return; |
2702 | } |
2703 | } |
2704 | goto read_next_fcf; |
2705 | } else { |
2706 | fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); |
2707 | rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, |
2708 | new_fcf_record); |
2709 | if (rc) |
2710 | goto read_next_fcf; |
2711 | } |
2712 | |
2713 | /* |
	 * If this is not the first FCF discovery of the HBA, use the last
	 * FCF record for the discovery. A rescan matches the in-use FCF
	 * record on fabric name, switch name, mac address, and vlan_id.
2718 | */ |
	spin_lock_irq(&phba->hbalock);
2720 | if (phba->fcf.fcf_flag & FCF_IN_USE) { |
2721 | if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && |
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
					       new_fcf_record, vlan_id)) {
2724 | if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == |
2725 | phba->fcf.current_rec.fcf_indx) { |
2726 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
2727 | if (phba->fcf.fcf_flag & FCF_REDISC_PEND) |
2728 | /* Stop FCF redisc wait timer */ |
2729 | __lpfc_sli4_stop_fcf_redisc_wait_timer( |
2730 | phba); |
2731 | else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) |
2732 | /* Fast failover, mark completed */ |
2733 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; |
				spin_unlock_irq(&phba->hbalock);
2735 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2736 | "2836 New FCF matches in-use " |
2737 | "FCF (x%x), port_state:x%x, " |
2738 | "fc_flag:x%lx\n" , |
2739 | phba->fcf.current_rec.fcf_indx, |
2740 | phba->pport->port_state, |
2741 | phba->pport->fc_flag); |
2742 | goto out; |
2743 | } else |
2744 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2745 | "2863 New FCF (x%x) matches " |
2746 | "property of in-use FCF (x%x)\n" , |
2747 | bf_get(lpfc_fcf_record_fcf_index, |
2748 | new_fcf_record), |
2749 | phba->fcf.current_rec.fcf_indx); |
2750 | } |
2751 | /* |
2752 | * Read next FCF record from HBA searching for the matching |
2753 | * with in-use record only if not during the fast failover |
2754 | * period. In case of fast failover period, it shall try to |
2755 | * determine whether the FCF record just read should be the |
2756 | * next candidate. |
2757 | */ |
2758 | if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { |
			spin_unlock_irq(&phba->hbalock);
2760 | goto read_next_fcf; |
2761 | } |
2762 | } |
2763 | /* |
2764 | * Update on failover FCF record only if it's in FCF fast-failover |
2765 | * period; otherwise, update on current FCF record. |
2766 | */ |
2767 | if (phba->fcf.fcf_flag & FCF_REDISC_FOV) |
2768 | fcf_rec = &phba->fcf.failover_rec; |
2769 | else |
2770 | fcf_rec = &phba->fcf.current_rec; |
2771 | |
2772 | if (phba->fcf.fcf_flag & FCF_AVAILABLE) { |
2773 | /* |
2774 | * If the driver FCF record does not have boot flag |
2775 | * set and new hba fcf record has boot flag set, use |
2776 | * the new hba fcf record. |
2777 | */ |
2778 | if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { |
2779 | /* Choose this FCF record */ |
2780 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2781 | "2837 Update current FCF record " |
2782 | "(x%x) with new FCF record (x%x)\n" , |
2783 | fcf_rec->fcf_indx, |
2784 | bf_get(lpfc_fcf_record_fcf_index, |
2785 | new_fcf_record)); |
2786 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
2787 | addr_mode, vlan_id, BOOT_ENABLE); |
			spin_unlock_irq(&phba->hbalock);
2789 | goto read_next_fcf; |
2790 | } |
2791 | /* |
2792 | * If the driver FCF record has boot flag set and the |
2793 | * new hba FCF record does not have boot flag, read |
2794 | * the next FCF record. |
2795 | */ |
2796 | if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { |
			spin_unlock_irq(&phba->hbalock);
2798 | goto read_next_fcf; |
2799 | } |
2800 | /* |
2801 | * If the new hba FCF record has lower priority value |
2802 | * than the driver FCF record, use the new record. |
2803 | */ |
2804 | if (new_fcf_record->fip_priority < fcf_rec->priority) { |
2805 | /* Choose the new FCF record with lower priority */ |
2806 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2807 | "2838 Update current FCF record " |
2808 | "(x%x) with new FCF record (x%x)\n" , |
2809 | fcf_rec->fcf_indx, |
2810 | bf_get(lpfc_fcf_record_fcf_index, |
2811 | new_fcf_record)); |
2812 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
						 addr_mode, vlan_id, 0);
2814 | /* Reset running random FCF selection count */ |
2815 | phba->fcf.eligible_fcf_cnt = 1; |
2816 | } else if (new_fcf_record->fip_priority == fcf_rec->priority) { |
2817 | /* Update running random FCF selection count */ |
2818 | phba->fcf.eligible_fcf_cnt++; |
2819 | select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, |
						phba->fcf.eligible_fcf_cnt);
2821 | if (select_new_fcf) { |
2822 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2823 | "2839 Update current FCF record " |
2824 | "(x%x) with new FCF record (x%x)\n" , |
2825 | fcf_rec->fcf_indx, |
2826 | bf_get(lpfc_fcf_record_fcf_index, |
2827 | new_fcf_record)); |
2828 | /* Choose the new FCF by random selection */ |
2829 | __lpfc_update_fcf_record(phba, fcf_rec, |
2830 | new_fcf_record, |
							 addr_mode, vlan_id, 0);
2832 | } |
2833 | } |
		spin_unlock_irq(&phba->hbalock);
2835 | goto read_next_fcf; |
2836 | } |
2837 | /* |
2838 | * This is the first suitable FCF record, choose this record for |
2839 | * initial best-fit FCF. |
2840 | */ |
2841 | if (fcf_rec) { |
2842 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2843 | "2840 Update initial FCF candidate " |
2844 | "with FCF (x%x)\n" , |
2845 | bf_get(lpfc_fcf_record_fcf_index, |
2846 | new_fcf_record)); |
2847 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
					 addr_mode, vlan_id, (boot_flag ?
2849 | BOOT_ENABLE : 0)); |
2850 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
2851 | /* Setup initial running random FCF selection count */ |
2852 | phba->fcf.eligible_fcf_cnt = 1; |
2853 | } |
	spin_unlock_irq(&phba->hbalock);
2855 | goto read_next_fcf; |
2856 | |
2857 | read_next_fcf: |
2858 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2859 | if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { |
2860 | if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { |
2861 | /* |
2862 | * Case of FCF fast failover scan |
2863 | */ |
2864 | |
2865 | /* |
2866 | * It has not found any suitable FCF record, cancel |
2867 | * FCF scan inprogress, and do nothing |
2868 | */ |
2869 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { |
2870 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2871 | "2782 No suitable FCF found: " |
2872 | "(x%x/x%x)\n" , |
2873 | phba->fcoe_eventtag_at_fcf_scan, |
2874 | bf_get(lpfc_fcf_record_fcf_index, |
2875 | new_fcf_record)); |
			spin_lock_irq(&phba->hbalock);
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
2880 | /* Unregister in-use FCF and rescan */ |
2881 | lpfc_printf_log(phba, KERN_INFO, |
2882 | LOG_FIP, |
2883 | "2864 On devloss tmo " |
2884 | "unreg in-use FCF and " |
2885 | "rescan FCF table\n" ); |
2886 | lpfc_unregister_fcf_rescan(phba); |
2887 | return; |
2888 | } |
2889 | /* |
2890 | * Let next new FCF event trigger fast failover |
2891 | */ |
2892 | phba->hba_flag &= ~FCF_TS_INPROG; |
			spin_unlock_irq(&phba->hbalock);
2894 | return; |
2895 | } |
2896 | /* |
2897 | * It has found a suitable FCF record that is not |
2898 | * the same as in-use FCF record, unregister the |
2899 | * in-use FCF record, replace the in-use FCF record |
2900 | * with the new FCF record, mark FCF fast failover |
			 * completed, and then register the new FCF
2902 | * record. |
2903 | */ |
2904 | |
2905 | /* Unregister the current in-use FCF record */ |
2906 | lpfc_unregister_fcf(phba); |
2907 | |
2908 | /* Replace in-use record with the new record */ |
2909 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2910 | "2842 Replace in-use FCF (x%x) " |
2911 | "with failover FCF (x%x)\n" , |
2912 | phba->fcf.current_rec.fcf_indx, |
2913 | phba->fcf.failover_rec.fcf_indx); |
2914 | memcpy(&phba->fcf.current_rec, |
2915 | &phba->fcf.failover_rec, |
2916 | sizeof(struct lpfc_fcf_rec)); |
2917 | /* |
2918 | * Mark the fast FCF failover rediscovery completed |
2919 | * and the start of the first round of the roundrobin |
2920 | * FCF failover. |
2921 | */ |
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
2925 | /* Register to the new FCF record */ |
2926 | lpfc_register_fcf(phba); |
2927 | } else { |
2928 | /* |
2929 | * In case of transaction period to fast FCF failover, |
2930 | * do nothing when search to the end of the FCF table. |
2931 | */ |
2932 | if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || |
2933 | (phba->fcf.fcf_flag & FCF_REDISC_PEND)) |
2934 | return; |
2935 | |
2936 | if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && |
2937 | phba->fcf.fcf_flag & FCF_IN_USE) { |
2938 | /* |
2939 | * In case the current in-use FCF record no |
2940 | * longer existed during FCF discovery that |
2941 | * was not triggered by fast FCF failover |
2942 | * process, treat it as fast FCF failover. |
2943 | */ |
2944 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2945 | "2841 In-use FCF record (x%x) " |
2946 | "not reported, entering fast " |
2947 | "FCF failover mode scanning.\n" , |
2948 | phba->fcf.current_rec.fcf_indx); |
2949 | spin_lock_irq(lock: &phba->hbalock); |
2950 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; |
2951 | spin_unlock_irq(lock: &phba->hbalock); |
2952 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
2953 | LPFC_FCOE_FCF_GET_FIRST); |
2954 | return; |
2955 | } |
2956 | /* Register to the new FCF record */ |
2957 | lpfc_register_fcf(phba); |
2958 | } |
2959 | } else |
2960 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); |
2961 | return; |
2962 | |
2963 | out: |
2964 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2965 | lpfc_register_fcf(phba); |
2966 | |
2967 | return; |
2968 | } |

/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - FCF roundrobin read_fcf mbox cmpl handler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the completion handler for the READ_FCF_RECORD mailbox command
 * issued from the eligible FCF record bmask to perform roundrobin FCF
 * failover after a FLOGI failure. If the FCF record read back is not
 * valid or available, the handler falls through to retrying FLOGI to the
 * currently registered FCF. Otherwise, it sets the newly read FCF record
 * as the failover FCF record, unregisters the currently registered FCF
 * record, copies the failover FCF record to the current FCF record, and
 * then registers the current FCF record before proceeding to try FLOGI
 * on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
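
/*
 * Editor's note: lpfc_sli4_fcf_rr_next_index_get() (implemented elsewhere
 * in the driver) supplies the roundrobin walk used above. As a minimal
 * sketch only, assuming an eligible-index bitmap, the wraparound lookup
 * can be expressed with the kernel bitmap API roughly like this; the
 * function and parameter names here are illustrative, not the driver's:
 */
#if 0
static u16 example_rr_next_index(const unsigned long *bmask, u16 size,
				 u16 last)
{
	unsigned long next;

	/* Search for an eligible index after the last one used ... */
	next = find_next_bit(bmask, size, last + 1);
	if (next >= size)
		/* ... and wrap to the lowest eligible index at the end. */
		next = find_next_bit(bmask, size, 0);
	return (u16)next;
}
#endif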

/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the completion handler for the READ_FCF_RECORD mailbox command
 * used to update the eligible-FCF bmask for FLOGI-failure roundrobin FCF
 * failover when a new FCF event happens. If the FCF record read back is
 * valid and available and passes the connection list check, the handler
 * updates the bmask entry for that eligible FCF record.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of the init_vfi mailbox command.
 */
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi.
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI
 * and VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT, "2892 Failed to allocate "
				 "init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
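
/*
 * Editor's note: lpfc_issue_init_vfi() above is one instance of the
 * asynchronous mailbox pattern repeated throughout this file. A minimal
 * sketch of the pattern, with the ownership rule called out (illustrative
 * only; not additional driver code):
 */
#if 0
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);	/* 1) alloc */
	if (!mboxq)
		return;
	lpfc_init_vfi(mboxq, vport);			/* 2) build command */
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;		/* 3) hook completion */
	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(mboxq, phba->mbox_mem_pool); /* caller still owns */
	/* On success, the completion handler owns and frees the mailbox. */
#endif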

/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of the init_vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT, "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_start_fdiscs - send FDISCs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There is no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (test_bit(FC_VPORT_NEEDS_INIT_VPI,
				     &vports[i]->fc_flag)) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do,
	 * unless this was a VFI update and we are in PT2PT mode; then
	 * we should drop through to set the port state to ready.
	 */
	if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      test_bit(FC_PT2PT, &vport->fc_flag)))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	set_bit(FC_VFI_REGISTERED, &vport->fc_flag);
	clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	/* In case of the SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%lx "
			 "myDid:%x alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if (test_bit(FC_PT2PT, &vport->fc_flag) ||
		    (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
		     !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (test_bit(FC_PT2PT, &vport->fc_flag))
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}

static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = pmb->ctx_buf;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *)&vport->fc_sparam, (uint8_t *)mp->virt,
	       sizeof(struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* R_A_TOV should be at least 10 sec for initial FLOGI */
		phba->fc_ratov = FF_DEF_RATOV;
	}
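	/*
	 * Editor's note (worked example): with edtovResolution set, an
	 * E_D_TOV of 2,000,000,000 ns rounds up to 2000 ms above; then
	 * fc_ratov = (2 * 2000) / 1000 = 4 seconds, which is below
	 * FF_DEF_RATOV and is therefore raised to the 10 second default.
	 */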

	lpfc_update_vport_wwn(vport);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Check if sending the FLOGI is being deferred to after we get
	 * up to date CSPs from MBX_READ_SPARAM.
	 */
	if (phba->hba_flag & HBA_DEFER_FLOGI) {
		lpfc_initial_flogi(vport);
		phba->hba_flag &= ~HBA_DEFER_FLOGI;
	}
	return;

out:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
	lpfc_issue_clear_la(phba, vport);
}

static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	int i;
	int rc;
	struct fcf_record *fcf_record;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
		case LPFC_LINK_SPEED_64GHZ:
		case LPFC_LINK_SPEED_128GHZ:
		case LPFC_LINK_SPEED_256GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* If NPIV is enabled and this adapter supports NPIV,
		 * log a message that NPIV is not supported in this
		 * topology.
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			set_bit(FC_LBIT, &vport->fc_flag);

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;

				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		set_bit(FC_LBIT, &vport->fc_flag);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	lpfc_linkup(phba);

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now.
		 * This is a phase 1 implementation that supports FCF index 0
		 * and driver defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2554 Could not allocate memory for "
						"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2013 Could not manually add FCF "
						"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	/* Prepare for LINK up registrations */
	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
		  init_utsname()->nodename);
	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
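
/*
 * Editor's note: lpfc_enable_la() above uses the usual MMIO read-modify-
 * write pattern; the trailing readl() flushes the posted PCI write so the
 * bit change reaches the adapter before the lock is dropped. In sketch
 * form (register and bit names here are illustrative only):
 */
#if 0
	u32 v = readl(regaddr);			/* read current value */
	writel(v | SOME_ENABLE_BIT, regaddr);	/* modify and write back */
	readl(regaddr);				/* flush the posted write */
#endif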

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = pmb->ctx_buf;
	uint8_t attn_type;

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	if (pring)
		pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	if (bf_get(lpfc_mbx_read_top_pb, la))
		set_bit(FC_BYPASSED_MODE, &vport->fc_flag);
	else
		clear_bit(FC_BYPASSED_MODE, &vport->fc_flag);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (attn_type == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	phba->link_events++;
	if (attn_type == LPFC_ATT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_fa, la));
		}
		lpfc_mbx_process_link_up(phba, la);

		if (phba->cmf_active_mode != LPFC_CFG_OFF)
			lpfc_cmf_signal_init(phba);

		if (phba->lmt & LMT_64Gb)
			lpfc_read_lds_params(phba);

	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
		   attn_type == LPFC_ATT_UNEXP_WWPN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1308 Link Down Event in loop back mode "
					"x%x received "
					"Data: x%x x%x x%lx\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag);
		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1313 Link Down Unexpected FA WWPN Event x%x "
					"received Data: x%x x%x x%lx x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_fa, la));
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1305 Link Down Event x%x received "
					"Data: x%x x%x x%lx x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la))
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = pmb->ctx_buf;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;

	/* The driver calls the state machine with the pmb pointer
	 * but wants to make sure a stale ctx_buf isn't acted on.
	 * The ctx_buf is restored later and cleaned up.
	 */
	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
			 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We received an RSCN after issuing this
		 * mbox reg login; we may have cycled
		 * back through the state and be
		 * back at reg login state, so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(&ndlp->lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	pmb->ctx_buf = mp;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}

static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
				 vport->vpi, mb->mbxStatus);
		if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}

	set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (test_bit(FC_UNLOADING, &vport->load_flag) && vport != phba->pport)
		scsi_host_put(shost);
}

int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		return rc;
	}
	return 0;
}

static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		clear_bit(FC_FABRIC, &vport->fc_flag);
		clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
		vport->fc_myDID = 0;

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (atomic_read(&vport->fc_npr_cnt))
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *)vport_info;
	do {
		/* Each loop iteration frees the DMA buffer left over
		 * from the previous iteration, because the mbox is reused
		 * and the dump routine is a single-use construct.
		 */
		if (pmb->ctx_buf) {
			mp = pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			pmb->ctx_buf = NULL;
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0544 lpfc_create_static_vport failed to"
					" issue dump mailbox command ret 0x%x "
					"status 0x%x\n",
					mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = pmb->ctx_buf;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
					- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
	     != VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) &
				VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0546 lpfc_create_static_vport failed to"
					" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT)
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
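
/*
 * Editor's note: the do/while loop in lpfc_create_static_vport() above is
 * a chunked-read accumulator: each DUMP returns at most one chunk, which
 * is clamped and appended at "offset" until a zero-length chunk arrives or
 * the destination buffer is full. A minimal standalone sketch (the helper
 * example_dump_next() is hypothetical, not a driver function):
 */
#if 0
static size_t example_chunked_read(u8 *dst, size_t dst_len)
{
	size_t offset = 0, chunk;

	do {
		/* hypothetical helper: reads the next chunk at "offset" */
		chunk = example_dump_next(dst + offset, dst_len - offset);
		offset += chunk;
	} while (chunk && offset < dst_len);

	return offset;	/* total bytes accumulated */
}
#endif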

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;

	pmb->ctx_ndlp = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after all
			 * current references to it are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after all current
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* When the physical port receives a LOGO, do not start
		 * vport discovery.
		 */
		if (!test_and_clear_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag))
			lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}
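
/*
 * Editor's note: gidft_inp acts as a simple outstanding-command counter
 * for the GID_FT queries issued above; discovery proceeds only after the
 * completion paths (elsewhere in the driver) have decremented it back to
 * zero. A sketch of that completion-side convention, as an assumption
 * about the counterpart code rather than a quote of it:
 */
#if 0
	if (vport->gidft_inp)
		vport->gidft_inp--;
	if (vport->gidft_inp == 0)
		/* all GID_FTs done; discovery may start */
		lpfc_disc_start(vport);
#endif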
4269 | |
4270 | /** |
4271 | * lpfc_issue_gidpt - issue a GID_PT for all N_Ports |
4272 | * @vport: The virtual port for which this call is being executed. |
4273 | * |
4274 | * This routine will issue a GID_PT to get a list of all N_Ports |
4275 | * |
4276 | * Return value : |
4277 | * 0 - Failure to issue a GID_PT |
4278 | * 1 - GID_PT issued |
4279 | **/ |
4280 | int |
4281 | lpfc_issue_gidpt(struct lpfc_vport *vport) |
4282 | { |
4283 | /* Good status, issue CT Request to NameServer */ |
4284 | if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) { |
4285 | /* Cannot issue NameServer FCP Query, so finish up |
4286 | * discovery |
4287 | */ |
4288 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4289 | "0606 %s Port TYPE %x %s\n" , |
4290 | "Failed to issue GID_PT to " , |
4291 | GID_PT_N_PORT, |
4292 | "Finishing discovery." ); |
4293 | return 0; |
4294 | } |
4295 | vport->gidft_inp++; |
4296 | return 1; |
4297 | } |
4298 | |
4299 | /* |
4300 | * This routine handles processing a NameServer REG_LOGIN mailbox |
4301 | * command upon completion. It is setup in the LPFC_MBOXQ |
4302 | * as the completion routine when the command is |
4303 | * handed off to the SLI layer. |
4304 | */ |
4305 | void |
4306 | lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
4307 | { |
4308 | MAILBOX_t *mb = &pmb->u.mb; |
4309 | struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; |
4310 | struct lpfc_vport *vport = pmb->vport; |
4311 | int rc; |
4312 | |
4313 | pmb->ctx_ndlp = NULL; |
4314 | vport->gidft_inp = 0; |
4315 | |
4316 | if (mb->mbxStatus) { |
4317 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4318 | "0260 Register NameServer error: 0x%x\n" , |
4319 | mb->mbxStatus); |
4320 | |
4321 | out: |
4322 | /* decrement the node reference count held for this |
4323 | * callback function. |
4324 | */ |
4325 | lpfc_nlp_put(ndlp); |
4326 | lpfc_mbox_rsrc_cleanup(phba, mbox: pmb, locked: MBOX_THD_UNLOCKED); |
4327 | |
4328 | /* If the node is not registered with the scsi or nvme |
4329 | * transport, remove the fabric node. The failed reg_login |
4330 | * is terminal and forces the removal of the last node |
4331 | * reference. |
4332 | */ |
4333 | if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { |
4334 | spin_lock_irq(lock: &ndlp->lock); |
4335 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
4336 | spin_unlock_irq(lock: &ndlp->lock); |
4337 | lpfc_nlp_put(ndlp); |
4338 | } |
4339 | |
4340 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
4341 | /* |
4342 | * RegLogin failed, use loop map to make discovery |
4343 | * list |
4344 | */ |
4345 | lpfc_disc_list_loopmap(vport); |
4346 | |
4347 | /* Start discovery */ |
4348 | lpfc_disc_start(vport); |
4349 | return; |
4350 | } |
4351 | lpfc_vport_set_state(vport, new_state: FC_VPORT_FAILED); |
4352 | return; |
4353 | } |
4354 | |
4355 | if (phba->sli_rev < LPFC_SLI_REV4) |
4356 | ndlp->nlp_rpi = mb->un.varWords[0]; |
4357 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
4358 | ndlp->nlp_type |= NLP_FABRIC; |
4359 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
4360 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, |
4361 | "0003 rpi:%x DID:%x flg:%x %d x%px\n" , |
4362 | ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, |
4363 | kref_read(&ndlp->kref), |
4364 | ndlp); |
4365 | |
4366 | if (vport->port_state < LPFC_VPORT_READY) { |
4367 | /* Link up discovery requires Fabric registration. */ |
4368 | lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); |
4369 | lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); |
4370 | lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); |
4371 | lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); |
4372 | |
4373 | if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || |
4374 | (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) |
4375 | lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); |
4376 | |
4377 | if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || |
4378 | (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) |
4379 | lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, |
4380 | FC_TYPE_NVME); |
4381 | |
4382 | /* Issue SCR just before NameServer GID_FT Query */ |
4383 | lpfc_issue_els_scr(vport, retry: 0); |
4384 | |
4385 | /* Link was bounced or a Fabric LOGO occurred. Start EDC |
4386 | * with initial FW values provided the congestion mode is |
4387 | * not off. Note that signals may or may not be supported |
4388 | * by the adapter but FPIN is provided by default for 1 |
4389 | * or both missing signals support. |
4390 | */ |
4391 | if (phba->cmf_active_mode != LPFC_CFG_OFF) { |
4392 | phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; |
4393 | phba->cgn_reg_signal = phba->cgn_init_reg_signal; |
4394 | rc = lpfc_issue_els_edc(vport, retry: 0); |
4395 | lpfc_printf_log(phba, KERN_INFO, |
4396 | LOG_INIT | LOG_ELS | LOG_DISCOVERY, |
4397 | "4220 Issue EDC status x%x Data x%x\n" , |
4398 | rc, phba->cgn_init_reg_signal); |
4399 | } else if (phba->lmt & LMT_64Gb) { |
4400 | /* may send link fault capability descriptor */ |
4401 | lpfc_issue_els_edc(vport, retry: 0); |
4402 | } else { |
4403 | lpfc_issue_els_rdf(vport, retry: 0); |
4404 | } |
4405 | } |
4406 | |
4407 | vport->fc_ns_retry = 0; |
4408 | if (lpfc_issue_gidft(vport) == 0) |
4409 | goto out; |
4410 | |
4411 | /* |
4412 | * At this point in time we may need to wait for multiple |
4413 | * SLI_CTNS_GID_FT CT commands to complete before we start discovery. |
4414 | * |
4415 | * decrement the node reference count held for this |
4416 | * callback function. |
4417 | */ |
4418 | lpfc_nlp_put(ndlp); |
4419 | lpfc_mbox_rsrc_cleanup(phba, mbox: pmb, locked: MBOX_THD_UNLOCKED); |
4420 | return; |
4421 | } |
4422 | |
4423 | /* |
4424 | * This routine handles processing a Fabric Controller REG_LOGIN mailbox |
4425 | * command upon completion. It is setup in the LPFC_MBOXQ |
4426 | * as the completion routine when the command is handed off to the SLI layer. |
4427 | */ |
4428 | void |
4429 | lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
4430 | { |
4431 | struct lpfc_vport *vport = pmb->vport; |
4432 | MAILBOX_t *mb = &pmb->u.mb; |
4433 | struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; |
4434 | |
4435 | pmb->ctx_ndlp = NULL; |
4436 | if (mb->mbxStatus) { |
4437 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4438 | "0933 %s: Register FC login error: 0x%x\n" , |
4439 | __func__, mb->mbxStatus); |
4440 | goto out; |
4441 | } |
4442 | |
4443 | lpfc_check_nlp_post_devloss(vport, ndlp); |
4444 | |
4445 | if (phba->sli_rev < LPFC_SLI_REV4) |
4446 | ndlp->nlp_rpi = mb->un.varWords[0]; |
4447 | |
4448 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
4449 | "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n" , |
4450 | __func__, ndlp->nlp_DID, ndlp->nlp_rpi, |
4451 | ndlp->nlp_state); |
4452 | |
4453 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
4454 | ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; |
4455 | ndlp->nlp_type |= NLP_FABRIC; |
4456 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
4457 | |
4458 | out: |
4459 | lpfc_mbox_rsrc_cleanup(phba, mbox: pmb, locked: MBOX_THD_UNLOCKED); |
4460 | |
4461 | /* Drop the reference count from the mbox at the end after |
4462 | * all the current reference to the ndlp have been done. |
4463 | */ |
4464 | lpfc_nlp_put(ndlp); |
4465 | } |
4466 | |
4467 | static void |
4468 | lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4469 | { |
4470 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4471 | struct fc_rport *rport; |
4472 | struct lpfc_rport_data *rdata; |
4473 | struct fc_rport_identifiers rport_ids; |
4474 | struct lpfc_hba *phba = vport->phba; |
4475 | unsigned long flags; |
4476 | |
4477 | if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) |
4478 | return; |
4479 | |
4480 | /* Remote port has reappeared. Re-register w/ FC transport */ |
4481 | rport_ids.node_name = wwn_to_u64(wwn: ndlp->nlp_nodename.u.wwn); |
4482 | rport_ids.port_name = wwn_to_u64(wwn: ndlp->nlp_portname.u.wwn); |
4483 | rport_ids.port_id = ndlp->nlp_DID; |
4484 | rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; |
4485 | |
4486 | |
4487 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
4488 | "rport add: did:x%x flg:x%x type x%x" , |
4489 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
4490 | |
4491 | /* Don't add the remote port if unloading. */ |
4492 | if (test_bit(FC_UNLOADING, &vport->load_flag)) |
4493 | return; |
4494 | |
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
if (!rport) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
4499 | return; |
4500 | } |
4501 | |
4502 | /* Successful port add. Complete initializing node data */ |
4503 | rport->maxframe_size = ndlp->nlp_maxframe; |
4504 | rport->supported_classes = ndlp->nlp_class_sup; |
4505 | rdata = rport->dd_data; |
4506 | rdata->pnode = lpfc_nlp_get(ndlp); |
4507 | if (!rdata->pnode) { |
4508 | dev_warn(&phba->pcidev->dev, |
4509 | "Warning - node ref failed. Unreg rport\n" ); |
4510 | fc_remote_port_delete(rport); |
4511 | ndlp->rport = NULL; |
4512 | return; |
4513 | } |
4514 | |
4515 | spin_lock_irqsave(&ndlp->lock, flags); |
4516 | ndlp->fc4_xpt_flags |= SCSI_XPT_REGD; |
spin_unlock_irqrestore(&ndlp->lock, flags);
4518 | |
4519 | if (ndlp->nlp_type & NLP_FCP_TARGET) |
4520 | rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; |
4521 | if (ndlp->nlp_type & NLP_FCP_INITIATOR) |
4522 | rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; |
4523 | if (ndlp->nlp_type & NLP_NVME_INITIATOR) |
4524 | rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; |
4525 | if (ndlp->nlp_type & NLP_NVME_TARGET) |
4526 | rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; |
4527 | if (ndlp->nlp_type & NLP_NVME_DISCOVERY) |
4528 | rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; |
4529 | |
4530 | if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) |
fc_remote_port_rolechg(rport, rport_ids.roles);

lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
4535 | __func__, rport, rport->port_id, rport->roles, |
4536 | kref_read(&ndlp->kref)); |
4537 | |
4538 | if ((rport->scsi_target_id != -1) && |
4539 | (rport->scsi_target_id < LPFC_MAX_TARGET)) { |
4540 | ndlp->nlp_sid = rport->scsi_target_id; |
4541 | } |
4542 | |
4543 | return; |
4544 | } |
4545 | |
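/*
 * Remove the ndlp's rport from the FC transport and drop the node
 * reference taken when the rport was registered. Vports configured
 * for NVME only never registered with the SCSI transport and are
 * skipped.
 */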
4546 | static void |
4547 | lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) |
4548 | { |
4549 | struct fc_rport *rport = ndlp->rport; |
4550 | struct lpfc_vport *vport = ndlp->vport; |
4551 | |
4552 | if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) |
4553 | return; |
4554 | |
4555 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
4556 | "rport delete: did:x%x flg:x%x type x%x" , |
4557 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
4558 | |
4559 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
4560 | "3184 rport unregister x%06x, rport x%px " |
4561 | "xptflg x%x refcnt %d\n" , |
4562 | ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, |
4563 | kref_read(&ndlp->kref)); |
4564 | |
4565 | fc_remote_port_delete(rport); |
4566 | lpfc_nlp_put(ndlp); |
4567 | } |
4568 | |
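/* Adjust the vport's per-state node counters by count (typically
 * +1/-1 around a node state transition).
 */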
4569 | static void |
4570 | lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) |
4571 | { |
switch (state) {
case NLP_STE_UNUSED_NODE:
atomic_add(count, &vport->fc_unused_cnt);
break;
case NLP_STE_PLOGI_ISSUE:
atomic_add(count, &vport->fc_plogi_cnt);
break;
case NLP_STE_ADISC_ISSUE:
atomic_add(count, &vport->fc_adisc_cnt);
break;
case NLP_STE_REG_LOGIN_ISSUE:
atomic_add(count, &vport->fc_reglogin_cnt);
break;
case NLP_STE_PRLI_ISSUE:
atomic_add(count, &vport->fc_prli_cnt);
break;
case NLP_STE_UNMAPPED_NODE:
atomic_add(count, &vport->fc_unmap_cnt);
break;
case NLP_STE_MAPPED_NODE:
atomic_add(count, &vport->fc_map_cnt);
break;
case NLP_STE_NPR_NODE:
if (!atomic_read(&vport->fc_npr_cnt) && count == -1)
atomic_set(&vport->fc_npr_cnt, 0);
else
atomic_add(count, &vport->fc_npr_cnt);
break;
}
4601 | } |
4602 | |
4603 | /* Register a node with backend if not already done */ |
4604 | void |
4605 | lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4606 | { |
4607 | unsigned long iflags; |
4608 | |
4609 | lpfc_check_nlp_post_devloss(vport, ndlp); |
4610 | |
4611 | spin_lock_irqsave(&ndlp->lock, iflags); |
4612 | if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { |
4613 | /* Already registered with backend, trigger rescan */ |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4615 | |
4616 | if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && |
4617 | ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { |
4618 | lpfc_nvme_rescan_port(vport, ndlp); |
4619 | } |
4620 | return; |
4621 | } |
4622 | |
4623 | ndlp->fc4_xpt_flags |= NLP_XPT_REGD; |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4625 | |
4626 | if (lpfc_valid_xpt_node(ndlp)) { |
4627 | vport->phba->nport_event_cnt++; |
4628 | /* |
4629 | * Tell the fc transport about the port, if we haven't |
4630 | * already. If we have, and it's a scsi entity, be |
4631 | */ |
4632 | lpfc_register_remote_port(vport, ndlp); |
4633 | } |
4634 | |
4635 | /* We are done if we do not have any NVME remote node */ |
4636 | if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) |
4637 | return; |
4638 | |
4639 | /* Notify the NVME transport of this new rport. */ |
4640 | if (vport->phba->sli_rev >= LPFC_SLI_REV4 && |
4641 | ndlp->nlp_fc4_type & NLP_FC4_NVME) { |
4642 | if (vport->phba->nvmet_support == 0) { |
4643 | /* Register this rport with the transport. |
4644 | * Only NVME Target Rports are registered with |
4645 | * the transport. |
4646 | */ |
4647 | if (ndlp->nlp_type & NLP_NVME_TARGET) { |
4648 | vport->phba->nport_event_cnt++; |
4649 | lpfc_nvme_register_port(vport, ndlp); |
4650 | } |
4651 | } else { |
4652 | /* Just take an NDLP ref count since the |
4653 | * target does not register rports. |
4654 | */ |
4655 | lpfc_nlp_get(ndlp); |
4656 | } |
4657 | } |
4658 | } |
4659 | |
4660 | /* Unregister a node with backend if not already done */ |
4661 | void |
4662 | lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4663 | { |
4664 | unsigned long iflags; |
4665 | |
4666 | spin_lock_irqsave(&ndlp->lock, iflags); |
4667 | if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4669 | lpfc_printf_vlog(vport, KERN_INFO, |
4670 | LOG_ELS | LOG_NODE | LOG_DISCOVERY, |
4671 | "0999 %s Not regd: ndlp x%px rport x%px DID " |
4672 | "x%x FLG x%x XPT x%x\n" , |
4673 | __func__, ndlp, ndlp->rport, ndlp->nlp_DID, |
4674 | ndlp->nlp_flag, ndlp->fc4_xpt_flags); |
4675 | return; |
4676 | } |
4677 | |
4678 | ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4680 | |
4681 | if (ndlp->rport && |
4682 | ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { |
4683 | vport->phba->nport_event_cnt++; |
4684 | lpfc_unregister_remote_port(ndlp); |
4685 | } else if (!ndlp->rport) { |
4686 | lpfc_printf_vlog(vport, KERN_INFO, |
4687 | LOG_ELS | LOG_NODE | LOG_DISCOVERY, |
4688 | "1999 %s NDLP in devloss x%px DID x%x FLG x%x" |
4689 | " XPT x%x refcnt %u\n" , |
4690 | __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, |
4691 | ndlp->fc4_xpt_flags, |
4692 | kref_read(&ndlp->kref)); |
4693 | } |
4694 | |
4695 | if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { |
4696 | vport->phba->nport_event_cnt++; |
4697 | if (vport->phba->nvmet_support == 0) { |
4698 | /* Start devloss if target. */ |
4699 | if (ndlp->nlp_type & NLP_NVME_TARGET) |
4700 | lpfc_nvme_unregister_port(vport, ndlp); |
4701 | } else { |
4702 | /* NVMET has no upcall. */ |
4703 | lpfc_nlp_put(ndlp); |
4704 | } |
4705 | } |
4706 | |
4707 | } |
4708 | |
4709 | /* |
4710 | * Adisc state change handling |
4711 | */ |
4712 | static void |
4713 | lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4714 | int new_state) |
4715 | { |
4716 | switch (new_state) { |
4717 | /* |
4718 | * Any state to ADISC_ISSUE |
4719 | * Do nothing, adisc cmpl handling will trigger state changes |
4720 | */ |
4721 | case NLP_STE_ADISC_ISSUE: |
4722 | break; |
4723 | |
4724 | /* |
4725 | * ADISC_ISSUE to mapped states |
4726 | * Trigger a registration with backend, it will be nop if |
4727 | * already registered |
4728 | */ |
4729 | case NLP_STE_UNMAPPED_NODE: |
4730 | ndlp->nlp_type |= NLP_FC_NODE; |
4731 | fallthrough; |
4732 | case NLP_STE_MAPPED_NODE: |
4733 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
4734 | lpfc_nlp_reg_node(vport, ndlp); |
4735 | break; |
4736 | |
4737 | /* |
4738 | * ADISC_ISSUE to non-mapped states |
4739 | * We are moving from ADISC_ISSUE to a non-mapped state because |
4740 | * ADISC failed, we would have skipped unregistering with |
4741 | * backend, attempt it now |
4742 | */ |
4743 | case NLP_STE_NPR_NODE: |
4744 | ndlp->nlp_flag &= ~NLP_RCV_PLOGI; |
4745 | fallthrough; |
4746 | default: |
4747 | lpfc_nlp_unreg_node(vport, ndlp); |
4748 | break; |
4749 | } |
4750 | |
4751 | } |
4752 | |
4753 | static void |
4754 | lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4755 | int old_state, int new_state) |
4756 | { |
4757 | /* Trap ADISC changes here */ |
4758 | if (new_state == NLP_STE_ADISC_ISSUE || |
4759 | old_state == NLP_STE_ADISC_ISSUE) { |
4760 | lpfc_handle_adisc_state(vport, ndlp, new_state); |
4761 | return; |
4762 | } |
4763 | |
4764 | if (new_state == NLP_STE_UNMAPPED_NODE) { |
4765 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
4766 | ndlp->nlp_type |= NLP_FC_NODE; |
4767 | } |
4768 | if (new_state == NLP_STE_MAPPED_NODE) |
4769 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
4770 | if (new_state == NLP_STE_NPR_NODE) |
4771 | ndlp->nlp_flag &= ~NLP_RCV_PLOGI; |
4772 | |
4773 | /* Reg/Unreg for FCP and NVME Transport interface */ |
4774 | if ((old_state == NLP_STE_MAPPED_NODE || |
4775 | old_state == NLP_STE_UNMAPPED_NODE)) { |
4776 | /* For nodes marked for ADISC, Handle unreg in ADISC cmpl |
4777 | * if linkup. In linkdown do unreg_node |
4778 | */ |
4779 | if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || |
!lpfc_is_link_up(vport->phba))
4781 | lpfc_nlp_unreg_node(vport, ndlp); |
4782 | } |
4783 | |
4784 | if (new_state == NLP_STE_MAPPED_NODE || |
4785 | new_state == NLP_STE_UNMAPPED_NODE) |
4786 | lpfc_nlp_reg_node(vport, ndlp); |
4787 | |
4788 | /* |
4789 | * If the node just added to Mapped list was an FCP target, |
4790 | * but the remote port registration failed or assigned a target |
4791 | * id outside the presentable range - move the node to the |
4792 | * Unmapped List. |
4793 | */ |
4794 | if ((new_state == NLP_STE_MAPPED_NODE) && |
4795 | (ndlp->nlp_type & NLP_FCP_TARGET) && |
4796 | (!ndlp->rport || |
4797 | ndlp->rport->scsi_target_id == -1 || |
4798 | ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
spin_unlock_irq(&ndlp->lock);
4802 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
4803 | } |
4804 | } |
4805 | |
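/* Map a node state to a printable name for log messages */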
4806 | static char * |
4807 | lpfc_nlp_state_name(char *buffer, size_t size, int state) |
4808 | { |
4809 | static char *states[] = { |
[NLP_STE_UNUSED_NODE] = "UNUSED",
[NLP_STE_PLOGI_ISSUE] = "PLOGI",
[NLP_STE_ADISC_ISSUE] = "ADISC",
[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
[NLP_STE_PRLI_ISSUE] = "PRLI",
[NLP_STE_LOGO_ISSUE] = "LOGO",
[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
[NLP_STE_MAPPED_NODE] = "MAPPED",
[NLP_STE_NPR_NODE] = "NPR",
4819 | }; |
4820 | |
4821 | if (state < NLP_STE_MAX_STATE && states[state]) |
4822 | strscpy(buffer, states[state], size); |
4823 | else |
snprintf(buffer, size, "unknown (%d)", state);
4825 | return buffer; |
4826 | } |
4827 | |
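/*
 * Transition a node to a new discovery state: retake the initial node
 * reference if the node had been dropped, keep the per-state counters
 * balanced, and let lpfc_nlp_state_cleanup perform any transport
 * registration work the transition requires.
 */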
4828 | void |
4829 | lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4830 | int state) |
4831 | { |
4832 | int old_state = ndlp->nlp_state; |
4833 | int node_dropped = ndlp->nlp_flag & NLP_DROPPED; |
4834 | char name1[16], name2[16]; |
4835 | unsigned long iflags; |
4836 | |
4837 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
4838 | "0904 NPort state transition x%06x, %s -> %s\n" , |
4839 | ndlp->nlp_DID, |
4840 | lpfc_nlp_state_name(name1, sizeof(name1), old_state), |
4841 | lpfc_nlp_state_name(name2, sizeof(name2), state)); |
4842 | |
4843 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
4844 | "node statechg did:x%x old:%d ste:%d" , |
4845 | ndlp->nlp_DID, old_state, state); |
4846 | |
4847 | if (node_dropped && old_state == NLP_STE_UNUSED_NODE && |
4848 | state != NLP_STE_UNUSED_NODE) { |
4849 | ndlp->nlp_flag &= ~NLP_DROPPED; |
4850 | lpfc_nlp_get(ndlp); |
4851 | } |
4852 | |
4853 | if (old_state == NLP_STE_NPR_NODE && |
4854 | state != NLP_STE_NPR_NODE) |
4855 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
4856 | if (old_state == NLP_STE_UNMAPPED_NODE) { |
4857 | ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; |
4858 | ndlp->nlp_type &= ~NLP_FC_NODE; |
4859 | } |
4860 | |
if (list_empty(&ndlp->nlp_listp)) {
spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
} else if (old_state)
lpfc_nlp_counters(vport, old_state, -1);
4867 | |
4868 | ndlp->nlp_state = state; |
lpfc_nlp_counters(vport, state, 1);
lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4871 | } |
4872 | |
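/* Add the node to the vport's node list if it is not already on it */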
4873 | void |
4874 | lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4875 | { |
4876 | unsigned long iflags; |
4877 | |
if (list_empty(&ndlp->nlp_listp)) {
spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
4882 | } |
4883 | } |
4884 | |
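/* Remove the node from the vport's node list, fix up the state
 * counters, and run state cleanup as if the node moved to UNUSED.
 */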
4885 | void |
4886 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4887 | { |
4888 | unsigned long iflags; |
4889 | |
4890 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_del_init(&ndlp->nlp_listp);
spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
NLP_STE_UNUSED_NODE);
4898 | } |
4899 | |
4900 | /** |
4901 | * lpfc_initialize_node - Initialize all fields of node object |
4902 | * @vport: Pointer to Virtual Port object. |
4903 | * @ndlp: Pointer to FC node object. |
4904 | * @did: FC_ID of the node. |
4905 | * |
* This function is always called when a node object needs to be initialized.
* It initializes all the fields of the node object. Although the reference
* to phba from @ndlp can be obtained indirectly through its reference to
* @vport, a direct reference to phba is taken here by @ndlp because the
* life-span of @ndlp may extend beyond the existence of @vport: the final
* release of the ndlp is determined by its reference count, and operations
* on @ndlp need the reference to phba.
4913 | **/ |
4914 | static inline void |
4915 | lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4916 | uint32_t did) |
4917 | { |
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);

ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4932 | } |
4933 | |
4934 | void |
4935 | lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4936 | { |
4937 | /* |
4938 | * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should |
4939 | * be used when lpfc wants to remove the "last" lpfc_nlp_put() to |
4940 | * release the ndlp from the vport when conditions are correct. |
4941 | */ |
4942 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
4943 | return; |
4944 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); |
4945 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
4946 | lpfc_cleanup_vports_rrqs(vport, ndlp); |
4947 | lpfc_unreg_rpi(vport, ndlp); |
4948 | } |
4949 | |
4950 | /* NLP_DROPPED means another thread already removed the initial |
4951 | * reference from lpfc_nlp_init. If set, don't drop it again and |
4952 | * introduce an imbalance. |
4953 | */ |
spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_DROPPED)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irq(&ndlp->lock);
lpfc_nlp_put(ndlp);
return;
}
spin_unlock_irq(&ndlp->lock);
4962 | } |
4963 | |
4964 | /* |
4965 | * Start / ReStart rescue timer for Discovery / RSCN handling |
4966 | */ |
4967 | void |
4968 | lpfc_set_disctmo(struct lpfc_vport *vport) |
4969 | { |
4970 | struct lpfc_hba *phba = vport->phba; |
4971 | uint32_t tmo; |
4972 | |
4973 | if (vport->port_state == LPFC_LOCAL_CFG_LINK) { |
4974 | /* For FAN, timeout should be greater than edtov */ |
4975 | tmo = (((phba->fc_edtov + 999) / 1000) + 1); |
4976 | } else { |
4977 | /* Normal discovery timeout should be > than ELS/CT timeout |
4978 | * FC spec states we need 3 * ratov for CT requests |
4979 | */ |
4980 | tmo = ((phba->fc_ratov * 3) + 3); |
4981 | } |
4982 | |
if (!timer_pending(&vport->fc_disctmo)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"set disc timer: tmo:x%x state:x%x flg:x%x",
tmo, vport->port_state, vport->fc_flag);
}

mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
set_bit(FC_DISC_TMO, &vport->fc_flag);
4992 | |
4993 | /* Start Discovery Timer state <hba_state> */ |
4994 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
4995 | "0247 Start Discovery Timer state x%x " |
4996 | "Data: x%x x%lx x%x x%x\n" , |
4997 | vport->port_state, tmo, |
4998 | (unsigned long)&vport->fc_disctmo, |
4999 | atomic_read(&vport->fc_plogi_cnt), |
5000 | atomic_read(&vport->fc_adisc_cnt)); |
5001 | |
5002 | return; |
5003 | } |
5004 | |
5005 | /* |
5006 | * Cancel rescue timer for Discovery / RSCN handling |
5007 | */ |
5008 | int |
5009 | lpfc_can_disctmo(struct lpfc_vport *vport) |
5010 | { |
5011 | unsigned long iflags; |
5012 | |
5013 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
5014 | "can disc timer: state:x%x rtry:x%x flg:x%x" , |
5015 | vport->port_state, vport->fc_ns_retry, vport->fc_flag); |
5016 | |
/* Turn off discovery timer if it's running */
if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
timer_pending(&vport->fc_disctmo)) {
clear_bit(FC_DISC_TMO, &vport->fc_flag);
del_timer_sync(&vport->fc_disctmo);
spin_lock_irqsave(&vport->work_port_lock, iflags);
vport->work_port_events &= ~WORKER_DISC_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflags);
5025 | } |
5026 | |
5027 | /* Cancel Discovery Timer state <hba_state> */ |
5028 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5029 | "0248 Cancel Discovery Timer state x%x " |
5030 | "Data: x%lx x%x x%x\n" , |
5031 | vport->port_state, vport->fc_flag, |
5032 | atomic_read(&vport->fc_plogi_cnt), |
5033 | atomic_read(&vport->fc_adisc_cnt)); |
5034 | return 0; |
5035 | } |
5036 | |
5037 | /* |
5038 | * Check specified ring for outstanding IOCB on the SLI queue |
5039 | * Return true if iocb matches the specified nport |
5040 | */ |
5041 | int |
5042 | lpfc_check_sli_ndlp(struct lpfc_hba *phba, |
5043 | struct lpfc_sli_ring *pring, |
5044 | struct lpfc_iocbq *iocb, |
5045 | struct lpfc_nodelist *ndlp) |
5046 | { |
5047 | struct lpfc_vport *vport = ndlp->vport; |
5048 | u8 ulp_command; |
5049 | u16 ulp_context; |
5050 | u32 remote_id; |
5051 | |
5052 | if (iocb->vport != vport) |
5053 | return 0; |
5054 | |
ulp_command = get_job_cmnd(phba, iocb);
ulp_context = get_job_ulpcontext(phba, iocb);
remote_id = get_job_els_rsp64_did(phba, iocb);
5058 | |
5059 | if (pring->ringno == LPFC_ELS_RING) { |
5060 | switch (ulp_command) { |
5061 | case CMD_GEN_REQUEST64_CR: |
5062 | if (iocb->ndlp == ndlp) |
5063 | return 1; |
5064 | fallthrough; |
5065 | case CMD_ELS_REQUEST64_CR: |
5066 | if (remote_id == ndlp->nlp_DID) |
5067 | return 1; |
5068 | fallthrough; |
5069 | case CMD_XMIT_ELS_RSP64_CX: |
5070 | if (iocb->ndlp == ndlp) |
5071 | return 1; |
5072 | } |
5073 | } else if (pring->ringno == LPFC_FCP_RING) { |
5074 | /* Skip match check if waiting to relogin to FCP target */ |
5075 | if ((ndlp->nlp_type & NLP_FCP_TARGET) && |
5076 | (ndlp->nlp_flag & NLP_DELAY_TMO)) { |
5077 | return 0; |
5078 | } |
5079 | if (ulp_context == ndlp->nlp_rpi) |
5080 | return 1; |
5081 | } |
5082 | return 0; |
5083 | } |
5084 | |
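/* Move every txq iocb on the ring that matches the nport onto
 * dequeue_list. Callers hold the appropriate ring/hba lock.
 */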
5085 | static void |
5086 | __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, |
5087 | struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, |
5088 | struct list_head *dequeue_list) |
5089 | { |
5090 | struct lpfc_iocbq *iocb, *next_iocb; |
5091 | |
5092 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { |
5093 | /* Check to see if iocb matches the nport */ |
5094 | if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) |
5095 | /* match, dequeue */ |
list_move_tail(&iocb->list, dequeue_list);
5097 | } |
5098 | } |
5099 | |
5100 | static void |
5101 | lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, |
5102 | struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) |
5103 | { |
5104 | struct lpfc_sli *psli = &phba->sli; |
5105 | uint32_t i; |
5106 | |
spin_lock_irq(&phba->hbalock);
for (i = 0; i < psli->num_rings; i++)
__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
dequeue_list);
spin_unlock_irq(&phba->hbalock);
5112 | } |
5113 | |
5114 | static void |
5115 | lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, |
5116 | struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) |
5117 | { |
5118 | struct lpfc_sli_ring *pring; |
5119 | struct lpfc_queue *qp = NULL; |
5120 | |
spin_lock_irq(&phba->hbalock);
list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
pring = qp->pring;
if (!pring)
continue;
spin_lock(&pring->ring_lock);
__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
spin_unlock(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
5131 | } |
5132 | |
5133 | /* |
5134 | * Free resources / clean up outstanding I/Os |
5135 | * associated with nlp_rpi in the LPFC_NODELIST entry. |
5136 | */ |
5137 | static int |
5138 | lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
5139 | { |
5140 | LIST_HEAD(completions); |
5141 | |
5142 | lpfc_fabric_abort_nport(ndlp); |
5143 | |
5144 | /* |
5145 | * Everything that matches on txcmplq will be returned |
5146 | * by firmware with a no rpi error. |
5147 | */ |
5148 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
5149 | if (phba->sli_rev != LPFC_SLI_REV4) |
lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
else
lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
5153 | } |
5154 | |
5155 | /* Cancel all the IOCBs from the completions list */ |
5156 | lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, |
5157 | IOERR_SLI_ABORTED); |
5158 | |
5159 | return 0; |
5160 | } |
5161 | |
5162 | /** |
5163 | * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO |
5164 | * @phba: Pointer to HBA context object. |
5165 | * @pmb: Pointer to mailbox object. |
5166 | * |
5167 | * This function will issue an ELS LOGO command after completing |
5168 | * the UNREG_RPI. |
5169 | **/ |
5170 | static void |
5171 | lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
5172 | { |
5173 | struct lpfc_vport *vport = pmb->vport; |
5174 | struct lpfc_nodelist *ndlp; |
5175 | |
5176 | ndlp = pmb->ctx_ndlp; |
5177 | if (!ndlp) |
5178 | return; |
5179 | lpfc_issue_els_logo(vport, ndlp, 0); |
5180 | |
5181 | /* Check to see if there are any deferred events to process */ |
5182 | if ((ndlp->nlp_flag & NLP_UNREG_INP) && |
5183 | (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { |
5184 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5185 | "1434 UNREG cmpl deferred logo x%x " |
5186 | "on NPort x%x Data: x%x x%px\n" , |
5187 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5188 | ndlp->nlp_defer_did, ndlp); |
5189 | |
5190 | ndlp->nlp_flag &= ~NLP_UNREG_INP; |
5191 | ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; |
5192 | lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); |
5193 | } else { |
5194 | /* NLP_RELEASE_RPI is only set for SLI4 ports. */ |
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
spin_unlock_irq(&ndlp->lock);
}
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
spin_unlock_irq(&ndlp->lock);
5205 | } |
5206 | |
5207 | /* The node has an outstanding reference for the unreg. Now |
5208 | * that the LOGO action and cleanup are finished, release |
5209 | * resources. |
5210 | */ |
5211 | lpfc_nlp_put(ndlp); |
mempool_free(pmb, phba->mbox_mem_pool);
5213 | } |
5214 | |
5215 | /* |
5216 | * Sets the mailbox completion handler to be used for the |
5217 | * unreg_rpi command. The handler varies based on the state of |
5218 | * the port and what will be happening to the rpi next. |
5219 | */ |
5220 | static void |
5221 | lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, |
5222 | struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) |
5223 | { |
5224 | unsigned long iflags; |
5225 | |
5226 | /* Driver always gets a reference on the mailbox job |
5227 | * in support of async jobs. |
5228 | */ |
5229 | mbox->ctx_ndlp = lpfc_nlp_get(ndlp); |
5230 | if (!mbox->ctx_ndlp) |
5231 | return; |
5232 | |
5233 | if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { |
5234 | mbox->mbox_cmpl = lpfc_nlp_logo_unreg; |
5235 | |
5236 | } else if (phba->sli_rev == LPFC_SLI_REV4 && |
5237 | !test_bit(FC_UNLOADING, &vport->load_flag) && |
5238 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= |
5239 | LPFC_SLI_INTF_IF_TYPE_2) && |
(kref_read(&ndlp->kref) > 0)) {
5241 | mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; |
5242 | } else { |
5243 | if (test_bit(FC_UNLOADING, &vport->load_flag)) { |
5244 | if (phba->sli_rev == LPFC_SLI_REV4) { |
5245 | spin_lock_irqsave(&ndlp->lock, iflags); |
5246 | ndlp->nlp_flag |= NLP_RELEASE_RPI; |
spin_unlock_irqrestore(&ndlp->lock, iflags);
5248 | } |
5249 | } |
5250 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5251 | } |
5252 | } |
5253 | |
5254 | /* |
5255 | * Free rpi associated with LPFC_NODELIST entry. |
5256 | * This routine is called from lpfc_freenode(), when we are removing |
5257 | * a LPFC_NODELIST entry. It is also called if the driver initiates a |
5258 | * LOGO that completes successfully, and we are waiting to PLOGI back |
5259 | * to the remote NPort. In addition, it is called after we receive |
* an unsolicited ELS cmd, send back a rsp, the rsp completes and
5261 | * we are waiting to PLOGI back to the remote NPort. |
5262 | */ |
5263 | int |
5264 | lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
5265 | { |
5266 | struct lpfc_hba *phba = vport->phba; |
5267 | LPFC_MBOXQ_t *mbox; |
5268 | int rc, acc_plogi = 1; |
5269 | uint16_t rpi; |
5270 | |
5271 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED || |
5272 | ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { |
5273 | if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) |
5274 | lpfc_printf_vlog(vport, KERN_INFO, |
5275 | LOG_NODE | LOG_DISCOVERY, |
5276 | "3366 RPI x%x needs to be " |
5277 | "unregistered nlp_flag x%x " |
5278 | "did x%x\n" , |
5279 | ndlp->nlp_rpi, ndlp->nlp_flag, |
5280 | ndlp->nlp_DID); |
5281 | |
5282 | /* If there is already an UNREG in progress for this ndlp, |
5283 | * no need to queue up another one. |
5284 | */ |
5285 | if (ndlp->nlp_flag & NLP_UNREG_INP) { |
5286 | lpfc_printf_vlog(vport, KERN_INFO, |
5287 | LOG_NODE | LOG_DISCOVERY, |
5288 | "1436 unreg_rpi SKIP UNREG x%x on " |
5289 | "NPort x%x deferred x%x flg x%x " |
5290 | "Data: x%px\n" , |
5291 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5292 | ndlp->nlp_defer_did, |
5293 | ndlp->nlp_flag, ndlp); |
5294 | goto out; |
5295 | } |
5296 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5298 | if (mbox) { |
5299 | /* SLI4 ports require the physical rpi value. */ |
5300 | rpi = ndlp->nlp_rpi; |
5301 | if (phba->sli_rev == LPFC_SLI_REV4) |
5302 | rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; |
5303 | |
5304 | lpfc_unreg_login(phba, vport->vpi, rpi, mbox); |
5305 | mbox->vport = vport; |
5306 | lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); |
5307 | if (!mbox->ctx_ndlp) { |
mempool_free(mbox, phba->mbox_mem_pool);
5309 | return 1; |
5310 | } |
5311 | |
5312 | if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) |
5313 | /* |
5314 | * accept PLOGIs after unreg_rpi_cmpl |
5315 | */ |
5316 | acc_plogi = 0; |
5317 | if (((ndlp->nlp_DID & Fabric_DID_MASK) != |
5318 | Fabric_DID_MASK) && |
5319 | (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) |
5320 | ndlp->nlp_flag |= NLP_UNREG_INP; |
5321 | |
5322 | lpfc_printf_vlog(vport, KERN_INFO, |
5323 | LOG_NODE | LOG_DISCOVERY, |
5324 | "1433 unreg_rpi UNREG x%x on " |
5325 | "NPort x%x deferred flg x%x " |
5326 | "Data:x%px\n" , |
5327 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5328 | ndlp->nlp_flag, ndlp); |
5329 | |
5330 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
5331 | if (rc == MBX_NOT_FINISHED) { |
5332 | ndlp->nlp_flag &= ~NLP_UNREG_INP; |
mempool_free(mbox, phba->mbox_mem_pool);
5334 | acc_plogi = 1; |
5335 | lpfc_nlp_put(ndlp); |
5336 | } |
5337 | } else { |
5338 | lpfc_printf_vlog(vport, KERN_INFO, |
5339 | LOG_NODE | LOG_DISCOVERY, |
5340 | "1444 Failed to allocate mempool " |
5341 | "unreg_rpi UNREG x%x, " |
5342 | "DID x%x, flag x%x, " |
5343 | "ndlp x%px\n" , |
5344 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5345 | ndlp->nlp_flag, ndlp); |
5346 | |
5347 | /* Because mempool_alloc failed, we |
5348 | * will issue a LOGO here and keep the rpi alive if |
5349 | * not unloading. |
5350 | */ |
5351 | if (!test_bit(FC_UNLOADING, &vport->load_flag)) { |
5352 | ndlp->nlp_flag &= ~NLP_UNREG_INP; |
5353 | lpfc_issue_els_logo(vport, ndlp, 0); |
5354 | ndlp->nlp_prev_state = ndlp->nlp_state; |
5355 | lpfc_nlp_set_state(vport, ndlp, |
5356 | NLP_STE_NPR_NODE); |
5357 | } |
5358 | |
5359 | return 1; |
5360 | } |
5361 | lpfc_no_rpi(phba, ndlp); |
5362 | out: |
5363 | if (phba->sli_rev != LPFC_SLI_REV4) |
5364 | ndlp->nlp_rpi = 0; |
5365 | ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; |
5366 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
5367 | if (acc_plogi) |
5368 | ndlp->nlp_flag &= ~NLP_LOGO_ACC; |
5369 | return 1; |
5370 | } |
5371 | ndlp->nlp_flag &= ~NLP_LOGO_ACC; |
5372 | return 0; |
5373 | } |
5374 | |
5375 | /** |
5376 | * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. |
5377 | * @phba: pointer to lpfc hba data structure. |
5378 | * |
5379 | * This routine is invoked to unregister all the currently registered RPIs |
5380 | * to the HBA. |
5381 | **/ |
5382 | void |
5383 | lpfc_unreg_hba_rpis(struct lpfc_hba *phba) |
5384 | { |
5385 | struct lpfc_vport **vports; |
5386 | struct lpfc_nodelist *ndlp; |
5387 | int i; |
5388 | unsigned long iflags; |
5389 | |
5390 | vports = lpfc_create_vport_work_array(phba); |
5391 | if (!vports) { |
5392 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
5393 | "2884 Vport array allocation failed \n" ); |
5394 | return; |
5395 | } |
5396 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
5397 | spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); |
5398 | list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { |
5399 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
5400 | /* The mempool_alloc might sleep */ |
spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
iflags);
lpfc_unreg_rpi(vports[i], ndlp);
5404 | spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, |
5405 | iflags); |
5406 | } |
5407 | } |
spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
5409 | } |
5410 | lpfc_destroy_vport_work_array(phba, vports); |
5411 | } |
5412 | |
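/* Unregister all RPIs on the vport: one UNREG_LOGIN covering the
 * whole vport for SLI3, or the SLI4-specific helper otherwise.
 */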
5413 | void |
5414 | lpfc_unreg_all_rpis(struct lpfc_vport *vport) |
5415 | { |
5416 | struct lpfc_hba *phba = vport->phba; |
5417 | LPFC_MBOXQ_t *mbox; |
5418 | int rc; |
5419 | |
5420 | if (phba->sli_rev == LPFC_SLI_REV4) { |
5421 | lpfc_sli4_unreg_all_rpis(vport); |
5422 | return; |
5423 | } |
5424 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5426 | if (mbox) { |
5427 | lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, |
5428 | mbox); |
5429 | mbox->vport = vport; |
5430 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5431 | mbox->ctx_ndlp = NULL; |
5432 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); |
5433 | if (rc != MBX_TIMEOUT) |
mempool_free(mbox, phba->mbox_mem_pool);
5435 | |
5436 | if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) |
5437 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5438 | "1836 Could not issue " |
5439 | "unreg_login(all_rpis) status %d\n" , |
5440 | rc); |
5441 | } |
5442 | } |
5443 | |
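/* Unregister the vport's default RPIs via UNREG_DID (SLI3 only) */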
5444 | void |
5445 | lpfc_unreg_default_rpis(struct lpfc_vport *vport) |
5446 | { |
5447 | struct lpfc_hba *phba = vport->phba; |
5448 | LPFC_MBOXQ_t *mbox; |
5449 | int rc; |
5450 | |
5451 | /* Unreg DID is an SLI3 operation. */ |
5452 | if (phba->sli_rev > LPFC_SLI_REV3) |
5453 | return; |
5454 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5456 | if (mbox) { |
5457 | lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, |
5458 | mbox); |
5459 | mbox->vport = vport; |
5460 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5461 | mbox->ctx_ndlp = NULL; |
5462 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); |
5463 | if (rc != MBX_TIMEOUT) |
mempool_free(mbox, phba->mbox_mem_pool);
5465 | |
5466 | if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) |
5467 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5468 | "1815 Could not issue " |
5469 | "unreg_did (default rpis) status %d\n" , |
5470 | rc); |
5471 | } |
5472 | } |
5473 | |
5474 | /* |
5475 | * Free resources associated with LPFC_NODELIST entry |
5476 | * so it can be freed. |
5477 | */ |
5478 | static int |
5479 | lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
5480 | { |
5481 | struct lpfc_hba *phba = vport->phba; |
5482 | LPFC_MBOXQ_t *mb, *nextmb; |
5483 | |
5484 | /* Cleanup node for NPort <nlp_DID> */ |
5485 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5486 | "0900 Cleanup node for NPort x%x " |
5487 | "Data: x%x x%x x%x\n" , |
5488 | ndlp->nlp_DID, ndlp->nlp_flag, |
5489 | ndlp->nlp_state, ndlp->nlp_rpi); |
5490 | lpfc_dequeue_node(vport, ndlp); |
5491 | |
5492 | /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ |
5493 | |
5494 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ |
5495 | if ((mb = phba->sli.mbox_active)) { |
5496 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
5497 | !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && |
5498 | (ndlp == mb->ctx_ndlp)) { |
5499 | mb->ctx_ndlp = NULL; |
5500 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5501 | } |
5502 | } |
5503 | |
spin_lock_irq(&phba->hbalock);
5505 | /* Cleanup REG_LOGIN completions which are not yet processed */ |
5506 | list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { |
5507 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || |
5508 | (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || |
5509 | (ndlp != mb->ctx_ndlp)) |
5510 | continue; |
5511 | |
5512 | mb->ctx_ndlp = NULL; |
5513 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5514 | } |
5515 | |
5516 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
5517 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
5518 | !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && |
5519 | (ndlp == mb->ctx_ndlp)) { |
list_del(&mb->list);
lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
5522 | |
5523 | /* Don't invoke lpfc_nlp_put. The driver is in |
5524 | * lpfc_nlp_release context. |
5525 | */ |
5526 | } |
5527 | } |
spin_unlock_irq(&phba->hbalock);
5529 | |
5530 | lpfc_els_abort(phba, ndlp); |
5531 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
spin_unlock_irq(&ndlp->lock);

ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);

list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
list_del_init(&ndlp->recovery_evt.evt_listp);
5542 | lpfc_cleanup_vports_rrqs(vport, ndlp); |
5543 | |
5544 | if (phba->sli_rev == LPFC_SLI_REV4) |
5545 | ndlp->nlp_flag |= NLP_RELEASE_RPI; |
5546 | |
5547 | return 0; |
5548 | } |
5549 | |
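/*
 * Match a node against a DID, allowing for the private loop case in
 * which only the low (ALPA) byte of the address is significant.
 */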
5550 | static int |
5551 | lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
5552 | uint32_t did) |
5553 | { |
5554 | D_ID mydid, ndlpdid, matchdid; |
5555 | |
5556 | if (did == Bcast_DID) |
5557 | return 0; |
5558 | |
5559 | /* First check for Direct match */ |
5560 | if (ndlp->nlp_DID == did) |
5561 | return 1; |
5562 | |
5563 | /* Next check for area/domain identically equals 0 match */ |
5564 | mydid.un.word = vport->fc_myDID; |
5565 | if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { |
5566 | return 0; |
5567 | } |
5568 | |
5569 | matchdid.un.word = did; |
5570 | ndlpdid.un.word = ndlp->nlp_DID; |
5571 | if (matchdid.un.b.id == ndlpdid.un.b.id) { |
5572 | if ((mydid.un.b.domain == matchdid.un.b.domain) && |
5573 | (mydid.un.b.area == matchdid.un.b.area)) { |
5574 | /* This code is supposed to match the ID |
5575 | * for a private loop device that is |
5576 | * connect to fl_port. But we need to |
5577 | * check that the port did not just go |
5578 | * from pt2pt to fabric or we could end |
5579 | * up matching ndlp->nlp_DID 000001 to |
5580 | * fabric DID 0x20101 |
5581 | */ |
5582 | if ((ndlpdid.un.b.domain == 0) && |
5583 | (ndlpdid.un.b.area == 0)) { |
5584 | if (ndlpdid.un.b.id && |
5585 | vport->phba->fc_topology == |
5586 | LPFC_TOPOLOGY_LOOP) |
5587 | return 1; |
5588 | } |
5589 | return 0; |
5590 | } |
5591 | |
5592 | matchdid.un.word = ndlp->nlp_DID; |
5593 | if ((mydid.un.b.domain == ndlpdid.un.b.domain) && |
5594 | (mydid.un.b.area == ndlpdid.un.b.area)) { |
5595 | if ((matchdid.un.b.domain == 0) && |
5596 | (matchdid.un.b.area == 0)) { |
5597 | if (matchdid.un.b.id) |
5598 | return 1; |
5599 | } |
5600 | } |
5601 | } |
5602 | return 0; |
5603 | } |
5604 | |
5605 | /* Search for a nodelist entry */ |
5606 | static struct lpfc_nodelist * |
5607 | __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) |
5608 | { |
5609 | struct lpfc_nodelist *ndlp; |
5610 | uint32_t data1; |
5611 | |
5612 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
5613 | if (lpfc_matchdid(vport, ndlp, did)) { |
5614 | data1 = (((uint32_t)ndlp->nlp_state << 24) | |
5615 | ((uint32_t)ndlp->nlp_xri << 16) | |
5616 | ((uint32_t)ndlp->nlp_type << 8) |
5617 | ); |
5618 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, |
5619 | "0929 FIND node DID " |
5620 | "Data: x%px x%x x%x x%x x%x x%px\n" , |
5621 | ndlp, ndlp->nlp_DID, |
5622 | ndlp->nlp_flag, data1, ndlp->nlp_rpi, |
5623 | ndlp->active_rrqs_xri_bitmap); |
5624 | return ndlp; |
5625 | } |
5626 | } |
5627 | |
5628 | /* FIND node did <did> NOT FOUND */ |
5629 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5630 | "0932 FIND node did x%x NOT FOUND.\n" , did); |
5631 | return NULL; |
5632 | } |
5633 | |
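/* Locked wrapper around __lpfc_findnode_did */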
5634 | struct lpfc_nodelist * |
5635 | lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) |
5636 | { |
5637 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5638 | struct lpfc_nodelist *ndlp; |
5639 | unsigned long iflags; |
5640 | |
5641 | spin_lock_irqsave(shost->host_lock, iflags); |
5642 | ndlp = __lpfc_findnode_did(vport, did); |
spin_unlock_irqrestore(shost->host_lock, iflags);
5644 | return ndlp; |
5645 | } |
5646 | |
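/* Find the first node on the vport in MAPPED or UNMAPPED state */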
5647 | struct lpfc_nodelist * |
5648 | lpfc_findnode_mapped(struct lpfc_vport *vport) |
5649 | { |
5650 | struct lpfc_nodelist *ndlp; |
5651 | uint32_t data1; |
5652 | unsigned long iflags; |
5653 | |
5654 | spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags); |
5655 | |
5656 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
5657 | if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || |
5658 | ndlp->nlp_state == NLP_STE_MAPPED_NODE) { |
5659 | data1 = (((uint32_t)ndlp->nlp_state << 24) | |
5660 | ((uint32_t)ndlp->nlp_xri << 16) | |
5661 | ((uint32_t)ndlp->nlp_type << 8) | |
5662 | ((uint32_t)ndlp->nlp_rpi & 0xff)); |
spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
iflags);
5665 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, |
5666 | "2025 FIND node DID MAPPED " |
5667 | "Data: x%px x%x x%x x%x x%px\n" , |
5668 | ndlp, ndlp->nlp_DID, |
5669 | ndlp->nlp_flag, data1, |
5670 | ndlp->active_rrqs_xri_bitmap); |
5671 | return ndlp; |
5672 | } |
5673 | } |
spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
5675 | |
5676 | /* FIND node did <did> NOT FOUND */ |
5677 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5678 | "2026 FIND mapped did NOT FOUND.\n" ); |
5679 | return NULL; |
5680 | } |
5681 | |
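/*
 * Locate or allocate the node for a DID and mark it for discovery
 * (NLP_NPR_2B_DISC). Returns NULL when the DID does not need to be
 * discovered, e.g. when it falls outside the current RSCN payload.
 */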
5682 | struct lpfc_nodelist * |
5683 | lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) |
5684 | { |
5685 | struct lpfc_nodelist *ndlp; |
5686 | |
5687 | ndlp = lpfc_findnode_did(vport, did); |
5688 | if (!ndlp) { |
5689 | if (vport->phba->nvmet_support) |
5690 | return NULL; |
5691 | if (test_bit(FC_RSCN_MODE, &vport->fc_flag) && |
5692 | lpfc_rscn_payload_check(vport, did) == 0) |
5693 | return NULL; |
5694 | ndlp = lpfc_nlp_init(vport, did); |
5695 | if (!ndlp) |
5696 | return NULL; |
5697 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
5698 | |
5699 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5700 | "6453 Setup New Node 2B_DISC x%x " |
5701 | "Data:x%x x%x x%lx\n" , |
5702 | ndlp->nlp_DID, ndlp->nlp_flag, |
5703 | ndlp->nlp_state, vport->fc_flag); |
5704 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
5708 | return ndlp; |
5709 | } |
5710 | |
5711 | /* The NVME Target does not want to actively manage an rport. |
5712 | * The goal is to allow the target to reset its state and clear |
5713 | * pending IO in preparation for the initiator to recover. |
5714 | */ |
5715 | if (test_bit(FC_RSCN_MODE, &vport->fc_flag) && |
5716 | !test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { |
5717 | if (lpfc_rscn_payload_check(vport, did)) { |
5718 | |
5719 | /* Since this node is marked for discovery, |
5720 | * delay timeout is not needed. |
5721 | */ |
5722 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
5723 | |
5724 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5725 | "6455 Setup RSCN Node 2B_DISC x%x " |
5726 | "Data:x%x x%x x%lx\n" , |
5727 | ndlp->nlp_DID, ndlp->nlp_flag, |
5728 | ndlp->nlp_state, vport->fc_flag); |
5729 | |
5730 | /* NVME Target mode waits until rport is known to be |
5731 | * impacted by the RSCN before it transitions. No |
5732 | * active management - just go to NPR provided the |
5733 | * node had a valid login. |
5734 | */ |
5735 | if (vport->phba->nvmet_support) |
5736 | return ndlp; |
5737 | |
5738 | if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && |
5739 | ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { |
5740 | lpfc_disc_state_machine(vport, ndlp, NULL, |
5741 | NLP_EVT_DEVICE_RECOVERY); |
5742 | } |
5743 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
5747 | } else { |
5748 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5749 | "6456 Skip Setup RSCN Node x%x " |
5750 | "Data:x%x x%x x%lx\n" , |
5751 | ndlp->nlp_DID, ndlp->nlp_flag, |
5752 | ndlp->nlp_state, vport->fc_flag); |
5753 | ndlp = NULL; |
5754 | } |
5755 | } else { |
5756 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5757 | "6457 Setup Active Node 2B_DISC x%x " |
5758 | "Data:x%x x%x x%lx\n" , |
5759 | ndlp->nlp_DID, ndlp->nlp_flag, |
5760 | ndlp->nlp_state, vport->fc_flag); |
5761 | |
5762 | /* If the initiator received a PLOGI from this NPort or if the |
5763 | * initiator is already in the process of discovery on it, |
5764 | * there's no need to try to discover it again. |
5765 | */ |
5766 | if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || |
5767 | ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
5768 | (!vport->phba->nvmet_support && |
5769 | ndlp->nlp_flag & NLP_RCV_PLOGI)) |
5770 | return NULL; |
5771 | |
5772 | if (vport->phba->nvmet_support) |
5773 | return ndlp; |
5774 | |
5775 | /* Moving to NPR state clears unsolicited flags and |
5776 | * allows for rediscovery |
5777 | */ |
5778 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
5779 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
5783 | } |
5784 | return ndlp; |
5785 | } |
5786 | |
5787 | /* Build a list of nodes to discover based on the loopmap */ |
5788 | void |
5789 | lpfc_disc_list_loopmap(struct lpfc_vport *vport) |
5790 | { |
5791 | struct lpfc_hba *phba = vport->phba; |
5792 | int j; |
5793 | uint32_t alpa, index; |
5794 | |
5795 | if (!lpfc_is_link_up(phba)) |
5796 | return; |
5797 | |
5798 | if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) |
5799 | return; |
5800 | |
5801 | /* Check for loop map present or not */ |
5802 | if (phba->alpa_map[0]) { |
5803 | for (j = 1; j <= phba->alpa_map[0]; j++) { |
5804 | alpa = phba->alpa_map[j]; |
5805 | if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) |
5806 | continue; |
lpfc_setup_disc_node(vport, alpa);
5808 | } |
5809 | } else { |
5810 | /* No alpamap, so try all alpa's */ |
5811 | for (j = 0; j < FC_MAXLOOP; j++) { |
5812 | /* If cfg_scan_down is set, start from highest |
5813 | * ALPA (0xef) to lowest (0x1). |
5814 | */ |
5815 | if (vport->cfg_scan_down) |
5816 | index = j; |
5817 | else |
5818 | index = FC_MAXLOOP - j - 1; |
5819 | alpa = lpfcAlpaArray[index]; |
5820 | if ((vport->fc_myDID & 0xff) == alpa) |
5821 | continue; |
lpfc_setup_disc_node(vport, alpa);
5823 | } |
5824 | } |
5825 | return; |
5826 | } |
5827 | |
5828 | /* SLI3 only */ |
5829 | void |
5830 | lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) |
5831 | { |
5832 | LPFC_MBOXQ_t *mbox; |
5833 | struct lpfc_sli *psli = &phba->sli; |
struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5835 | struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; |
5836 | int rc; |
5837 | |
5838 | /* |
* if it's not a physical port or if we already sent
5840 | * clear_la then don't send it. |
5841 | */ |
5842 | if ((phba->link_state >= LPFC_CLEAR_LA) || |
5843 | (vport->port_type != LPFC_PHYSICAL_PORT) || |
5844 | (phba->sli_rev == LPFC_SLI_REV4)) |
5845 | return; |
5846 | |
5847 | /* Link up discovery */ |
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5849 | phba->link_state = LPFC_CLEAR_LA; |
5850 | lpfc_clear_la(phba, mbox); |
5851 | mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; |
5852 | mbox->vport = vport; |
5853 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
5854 | if (rc == MBX_NOT_FINISHED) { |
mempool_free(mbox, phba->mbox_mem_pool);
5856 | lpfc_disc_flush_list(vport); |
5857 | extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; |
5858 | fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; |
5859 | phba->link_state = LPFC_HBA_ERROR; |
5860 | } |
5861 | } |
5862 | } |
5863 | |
5864 | /* Reg_vpi to tell firmware to resume normal operations */ |
5865 | void |
5866 | lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) |
5867 | { |
5868 | LPFC_MBOXQ_t *regvpimbox; |
5869 | |
regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5871 | if (regvpimbox) { |
5872 | lpfc_reg_vpi(vport, regvpimbox); |
5873 | regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; |
5874 | regvpimbox->vport = vport; |
5875 | if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) |
5876 | == MBX_NOT_FINISHED) { |
mempool_free(regvpimbox, phba->mbox_mem_pool);
5878 | } |
5879 | } |
5880 | } |
5881 | |
5882 | /* Start Link up / RSCN discovery on NPR nodes */ |
5883 | void |
5884 | lpfc_disc_start(struct lpfc_vport *vport) |
5885 | { |
5886 | struct lpfc_hba *phba = vport->phba; |
5887 | uint32_t num_sent; |
5888 | uint32_t clear_la_pending; |
5889 | |
5890 | if (!lpfc_is_link_up(phba)) { |
5891 | lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, |
5892 | "3315 Link is not up %x\n" , |
5893 | phba->link_state); |
5894 | return; |
5895 | } |
5896 | |
5897 | if (phba->link_state == LPFC_CLEAR_LA) |
5898 | clear_la_pending = 1; |
5899 | else |
5900 | clear_la_pending = 0; |
5901 | |
5902 | if (vport->port_state < LPFC_VPORT_READY) |
5903 | vport->port_state = LPFC_DISC_AUTH; |
5904 | |
5905 | lpfc_set_disctmo(vport); |
5906 | |
5907 | vport->fc_prevDID = vport->fc_myDID; |
5908 | vport->num_disc_nodes = 0; |
5909 | |
5910 | /* Start Discovery state <hba_state> */ |
5911 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5912 | "0202 Start Discovery port state x%x " |
5913 | "flg x%lx Data: x%x x%x x%x\n" , |
5914 | vport->port_state, vport->fc_flag, |
5915 | atomic_read(&vport->fc_plogi_cnt), |
5916 | atomic_read(&vport->fc_adisc_cnt), |
5917 | atomic_read(&vport->fc_npr_cnt)); |
5918 | |
5919 | /* First do ADISCs - if any */ |
5920 | num_sent = lpfc_els_disc_adisc(vport); |
5921 | |
5922 | if (num_sent) |
5923 | return; |
5924 | |
5925 | /* Register the VPI for SLI3, NPIV only. */ |
5926 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
5927 | !test_bit(FC_PT2PT, &vport->fc_flag) && |
5928 | !test_bit(FC_RSCN_MODE, &vport->fc_flag) && |
5929 | (phba->sli_rev < LPFC_SLI_REV4)) { |
5930 | lpfc_issue_clear_la(phba, vport); |
5931 | lpfc_issue_reg_vpi(phba, vport); |
5932 | return; |
5933 | } |
5934 | |
5935 | /* |
5936 | * For SLI2, we need to set port_state to READY and continue |
5937 | * discovery. |
5938 | */ |
5939 | if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { |
5940 | /* If we get here, there is nothing to ADISC */ |
5941 | lpfc_issue_clear_la(phba, vport); |
5942 | |
5943 | if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) { |
5944 | vport->num_disc_nodes = 0; |
5945 | /* go thru NPR nodes and issue ELS PLOGIs */ |
if (atomic_read(&vport->fc_npr_cnt))
5947 | lpfc_els_disc_plogi(vport); |
5948 | |
5949 | if (!vport->num_disc_nodes) { |
clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
5951 | lpfc_can_disctmo(vport); |
5952 | } |
5953 | } |
5954 | vport->port_state = LPFC_VPORT_READY; |
5955 | } else { |
5956 | /* Next do PLOGIs - if any */ |
5957 | num_sent = lpfc_els_disc_plogi(vport); |
5958 | |
5959 | if (num_sent) |
5960 | return; |
5961 | |
5962 | if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { |
5963 | /* Check to see if more RSCNs came in while we |
5964 | * were processing this one. |
5965 | */ |
5966 | if (vport->fc_rscn_id_cnt == 0 && |
5967 | !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) { |
clear_bit(FC_RSCN_MODE, &vport->fc_flag);
5969 | lpfc_can_disctmo(vport); |
5970 | } else { |
5971 | lpfc_els_handle_rscn(vport); |
5972 | } |
5973 | } |
5974 | } |
5975 | return; |
5976 | } |
5977 | |
5978 | /* |
* Ignore completion for all IOCBs on the tx and txcmpl queues of the
* ELS ring that match the specified nodelist.
5981 | */ |
5982 | static void |
5983 | lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
5984 | { |
5985 | LIST_HEAD(completions); |
5986 | struct lpfc_iocbq *iocb, *next_iocb; |
5987 | struct lpfc_sli_ring *pring; |
5988 | u32 ulp_command; |
5989 | |
5990 | pring = lpfc_phba_elsring(phba); |
5991 | if (unlikely(!pring)) |
5992 | return; |
5993 | |
5994 | /* Error matching iocb on txq or txcmplq |
5995 | * First check the txq. |
5996 | */ |
spin_lock_irq(&phba->hbalock);
5998 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { |
5999 | if (iocb->ndlp != ndlp) |
6000 | continue; |
6001 | |
ulp_command = get_job_cmnd(phba, iocb);
6003 | |
6004 | if (ulp_command == CMD_ELS_REQUEST64_CR || |
6005 | ulp_command == CMD_XMIT_ELS_RSP64_CX) { |
6006 | |
list_move_tail(&iocb->list, &completions);
6008 | } |
6009 | } |
6010 | |
6011 | /* Next check the txcmplq */ |
6012 | list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { |
6013 | if (iocb->ndlp != ndlp) |
6014 | continue; |
6015 | |
ulp_command = get_job_cmnd(phba, iocb);
6017 | |
6018 | if (ulp_command == CMD_ELS_REQUEST64_CR || |
6019 | ulp_command == CMD_XMIT_ELS_RSP64_CX) { |
6020 | lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); |
6021 | } |
6022 | } |
spin_unlock_irq(&phba->hbalock);
6024 | |
6025 | /* Make sure HBA is alive */ |
6026 | lpfc_issue_hb_tmo(phba); |
6027 | |
6028 | /* Cancel all the IOCBs from the completions list */ |
6029 | lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, |
6030 | IOERR_SLI_ABORTED); |
6031 | } |
6032 | |
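/* Flush outstanding ELS traffic for every node still in PLOGI or
 * ADISC issue state.
 */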
6033 | static void |
6034 | lpfc_disc_flush_list(struct lpfc_vport *vport) |
6035 | { |
6036 | struct lpfc_nodelist *ndlp, *next_ndlp; |
6037 | struct lpfc_hba *phba = vport->phba; |
6038 | |
if (atomic_read(&vport->fc_plogi_cnt) ||
atomic_read(&vport->fc_adisc_cnt)) {
6041 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
6042 | nlp_listp) { |
6043 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
6044 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { |
6045 | lpfc_free_tx(phba, ndlp); |
6046 | } |
6047 | } |
6048 | } |
6049 | } |
6050 | |
6051 | /* |
6052 | * lpfc_notify_xport_npr - notifies xport of node disappearance |
6053 | * @vport: Pointer to Virtual Port object. |
6054 | * |
6055 | * Transitions all ndlps to NPR state. When lpfc_nlp_set_state |
6056 | * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered |
6057 | * and transport notified that the node is gone. |
6058 | * Return Code: |
6059 | * none |
6060 | */ |
6061 | static void |
6062 | lpfc_notify_xport_npr(struct lpfc_vport *vport) |
6063 | { |
6064 | struct lpfc_nodelist *ndlp, *next_ndlp; |
6065 | |
6066 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
6067 | nlp_listp) { |
6068 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
6069 | } |
6070 | } |
6071 | void |
6072 | lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) |
6073 | { |
6074 | lpfc_els_flush_rscn(vport); |
6075 | lpfc_els_flush_cmd(vport); |
6076 | lpfc_disc_flush_list(vport); |
if (pci_channel_offline(vport->phba->pcidev))
6078 | lpfc_notify_xport_npr(vport); |
6079 | } |
6080 | |
6081 | /*****************************************************************************/ |
6082 | /* |
6083 | * NAME: lpfc_disc_timeout |
6084 | * |
6085 | * FUNCTION: Fibre Channel driver discovery timeout routine. |
6086 | * |
6087 | * EXECUTION ENVIRONMENT: interrupt only |
6088 | * |
6089 | * CALLED FROM: |
6090 | * Timer function |
6091 | * |
6092 | * RETURNS: |
6093 | * none |
6094 | */ |
6095 | /*****************************************************************************/ |
6096 | void |
6097 | lpfc_disc_timeout(struct timer_list *t) |
6098 | { |
6099 | struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); |
6100 | struct lpfc_hba *phba = vport->phba; |
6101 | uint32_t tmo_posted; |
6102 | unsigned long flags = 0; |
6103 | |
6104 | if (unlikely(!phba)) |
6105 | return; |
6106 | |
6107 | spin_lock_irqsave(&vport->work_port_lock, flags); |
6108 | tmo_posted = vport->work_port_events & WORKER_DISC_TMO; |
6109 | if (!tmo_posted) |
6110 | vport->work_port_events |= WORKER_DISC_TMO; |
spin_unlock_irqrestore(&vport->work_port_lock, flags);
6112 | |
6113 | if (!tmo_posted) |
6114 | lpfc_worker_wake_up(phba); |
6115 | return; |
6116 | } |
6117 | |
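/*
 * Worker-thread side of the discovery timeout. lpfc_disc_timeout() above
 * only posts WORKER_DISC_TMO and wakes up the worker thread; the worker
 * then calls this handler in process context.
 */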
6118 | static void |
6119 | lpfc_disc_timeout_handler(struct lpfc_vport *vport) |
6120 | { |
6121 | struct lpfc_hba *phba = vport->phba; |
6122 | struct lpfc_sli *psli = &phba->sli; |
6123 | struct lpfc_nodelist *ndlp, *next_ndlp; |
6124 | LPFC_MBOXQ_t *initlinkmbox; |
6125 | int rc, clrlaerr = 0; |
6126 | |
if (!test_and_clear_bit(FC_DISC_TMO, &vport->fc_flag))
6128 | return; |
6129 | |
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"disc timeout: state:x%x rtry:x%x flg:x%x",
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
6133 | |
6134 | switch (vport->port_state) { |
6135 | |
6136 | case LPFC_LOCAL_CFG_LINK: |
6137 | /* |
6138 | * port_state is identically LPFC_LOCAL_CFG_LINK while |
6139 | * waiting for FAN timeout |
6140 | */ |
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0221 FAN timeout\n");
6143 | |
6144 | /* Start discovery by sending FLOGI, clean up old rpis */ |
6145 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
6146 | nlp_listp) { |
6147 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
6148 | continue; |
6149 | if (ndlp->nlp_type & NLP_FABRIC) { |
6150 | /* Clean up the ndlp on Fabric connections */ |
6151 | lpfc_drop_node(vport, ndlp); |
6152 | |
6153 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { |
6154 | /* Fail outstanding IO now since device |
6155 | * is marked for PLOGI. |
6156 | */ |
6157 | lpfc_unreg_rpi(vport, ndlp); |
6158 | } |
6159 | } |
6160 | if (vport->port_state != LPFC_FLOGI) { |
6161 | if (phba->sli_rev <= LPFC_SLI_REV3) |
6162 | lpfc_initial_flogi(vport); |
6163 | else |
6164 | lpfc_issue_init_vfi(vport); |
6165 | return; |
6166 | } |
6167 | break; |
6168 | |
6169 | case LPFC_FDISC: |
6170 | case LPFC_FLOGI: |
6171 | /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ |
6172 | /* Initial FLOGI timeout */ |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0222 Initial %s timeout\n",
vport->vpi ? "FDISC" : "FLOGI");
6177 | |
6178 | /* Assume no Fabric and go on with discovery. |
6179 | * Check for outstanding ELS FLOGI to abort. |
6180 | */ |
6181 | |
6182 | /* FLOGI failed, so just use loop map to make discovery list */ |
6183 | lpfc_disc_list_loopmap(vport); |
6184 | |
6185 | /* Start discovery */ |
6186 | lpfc_disc_start(vport); |
6187 | break; |
6188 | |
6189 | case LPFC_FABRIC_CFG_LINK: |
6190 | /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for |
6191 | NameServer login */ |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0223 Timeout while waiting for "
"NameServer login\n");
6196 | /* Next look for NameServer ndlp */ |
6197 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
6198 | if (ndlp) |
6199 | lpfc_els_abort(phba, ndlp); |
6200 | |
6201 | /* ReStart discovery */ |
6202 | goto restart_disc; |
6203 | |
6204 | case LPFC_NS_QRY: |
6205 | /* Check for wait for NameServer Rsp timeout */ |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0224 NameServer Query timeout "
"Data: x%x x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6211 | |
6212 | if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { |
6213 | /* Try it one more time */ |
6214 | vport->fc_ns_retry++; |
6215 | vport->gidft_inp = 0; |
6216 | rc = lpfc_issue_gidft(vport); |
6217 | if (rc == 0) |
6218 | break; |
6219 | } |
6220 | vport->fc_ns_retry = 0; |
6221 | |
6222 | restart_disc: |
6223 | /* |
6224 | * Discovery is over. |
6225 | * set port_state to PORT_READY if SLI2. |
6226 | * cmpl_reg_vpi will set port_state to READY for SLI3. |
6227 | */ |
6228 | if (phba->sli_rev < LPFC_SLI_REV4) { |
6229 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) |
6230 | lpfc_issue_reg_vpi(phba, vport); |
6231 | else { |
6232 | lpfc_issue_clear_la(phba, vport); |
6233 | vport->port_state = LPFC_VPORT_READY; |
6234 | } |
6235 | } |
6236 | |
6237 | /* Setup and issue mailbox INITIALIZE LINK command */ |
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6239 | if (!initlinkmbox) { |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0206 Device Discovery "
"completion error\n");
6244 | phba->link_state = LPFC_HBA_ERROR; |
6245 | break; |
6246 | } |
6247 | |
6248 | lpfc_linkdown(phba); |
6249 | lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, |
6250 | phba->cfg_link_speed); |
6251 | initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; |
6252 | initlinkmbox->vport = vport; |
6253 | initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
6254 | rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); |
6255 | lpfc_set_loopback_flag(phba); |
6256 | if (rc == MBX_NOT_FINISHED) |
mempool_free(initlinkmbox, phba->mbox_mem_pool);
6258 | |
6259 | break; |
6260 | |
6261 | case LPFC_DISC_AUTH: |
6262 | /* Node Authentication timeout */ |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0227 Node Authentication timeout\n");
6266 | lpfc_disc_flush_list(vport); |
6267 | |
6268 | /* |
6269 | * set port_state to PORT_READY if SLI2. |
6270 | * cmpl_reg_vpi will set port_state to READY for SLI3. |
6271 | */ |
6272 | if (phba->sli_rev < LPFC_SLI_REV4) { |
6273 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) |
6274 | lpfc_issue_reg_vpi(phba, vport); |
6275 | else { /* NPIV Not enabled */ |
6276 | lpfc_issue_clear_la(phba, vport); |
6277 | vport->port_state = LPFC_VPORT_READY; |
6278 | } |
6279 | } |
6280 | break; |
6281 | |
6282 | case LPFC_VPORT_READY: |
6283 | if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
"x%x x%x x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
vport->port_state, vport->gidft_inp);
6290 | |
6291 | /* Cleanup any outstanding ELS commands */ |
6292 | lpfc_els_flush_cmd(vport); |
6293 | |
6294 | lpfc_els_flush_rscn(vport); |
6295 | lpfc_disc_flush_list(vport); |
6296 | } |
6297 | break; |
6298 | |
6299 | default: |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
6304 | break; |
6305 | } |
6306 | |
6307 | switch (phba->link_state) { |
6308 | case LPFC_CLEAR_LA: |
6309 | /* CLEAR LA timeout */ |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0228 CLEAR LA timeout\n");
6313 | clrlaerr = 1; |
6314 | break; |
6315 | |
6316 | case LPFC_LINK_UP: |
6317 | lpfc_issue_clear_la(phba, vport); |
6318 | fallthrough; |
6319 | case LPFC_LINK_UNKNOWN: |
6320 | case LPFC_WARM_START: |
6321 | case LPFC_INIT_START: |
6322 | case LPFC_INIT_MBX_CMDS: |
6323 | case LPFC_LINK_DOWN: |
6324 | case LPFC_HBA_ERROR: |
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0230 Unexpected timeout, hba link "
"state x%x\n", phba->link_state);
6329 | clrlaerr = 1; |
6330 | break; |
6331 | |
6332 | case LPFC_HBA_READY: |
6333 | break; |
6334 | } |
6335 | |
6336 | if (clrlaerr) { |
6337 | lpfc_disc_flush_list(vport); |
6338 | if (phba->sli_rev != LPFC_SLI_REV4) { |
6339 | psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= |
6340 | ~LPFC_STOP_IOCB_EVENT; |
6341 | psli->sli3_ring[LPFC_FCP_RING].flag &= |
6342 | ~LPFC_STOP_IOCB_EVENT; |
6343 | } |
6344 | vport->port_state = LPFC_VPORT_READY; |
6345 | } |
6346 | return; |
6347 | } |
6348 | |
6349 | /* |
6350 | * This routine handles processing a NameServer REG_LOGIN mailbox |
6351 | * command upon completion. It is setup in the LPFC_MBOXQ |
6352 | * as the completion routine when the command is |
6353 | * handed off to the SLI layer. |
6354 | */ |
6355 | void |
6356 | lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
6357 | { |
6358 | MAILBOX_t *mb = &pmb->u.mb; |
6359 | struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; |
6360 | struct lpfc_vport *vport = pmb->vport; |
6361 | |
6362 | pmb->ctx_ndlp = NULL; |
6363 | |
6364 | if (phba->sli_rev < LPFC_SLI_REV4) |
6365 | ndlp->nlp_rpi = mb->un.varWords[0]; |
6366 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
6367 | ndlp->nlp_type |= NLP_FABRIC; |
6368 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0004 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp);
6374 | /* |
6375 | * Start issuing Fabric-Device Management Interface (FDMI) command to |
6376 | * 0xfffffa (FDMI well known port). |
6377 | * DHBA -> DPRT -> RHBA -> RPA (physical port) |
6378 | * DPRT -> RPRT (vports) |
6379 | */ |
6380 | if (vport->port_type == LPFC_PHYSICAL_PORT) { |
6381 | phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */ |
6382 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); |
6383 | } else { |
6384 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); |
6385 | } |
6386 | |
/* decrement the node reference count held for this callback
6389 | * function. |
6390 | */ |
6391 | lpfc_nlp_put(ndlp); |
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6393 | return; |
6394 | } |
6395 | |
6396 | static int |
6397 | lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) |
6398 | { |
6399 | uint16_t *rpi = param; |
6400 | |
6401 | return ndlp->nlp_rpi == *rpi; |
6402 | } |
6403 | |
6404 | static int |
6405 | lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) |
6406 | { |
return memcmp(&ndlp->nlp_portname, param,
sizeof(ndlp->nlp_portname)) == 0;
6409 | } |
6410 | |
6411 | static struct lpfc_nodelist * |
6412 | __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) |
6413 | { |
6414 | struct lpfc_nodelist *ndlp; |
6415 | |
6416 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
6417 | if (filter(ndlp, param)) { |
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"3185 FIND node filter %ps DID "
"ndlp x%px did x%x flg x%x st x%x "
"xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_xri, ndlp->nlp_type,
ndlp->nlp_rpi);
6426 | return ndlp; |
6427 | } |
6428 | } |
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3186 FIND node filter %ps NOT FOUND.\n", filter);
6431 | return NULL; |
6432 | } |
6433 | |
6434 | /* |
6435 | * This routine looks up the ndlp lists for the given RPI. If rpi found it |
6436 | * returns the node list element pointer else return NULL. |
6437 | */ |
6438 | struct lpfc_nodelist * |
6439 | __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) |
6440 | { |
return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6442 | } |
6443 | |
6444 | /* |
6445 | * This routine looks up the ndlp lists for the given WWPN. If WWPN found it |
6446 | * returns the node element list pointer else return NULL. |
6447 | */ |
6448 | struct lpfc_nodelist * |
6449 | lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) |
6450 | { |
6451 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
6452 | struct lpfc_nodelist *ndlp; |
6453 | |
spin_lock_irq(shost->host_lock);
ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
spin_unlock_irq(shost->host_lock);
6457 | return ndlp; |
6458 | } |
6459 | |
6460 | /* |
6461 | * This routine looks up the ndlp lists for the given RPI. If the rpi |
6462 | * is found, the routine returns the node element list pointer else |
6463 | * return NULL. |
6464 | */ |
6465 | struct lpfc_nodelist * |
6466 | lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) |
6467 | { |
6468 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
6469 | struct lpfc_nodelist *ndlp; |
6470 | unsigned long flags; |
6471 | |
6472 | spin_lock_irqsave(shost->host_lock, flags); |
6473 | ndlp = __lpfc_findnode_rpi(vport, rpi); |
spin_unlock_irqrestore(shost->host_lock, flags);
6475 | return ndlp; |
6476 | } |
6477 | |
6478 | /** |
6479 | * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier |
6480 | * @phba: pointer to lpfc hba data structure. |
6481 | * @vpi: the physical host virtual N_Port identifier. |
6482 | * |
6483 | * This routine finds a vport on a HBA (referred by @phba) through a |
6484 | * @vpi. The function walks the HBA's vport list and returns the address |
6485 | * of the vport with the matching @vpi. |
6486 | * |
6487 | * Return code |
6488 | * NULL - No vport with the matching @vpi found |
6489 | * Otherwise - Address to the vport with the matching @vpi. |
6490 | **/ |
6491 | struct lpfc_vport * |
6492 | lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) |
6493 | { |
6494 | struct lpfc_vport *vport; |
6495 | unsigned long flags; |
6496 | int i = 0; |
6497 | |
6498 | /* The physical ports are always vpi 0 - translate is unnecessary. */ |
6499 | if (vpi > 0) { |
6500 | /* |
6501 | * Translate the physical vpi to the logical vpi. The |
6502 | * vport stores the logical vpi. |
6503 | */ |
6504 | for (i = 0; i <= phba->max_vpi; i++) { |
6505 | if (vpi == phba->vpi_ids[i]) |
6506 | break; |
6507 | } |
6508 | |
6509 | if (i > phba->max_vpi) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2936 Could not find Vport mapped "
"to vpi %d\n", vpi);
6513 | return NULL; |
6514 | } |
6515 | } |
6516 | |
6517 | spin_lock_irqsave(&phba->port_list_lock, flags); |
6518 | list_for_each_entry(vport, &phba->port_list, listentry) { |
6519 | if (vport->vpi == i) { |
spin_unlock_irqrestore(&phba->port_list_lock, flags);
6521 | return vport; |
6522 | } |
6523 | } |
spin_unlock_irqrestore(&phba->port_list_lock, flags);
6525 | return NULL; |
6526 | } |
6527 | |
6528 | struct lpfc_nodelist * |
6529 | lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) |
6530 | { |
6531 | struct lpfc_nodelist *ndlp; |
6532 | int rpi = LPFC_RPI_ALLOC_ERROR; |
6533 | |
6534 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
6535 | rpi = lpfc_sli4_alloc_rpi(vport->phba); |
6536 | if (rpi == LPFC_RPI_ALLOC_ERROR) |
6537 | return NULL; |
6538 | } |
6539 | |
ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6541 | if (!ndlp) { |
6542 | if (vport->phba->sli_rev == LPFC_SLI_REV4) |
6543 | lpfc_sli4_free_rpi(vport->phba, rpi); |
6544 | return NULL; |
6545 | } |
6546 | |
6547 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); |
6548 | |
6549 | spin_lock_init(&ndlp->lock); |
6550 | |
6551 | lpfc_initialize_node(vport, ndlp, did); |
INIT_LIST_HEAD(&ndlp->nlp_listp);
6553 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
6554 | ndlp->nlp_rpi = rpi; |
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0007 Init New ndlp x%px, rpi:x%x DID:%x "
"flg:x%x refcnt:%d\n",
ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, kref_read(&ndlp->kref));
6560 | |
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
GFP_KERNEL);
6564 | if (ndlp->active_rrqs_xri_bitmap) |
6565 | memset(ndlp->active_rrqs_xri_bitmap, 0, |
6566 | ndlp->phba->cfg_rrq_xri_bitmap_sz); |
6567 | } |
6568 | |
6569 | |
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
6573 | ndlp->nlp_DID, 0, 0); |
6574 | |
6575 | return ndlp; |
6576 | } |
6577 | |
/* This routine releases all resources associated with a specific NPort's ndlp
6579 | * and mempool_free's the nodelist. |
6580 | */ |
6581 | static void |
6582 | lpfc_nlp_release(struct kref *kref) |
6583 | { |
6584 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, |
6585 | kref); |
6586 | struct lpfc_vport *vport = ndlp->vport; |
6587 | |
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node release: did:x%x flg:x%x type:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6591 | |
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
6594 | __func__, ndlp, ndlp->nlp_DID, |
6595 | kref_read(&ndlp->kref), ndlp->nlp_rpi); |
6596 | |
6597 | /* remove ndlp from action. */ |
6598 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
6599 | lpfc_cleanup_node(vport, ndlp); |
6600 | |
6601 | /* Not all ELS transactions have registered the RPI with the port. |
6602 | * In these cases the rpi usage is temporary and the node is |
6603 | * released when the WQE is completed. Catch this case to free the |
6604 | * RPI to the pool. Because this node is in the release path, a lock |
6605 | * is unnecessary. All references are gone and the node has been |
6606 | * dequeued. |
6607 | */ |
6608 | if (ndlp->nlp_flag & NLP_RELEASE_RPI) { |
6609 | if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && |
6610 | !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { |
6611 | lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); |
6612 | ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; |
6613 | } |
6614 | } |
6615 | |
6616 | /* The node is not freed back to memory, it is released to a pool so |
6617 | * the node fields need to be cleaned up. |
6618 | */ |
6619 | ndlp->vport = NULL; |
6620 | ndlp->nlp_state = NLP_STE_FREED_NODE; |
6621 | ndlp->nlp_flag = 0; |
6622 | ndlp->fc4_xpt_flags = 0; |
6623 | |
6624 | /* free ndlp memory for final ndlp release */ |
6625 | if (ndlp->phba->sli_rev == LPFC_SLI_REV4) |
mempool_free(ndlp->active_rrqs_xri_bitmap,
ndlp->phba->active_rrq_pool);
mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6629 | } |
6630 | |
6631 | /* This routine bumps the reference count for a ndlp structure to ensure |
6632 | * that one discovery thread won't free a ndlp while another discovery thread |
6633 | * is using it. |
6634 | */ |
6635 | struct lpfc_nodelist * |
6636 | lpfc_nlp_get(struct lpfc_nodelist *ndlp) |
6637 | { |
6638 | unsigned long flags; |
6639 | |
6640 | if (ndlp) { |
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
6645 | |
/* Check ndlp usage to prevent incrementing the reference
 * count of an ndlp that is in the process of being
 * released.
 */
6650 | spin_lock_irqsave(&ndlp->lock, flags); |
if (!kref_get_unless_zero(&ndlp->kref)) {
spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
"0276 %s: ndlp:x%px refcnt:%d\n",
6655 | __func__, (void *)ndlp, kref_read(&ndlp->kref)); |
6656 | return NULL; |
6657 | } |
spin_unlock_irqrestore(&ndlp->lock, flags);
6659 | } else { |
WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
6661 | } |
6662 | |
6663 | return ndlp; |
6664 | } |
6665 | |
6666 | /* This routine decrements the reference count for a ndlp structure. If the |
6667 | * count goes to 0, this indicates the associated nodelist should be freed. |
6668 | */ |
6669 | int |
6670 | lpfc_nlp_put(struct lpfc_nodelist *ndlp) |
6671 | { |
6672 | if (ndlp) { |
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node put: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
6677 | } else { |
WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
6679 | } |
6680 | |
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
6682 | } |
6683 | |
6684 | /** |
6685 | * lpfc_fcf_inuse - Check if FCF can be unregistered. |
6686 | * @phba: Pointer to hba context object. |
6687 | * |
* This function iterates through the FC nodes associated
* with all vports to check if there is any node with an
* fc_rport associated with it. If there is an fc_rport
* associated with the node, then the node is either in
* discovered state or its devloss_timer is pending.
6693 | */ |
6694 | static int |
6695 | lpfc_fcf_inuse(struct lpfc_hba *phba) |
6696 | { |
6697 | struct lpfc_vport **vports; |
6698 | int i, ret = 0; |
6699 | struct lpfc_nodelist *ndlp; |
6700 | unsigned long iflags; |
6701 | |
6702 | vports = lpfc_create_vport_work_array(phba); |
6703 | |
6704 | /* If driver cannot allocate memory, indicate fcf is in use */ |
6705 | if (!vports) |
6706 | return 1; |
6707 | |
6708 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
6709 | /* |
6710 | * IF the CVL_RCVD bit is not set then we have sent the |
6711 | * flogi. |
6712 | * If dev_loss fires while we are waiting we do not want to |
6713 | * unreg the fcf. |
6714 | */ |
6715 | if (!test_bit(FC_VPORT_CVL_RCVD, &vports[i]->fc_flag)) { |
6716 | ret = 1; |
6717 | goto out; |
6718 | } |
6719 | spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); |
6720 | list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { |
6721 | if (ndlp->rport && |
6722 | (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { |
6723 | ret = 1; |
spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
iflags);
6726 | goto out; |
6727 | } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
6728 | ret = 1; |
lpfc_printf_log(phba, KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"2624 RPI %x DID %x flag %x "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag);
6735 | } |
6736 | } |
spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
6738 | } |
6739 | out: |
6740 | lpfc_destroy_vport_work_array(phba, vports); |
6741 | return ret; |
6742 | } |
6743 | |
6744 | /** |
6745 | * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. |
6746 | * @phba: Pointer to hba context object. |
6747 | * @mboxq: Pointer to mailbox object. |
6748 | * |
6749 | * This function frees memory associated with the mailbox command. |
6750 | */ |
6751 | void |
6752 | lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
6753 | { |
6754 | struct lpfc_vport *vport = mboxq->vport; |
6755 | |
6756 | if (mboxq->u.mb.mbxStatus) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2555 UNREG_VFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
6761 | } |
clear_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag);
mempool_free(mboxq, phba->mbox_mem_pool);
6764 | return; |
6765 | } |
6766 | |
6767 | /** |
6768 | * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. |
6769 | * @phba: Pointer to hba context object. |
6770 | * @mboxq: Pointer to mailbox object. |
6771 | * |
6772 | * This function frees memory associated with the mailbox command. |
6773 | */ |
6774 | static void |
6775 | lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
6776 | { |
6777 | struct lpfc_vport *vport = mboxq->vport; |
6778 | |
6779 | if (mboxq->u.mb.mbxStatus) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2550 UNREG_FCFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
6784 | } |
mempool_free(mboxq, phba->mbox_mem_pool);
6786 | return; |
6787 | } |
6788 | |
6789 | /** |
6790 | * lpfc_unregister_fcf_prep - Unregister fcf record preparation |
6791 | * @phba: Pointer to hba context object. |
6792 | * |
* This function prepares the HBA for unregistering the currently registered
* FCF from the HBA. It unregisters, in order, RPIs, VPIs, and
* VFIs.
6796 | */ |
6797 | int |
6798 | lpfc_unregister_fcf_prep(struct lpfc_hba *phba) |
6799 | { |
6800 | struct lpfc_vport **vports; |
6801 | struct lpfc_nodelist *ndlp; |
6802 | struct Scsi_Host *shost; |
6803 | int i = 0, rc; |
6804 | |
6805 | /* Unregister RPIs */ |
6806 | if (lpfc_fcf_inuse(phba)) |
6807 | lpfc_unreg_hba_rpis(phba); |
6808 | |
6809 | /* At this point, all discovery is aborted */ |
6810 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
6811 | |
6812 | /* Unregister VPIs */ |
6813 | vports = lpfc_create_vport_work_array(phba); |
6814 | if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) |
6815 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
6816 | /* Stop FLOGI/FDISC retries */ |
ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6818 | if (ndlp) |
6819 | lpfc_cancel_retry_delay_tmo(vports[i], ndlp); |
6820 | lpfc_cleanup_pending_mbox(vports[i]); |
6821 | if (phba->sli_rev == LPFC_SLI_REV4) |
6822 | lpfc_sli4_unreg_all_rpis(vports[i]); |
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
set_bit(FC_VPORT_NEEDS_INIT_VPI, &vports[i]->fc_flag);
6829 | } |
6830 | lpfc_destroy_vport_work_array(phba, vports); |
6831 | if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { |
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6833 | if (ndlp) |
6834 | lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); |
6835 | lpfc_cleanup_pending_mbox(phba->pport); |
6836 | if (phba->sli_rev == LPFC_SLI_REV4) |
6837 | lpfc_sli4_unreg_all_rpis(phba->pport); |
lpfc_mbx_unreg_vpi(phba->pport);
shost = lpfc_shost_from_vport(phba->pport);
spin_lock_irq(shost->host_lock);
phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
set_bit(FC_VPORT_NEEDS_INIT_VPI, &phba->pport->fc_flag);
6844 | } |
6845 | |
6846 | /* Cleanup any outstanding ELS commands */ |
6847 | lpfc_els_flush_all_cmd(phba); |
6848 | |
6849 | /* Unregister the physical port VFI */ |
6850 | rc = lpfc_issue_unreg_vfi(phba->pport); |
6851 | return rc; |
6852 | } |
6853 | |
6854 | /** |
6855 | * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record |
6856 | * @phba: Pointer to hba context object. |
6857 | * |
6858 | * This function issues synchronous unregister FCF mailbox command to HBA to |
6859 | * unregister the currently registered FCF record. The driver does not reset |
6860 | * the driver FCF usage state flags. |
6861 | * |
* Return 0 if successfully issued, non-zero otherwise.
6863 | */ |
6864 | int |
6865 | lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) |
6866 | { |
6867 | LPFC_MBOXQ_t *mbox; |
6868 | int rc; |
6869 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6871 | if (!mbox) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2551 UNREG_FCFI mbox allocation failed "
"HBA state x%x\n", phba->pport->port_state);
6875 | return -ENOMEM; |
6876 | } |
6877 | lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); |
6878 | mbox->vport = phba->pport; |
6879 | mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; |
6880 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
6881 | |
6882 | if (rc == MBX_NOT_FINISHED) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2552 Unregister FCFI command failed rc x%x "
"HBA state x%x\n",
rc, phba->pport->port_state);
6887 | return -EINVAL; |
6888 | } |
6889 | return 0; |
6890 | } |
6891 | |
6892 | /** |
6893 | * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan |
6894 | * @phba: Pointer to hba context object. |
6895 | * |
* This function unregisters the currently registered FCF. This function
* also tries to find another FCF for discovery by rescanning the HBA FCF table.
6898 | */ |
6899 | void |
6900 | lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) |
6901 | { |
6902 | int rc; |
6903 | |
6904 | /* Preparation for unregistering fcf */ |
6905 | rc = lpfc_unregister_fcf_prep(phba); |
6906 | if (rc) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2748 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
6910 | return; |
6911 | } |
6912 | |
6913 | /* Now, unregister FCF record and reset HBA FCF state */ |
6914 | rc = lpfc_sli4_unregister_fcf(phba); |
6915 | if (rc) |
6916 | return; |
6917 | /* Reset HBA FCF states after successful unregister FCF */ |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
6921 | phba->fcf.current_rec.flag = 0; |
6922 | |
6923 | /* |
6924 | * If driver is not unloading, check if there is any other |
6925 | * FCF record that can be used for discovery. |
6926 | */ |
6927 | if (test_bit(FC_UNLOADING, &phba->pport->load_flag) || |
6928 | phba->link_state < LPFC_LINK_UP) |
6929 | return; |
6930 | |
6931 | /* This is considered as the initial FCF discovery scan */ |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
6935 | |
6936 | /* Reset FCF roundrobin bmask for new discovery */ |
6937 | lpfc_sli4_clear_fcf_rr_bmask(phba); |
6938 | |
6939 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
6940 | |
6941 | if (rc) { |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2553 lpfc_unregister_unused_fcf failed "
"to read FCF record HBA state x%x\n",
phba->pport->port_state);
6949 | } |
6950 | } |
6951 | |
6952 | /** |
6953 | * lpfc_unregister_fcf - Unregister the currently registered fcf record |
6954 | * @phba: Pointer to hba context object. |
6955 | * |
* This function just unregisters the currently registered FCF. It does not
* try to find another FCF for discovery.
6958 | */ |
6959 | void |
6960 | lpfc_unregister_fcf(struct lpfc_hba *phba) |
6961 | { |
6962 | int rc; |
6963 | |
6964 | /* Preparation for unregistering fcf */ |
6965 | rc = lpfc_unregister_fcf_prep(phba); |
6966 | if (rc) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2749 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
6970 | return; |
6971 | } |
6972 | |
6973 | /* Now, unregister FCF record and reset HBA FCF state */ |
6974 | rc = lpfc_sli4_unregister_fcf(phba); |
6975 | if (rc) |
6976 | return; |
6977 | /* Set proper HBA FCF states after successful unregister FCF */ |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
6981 | } |
6982 | |
6983 | /** |
6984 | * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. |
6985 | * @phba: Pointer to hba context object. |
6986 | * |
* This function checks if there are any connected remote ports for the FCF
* and, if all the devices are disconnected, unregisters the FCFI.
* This function also tries to use another FCF for discovery.
6990 | */ |
6991 | void |
6992 | lpfc_unregister_unused_fcf(struct lpfc_hba *phba) |
6993 | { |
6994 | /* |
6995 | * If HBA is not running in FIP mode, if HBA does not support |
6996 | * FCoE, if FCF discovery is ongoing, or if FCF has not been |
6997 | * registered, do nothing. |
6998 | */ |
spin_lock_irq(&phba->hbalock);
7000 | if (!(phba->hba_flag & HBA_FCOE_MODE) || |
7001 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || |
7002 | !(phba->hba_flag & HBA_FIP_SUPPORT) || |
7003 | (phba->fcf.fcf_flag & FCF_DISCOVERY) || |
7004 | (phba->pport->port_state == LPFC_FLOGI)) { |
spin_unlock_irq(&phba->hbalock);
7006 | return; |
7007 | } |
spin_unlock_irq(&phba->hbalock);
7009 | |
7010 | if (lpfc_fcf_inuse(phba)) |
7011 | return; |
7012 | |
7013 | lpfc_unregister_fcf_rescan(phba); |
7014 | } |
7015 | |
7016 | /** |
7017 | * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. |
7018 | * @phba: Pointer to hba context object. |
7019 | * @buff: Buffer containing the FCF connection table as in the config |
7020 | * region. |
* This function creates the driver data structure for the FCF connection
* record table read from config region 23.
7023 | */ |
7024 | static void |
7025 | lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, |
7026 | uint8_t *buff) |
7027 | { |
7028 | struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; |
7029 | struct lpfc_fcf_conn_hdr *conn_hdr; |
7030 | struct lpfc_fcf_conn_rec *conn_rec; |
7031 | uint32_t record_count; |
7032 | int i; |
7033 | |
7034 | /* Free the current connect table */ |
7035 | list_for_each_entry_safe(conn_entry, next_conn_entry, |
7036 | &phba->fcf_conn_rec_list, list) { |
list_del_init(&conn_entry->list);
kfree(conn_entry);
7039 | } |
7040 | |
7041 | conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; |
7042 | record_count = conn_hdr->length * sizeof(uint32_t)/ |
7043 | sizeof(struct lpfc_fcf_conn_rec); |
7044 | |
7045 | conn_rec = (struct lpfc_fcf_conn_rec *) |
7046 | (buff + sizeof(struct lpfc_fcf_conn_hdr)); |
7047 | |
7048 | for (i = 0; i < record_count; i++) { |
7049 | if (!(conn_rec[i].flags & FCFCNCT_VALID)) |
7050 | continue; |
conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
GFP_KERNEL);
7053 | if (!conn_entry) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2566 Failed to allocate connection"
" table entry\n");
7057 | return; |
7058 | } |
7059 | |
7060 | memcpy(&conn_entry->conn_rec, &conn_rec[i], |
7061 | sizeof(struct lpfc_fcf_conn_rec)); |
list_add_tail(&conn_entry->list,
&phba->fcf_conn_rec_list);
7064 | } |
7065 | |
if (!list_empty(&phba->fcf_conn_rec_list)) {
7067 | i = 0; |
7068 | list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, |
7069 | list) { |
7070 | conn_rec = &conn_entry->conn_rec; |
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3345 FCF connection list rec[%02d]: "
"flags:x%04x, vtag:x%04x, "
"fabric_name:x%02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x, "
"switch_name:x%02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x\n", i++,
7078 | conn_rec->flags, conn_rec->vlan_tag, |
7079 | conn_rec->fabric_name[0], |
7080 | conn_rec->fabric_name[1], |
7081 | conn_rec->fabric_name[2], |
7082 | conn_rec->fabric_name[3], |
7083 | conn_rec->fabric_name[4], |
7084 | conn_rec->fabric_name[5], |
7085 | conn_rec->fabric_name[6], |
7086 | conn_rec->fabric_name[7], |
7087 | conn_rec->switch_name[0], |
7088 | conn_rec->switch_name[1], |
7089 | conn_rec->switch_name[2], |
7090 | conn_rec->switch_name[3], |
7091 | conn_rec->switch_name[4], |
7092 | conn_rec->switch_name[5], |
7093 | conn_rec->switch_name[6], |
7094 | conn_rec->switch_name[7]); |
7095 | } |
7096 | } |
7097 | } |
7098 | |
7099 | /** |
* lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
7101 | * @phba: Pointer to hba context object. |
7102 | * @buff: Buffer containing the FCoE parameter data structure. |
7103 | * |
* This function updates the driver data structure with the config
* parameters read from config region 23.
7106 | */ |
7107 | static void |
7108 | lpfc_read_fcoe_param(struct lpfc_hba *phba, |
7109 | uint8_t *buff) |
7110 | { |
7111 | struct lpfc_fip_param_hdr *fcoe_param_hdr; |
7112 | struct lpfc_fcoe_params *fcoe_param; |
7113 | |
7114 | fcoe_param_hdr = (struct lpfc_fip_param_hdr *) |
7115 | buff; |
7116 | fcoe_param = (struct lpfc_fcoe_params *) |
7117 | (buff + sizeof(struct lpfc_fip_param_hdr)); |
7118 | |
7119 | if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || |
7120 | (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) |
7121 | return; |
7122 | |
7123 | if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { |
7124 | phba->valid_vlan = 1; |
7125 | phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & |
7126 | 0xFFF; |
7127 | } |
7128 | |
7129 | phba->fc_map[0] = fcoe_param->fc_map[0]; |
7130 | phba->fc_map[1] = fcoe_param->fc_map[1]; |
7131 | phba->fc_map[2] = fcoe_param->fc_map[2]; |
7132 | return; |
7133 | } |
7134 | |
7135 | /** |
7136 | * lpfc_get_rec_conf23 - Get a record type in config region data. |
7137 | * @buff: Buffer containing config region 23 data. |
7138 | * @size: Size of the data buffer. |
7139 | * @rec_type: Record type to be searched. |
7140 | * |
* This function searches the config region data to find the beginning
* of the record specified by rec_type. If the record is found, this
* function returns a pointer to the record; otherwise it returns NULL.
7144 | */ |
7145 | static uint8_t * |
7146 | lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) |
7147 | { |
7148 | uint32_t offset = 0, rec_length; |
7149 | |
7150 | if ((buff[0] == LPFC_REGION23_LAST_REC) || |
7151 | (size < sizeof(uint32_t))) |
7152 | return NULL; |
7153 | |
7154 | rec_length = buff[offset + 1]; |
7155 | |
7156 | /* |
7157 | * One TLV record has one word header and number of data words |
7158 | * specified in the rec_length field of the record header. |
7159 | */ |
7160 | while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) |
7161 | <= size) { |
7162 | if (buff[offset] == rec_type) |
7163 | return &buff[offset]; |
7164 | |
7165 | if (buff[offset] == LPFC_REGION23_LAST_REC) |
7166 | return NULL; |
7167 | |
7168 | offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); |
7169 | rec_length = buff[offset + 1]; |
7170 | } |
7171 | return NULL; |
7172 | } |
7173 | |
7174 | /** |
7175 | * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. |
7176 | * @phba: Pointer to lpfc_hba data structure. |
7177 | * @buff: Buffer containing config region 23 data. |
7178 | * @size: Size of the data buffer. |
7179 | * |
* This function parses the FCoE config parameters in config region 23 and
* populates the driver data structure with the parameters.
7182 | */ |
7183 | void |
7184 | lpfc_parse_fcoe_conf(struct lpfc_hba *phba, |
7185 | uint8_t *buff, |
7186 | uint32_t size) |
7187 | { |
7188 | uint32_t offset = 0; |
7189 | uint8_t *rec_ptr; |
7190 | |
7191 | /* |
7192 | * If data size is less than 2 words signature and version cannot be |
7193 | * verified. |
7194 | */ |
7195 | if (size < 2*sizeof(uint32_t)) |
7196 | return; |
7197 | |
7198 | /* Check the region signature first */ |
if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2567 Config region 23 has bad signature\n");
7202 | return; |
7203 | } |
7204 | |
7205 | offset += 4; |
7206 | |
7207 | /* Check the data structure version */ |
7208 | if (buff[offset] != LPFC_REGION23_VERSION) { |
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2568 Config region 23 has bad version\n");
7211 | return; |
7212 | } |
7213 | offset += 4; |
7214 | |
7215 | /* Read FCoE param record */ |
rec_ptr = lpfc_get_rec_conf23(&buff[offset],
size - offset, FCOE_PARAM_TYPE);
7218 | if (rec_ptr) |
lpfc_read_fcoe_param(phba, rec_ptr);
7220 | |
7221 | /* Read FCF connection table */ |
rec_ptr = lpfc_get_rec_conf23(&buff[offset],
size - offset, FCOE_CONN_TBL_TYPE);
7224 | if (rec_ptr) |
lpfc_read_fcf_conn_tbl(phba, rec_ptr);
7226 | |
7227 | } |
7228 | |
7229 | /* |
7230 | * lpfc_error_lost_link - IO failure from link event or FW reset check. |
7231 | * |
7232 | * @vport: Pointer to lpfc_vport data structure. |
7233 | * @ulp_status: IO completion status. |
7234 | * @ulp_word4: Reason code for the ulp_status. |
7235 | * |
7236 | * This function evaluates the ulp_status and ulp_word4 values |
7237 | * for specific error values that indicate an internal link fault |
7238 | * or fw reset event for the completing IO. Callers require this |
7239 | * common data to decide next steps on the IO. |
7240 | * |
7241 | * Return: |
7242 | * false - No link or reset error occurred. |
7243 | * true - A link or reset error occurred. |
7244 | */ |
7245 | bool |
7246 | lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4) |
7247 | { |
7248 | /* Mask off the extra port data to get just the reason code. */ |
7249 | u32 rsn_code = IOERR_PARAM_MASK & ulp_word4; |
7250 | |
7251 | if (ulp_status == IOSTAT_LOCAL_REJECT && |
7252 | (rsn_code == IOERR_SLI_ABORTED || |
7253 | rsn_code == IOERR_LINK_DOWN || |
7254 | rsn_code == IOERR_SLI_DOWN)) { |
lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS,
"0408 Report link error true: <x%x:x%x>\n",
ulp_status, ulp_word4);
7258 | return true; |
7259 | } |
7260 | |
7261 | return false; |
7262 | } |
7263 | |